language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
|---|---|
java
|
/**
 * Draws the page-title indicator: each visible page title (clipped to the view
 * and shifted so neighbouring titles never overlap), a footer line, and either
 * a triangle or an underline indicator. The selected title cross-fades between
 * mColorText and mColorSelected as the pager scrolls.
 * NOTE(review): mRecyclerView is cast to RecyclerViewPager below — assumes
 * this indicator is only ever attached to that pager type; confirm.
 */
@Override
protected void onDraw(Canvas canvas) {
    super.onDraw(canvas);
    // Nothing to draw without an attached pager or any pages.
    if (mRecyclerView == null) {
        return;
    }
    final int count = mRecyclerView.getAdapter().getItemCount();
    if (count == 0) {
        return;
    }
    // mCurrentPage is -1 on first start and after orientation changed. If so, retrieve the correct index from viewpager.
    if (mCurrentPage == -1 && mRecyclerView != null) {
        mCurrentPage = ((RecyclerViewPager)mRecyclerView).getCurrentPosition();//.getCurrentItem();
    }
    //Calculate views bounds
    ArrayList<Rect> bounds = calculateAllBounds(mPaintText);
    final int boundsSize = bounds.size();
    //Make sure we're on a page that still exists
    if (mCurrentPage >= boundsSize) {
        setCurrentItem(boundsSize - 1);
        return;
    }
    final int countMinusOne = count - 1;
    final float halfWidth = getWidth() / 2f;
    final int left = getLeft();
    final float leftClip = left + mClipPadding;
    final int width = getWidth();
    int height = getHeight();
    final int right = left + width;
    final float rightClip = right - mClipPadding;
    int page = mCurrentPage;
    float offsetPercent;
    // Past the scroll midpoint the *next* page is treated as current, and the
    // offset is mirrored so fades are symmetric around the page change.
    if (mPageOffset <= 0.5) {
        offsetPercent = mPageOffset;
    } else {
        page += 1;
        offsetPercent = 1 - mPageOffset;
    }
    final boolean currentSelected = (offsetPercent <= SELECTION_FADE_PERCENTAGE);
    final boolean currentBold = (offsetPercent <= BOLD_FADE_PERCENTAGE);
    final float selectedPercent = (SELECTION_FADE_PERCENTAGE - offsetPercent) / SELECTION_FADE_PERCENTAGE;
    //Verify if the current view must be clipped to the screen
    Rect curPageBound = bounds.get(mCurrentPage);
    float curPageWidth = curPageBound.right - curPageBound.left;
    if (curPageBound.left < leftClip) {
        //Try to clip to the screen (left side)
        clipViewOnTheLeft(curPageBound, curPageWidth, left);
    }
    if (curPageBound.right > rightClip) {
        //Try to clip to the screen (right side)
        clipViewOnTheRight(curPageBound, curPageWidth, right);
    }
    //Left views starting from the current position
    if (mCurrentPage > 0) {
        for (int i = mCurrentPage - 1; i >= 0; i--) {
            Rect bound = bounds.get(i);
            //Is left side is outside the screen
            if (bound.left < leftClip) {
                int w = bound.right - bound.left;
                //Try to clip to the screen (left side)
                clipViewOnTheLeft(bound, w, left);
                //Except if there's an intersection with the right view
                Rect rightBound = bounds.get(i + 1);
                //Intersection
                if (bound.right + mTitlePadding > rightBound.left) {
                    bound.left = (int) (rightBound.left - w - mTitlePadding);
                    bound.right = bound.left + w;
                }
            }
        }
    }
    //Right views starting from the current position
    if (mCurrentPage < countMinusOne) {
        for (int i = mCurrentPage + 1 ; i < count; i++) {
            Rect bound = bounds.get(i);
            //If right side is outside the screen
            if (bound.right > rightClip) {
                int w = bound.right - bound.left;
                //Try to clip to the screen (right side)
                clipViewOnTheRight(bound, w, right);
                //Except if there's an intersection with the left view
                Rect leftBound = bounds.get(i - 1);
                //Intersection
                if (bound.left - mTitlePadding < leftBound.right) {
                    bound.left = (int) (leftBound.right + mTitlePadding);
                    bound.right = bound.left + w;
                }
            }
        }
    }
    //Now draw views
    int colorTextAlpha = mColorText >>> 24;
    for (int i = 0; i < count; i++) {
        //Get the title
        Rect bound = bounds.get(i);
        //Only if one side is visible
        if ((bound.left > left && bound.left < right) || (bound.right > left && bound.right < right)) {
            final boolean currentPage = (i == page);
            final CharSequence pageTitle = getTitle(i);
            //Only set bold if we are within bounds
            mPaintText.setFakeBoldText(currentPage && currentBold && mBoldText);
            //Draw text as unselected
            mPaintText.setColor(mColorText);
            if(currentPage && currentSelected) {
                //Fade out/in unselected text as the selected text fades in/out
                mPaintText.setAlpha(colorTextAlpha - (int)(colorTextAlpha * selectedPercent));
            }
            //Except if there's an intersection with the right view
            if (i < boundsSize - 1) {
                Rect rightBound = bounds.get(i + 1);
                //Intersection
                if (bound.right + mTitlePadding > rightBound.left) {
                    int w = bound.right - bound.left;
                    bound.left = (int) (rightBound.left - w - mTitlePadding);
                    bound.right = bound.left + w;
                }
            }
            canvas.drawText(pageTitle, 0, pageTitle.length(), bound.left, bound.bottom + mTopPadding, mPaintText);
            //If we are within the selected bounds draw the selected text
            if (currentPage && currentSelected) {
                mPaintText.setColor(mColorSelected);
                mPaintText.setAlpha((int)((mColorSelected >>> 24) * selectedPercent));
                canvas.drawText(pageTitle, 0, pageTitle.length(), bound.left, bound.bottom + mTopPadding, mPaintText);
            }
        }
    }
    //If we want the line on the top change height to zero and invert the line height to trick the drawing code
    float footerLineHeight = mFooterLineHeight;
    float footerIndicatorLineHeight = mFooterIndicatorHeight;
    if (mLinePosition == LinePosition.Top) {
        height = 0;
        footerLineHeight = -footerLineHeight;
        footerIndicatorLineHeight = -footerIndicatorLineHeight;
    }
    //Draw the footer line
    mPath.reset();
    mPath.moveTo(0, height - footerLineHeight / 2f);
    mPath.lineTo(width, height - footerLineHeight / 2f);
    mPath.close();
    canvas.drawPath(mPath, mPaintFooterLine);
    float heightMinusLine = height - footerLineHeight;
    switch (mFooterIndicatorStyle) {
        case Triangle:
            mPath.reset();
            mPath.moveTo(halfWidth, heightMinusLine - footerIndicatorLineHeight);
            mPath.lineTo(halfWidth + footerIndicatorLineHeight, heightMinusLine);
            mPath.lineTo(halfWidth - footerIndicatorLineHeight, heightMinusLine);
            mPath.close();
            canvas.drawPath(mPath, mPaintFooterIndicator);
            break;
        case Underline:
            // Underline only drawn while the selected title is (mostly) settled.
            if (!currentSelected || page >= boundsSize) {
                break;
            }
            Rect underlineBounds = bounds.get(page);
            final float rightPlusPadding = underlineBounds.right + mFooterIndicatorUnderlinePadding;
            final float leftMinusPadding = underlineBounds.left - mFooterIndicatorUnderlinePadding;
            final float heightMinusLineMinusIndicator = heightMinusLine - footerIndicatorLineHeight;
            mPath.reset();
            mPath.moveTo(leftMinusPadding, heightMinusLine);
            mPath.lineTo(rightPlusPadding, heightMinusLine);
            mPath.lineTo(rightPlusPadding, heightMinusLineMinusIndicator);
            mPath.lineTo(leftMinusPadding, heightMinusLineMinusIndicator);
            mPath.close();
            mPaintFooterIndicator.setAlpha((int)(0xFF * selectedPercent));
            canvas.drawPath(mPath, mPaintFooterIndicator);
            mPaintFooterIndicator.setAlpha(0xFF);
            break;
    }
}
|
python
|
def create_main_synopsis(self, parser):
    """Build the man-page SYNOPSIS section from the main parser's usage."""
    self.add_usage(parser.usage, parser._actions,
                   parser._mutually_exclusive_groups, prefix='')
    raw = self._format_usage(None, parser._actions,
                             parser._mutually_exclusive_groups, '')
    # Drop the leading program name; it is re-added below with bold markup.
    raw = raw.replace('%s ' % self._prog, '')
    return '.SH SYNOPSIS\n \\fB%s\\fR %s\n' % (self._markup(self._prog), raw)
|
python
|
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
                          append=True):
    '''Search in file 'filename' for a line starting with 'prefix' and replace
    the line by 'new_line'.
    If a line starting with 'prefix' not exists 'new_line' will be appended.
    If the file not exists, it will be created.
    Return False if new_line was appended, else True (i.e. if the prefix was
    found within of the file).
    '''
    if env.host_string == 'localhost':
        return update_or_append_local(filename, prefix, new_line,
                                      keep_backup, append)
    # Remote host: fetch the file, edit the local copy, then push it back.
    tmp_dir = tempfile.mkdtemp(suffix='', prefix='fabsetup_')
    local_path = os.path.join(tmp_dir, os.path.basename(filename))
    fabric.operations.get(remote_path=filename, local_path=local_path,
                          use_sudo=True, temp_dir='/tmp')
    result = update_or_append_local(local_path, prefix, new_line,
                                    keep_backup, append)
    put(local_path, remote_path=filename, use_sudo=True, temp_dir='/tmp')
    with quiet():
        # Best-effort cleanup of the scratch directory.
        fabric.api.local(flo('rm -rf {tmp_dir}'))
    return result
|
python
|
def _query(self):  # pylint: disable=E0202
    """
    Query WMI using WMI Query Language (WQL) & parse the results.
    Returns: List of WMI objects or `TimeoutException`.
    """
    formated_property_names = ",".join(self.property_names)
    wql = "Select {property_names} from {class_name}{filters}".format(
        property_names=formated_property_names, class_name=self.class_name, filters=self.formatted_filters
    )
    self.logger.debug(u"Querying WMI: {0}".format(wql))
    try:
        # From: https://msdn.microsoft.com/en-us/library/aa393866(v=vs.85).aspx
        flag_return_immediately = 0x10  # Default flag.
        flag_forward_only = 0x20
        flag_use_amended_qualifiers = 0x20000
        query_flags = flag_return_immediately | flag_forward_only
        # For the first query, cache the qualifiers to determine each
        # propertie's "CounterType"
        includes_qualifiers = self.is_raw_perf_class and self._property_counter_types is None
        if includes_qualifiers:
            self._property_counter_types = CaseInsensitiveDict()
            query_flags |= flag_use_amended_qualifiers
        raw_results = self.get_connection().ExecQuery(wql, "WQL", query_flags)
        results = self._parse_results(raw_results, includes_qualifiers=includes_qualifiers)
    except pywintypes.com_error:
        # COM failures are logged (with traceback) and reported as no results
        # rather than propagated, so callers see an empty list.
        self.logger.warning(u"Failed to execute WMI query (%s)", wql, exc_info=True)
        results = []
    return results
|
python
|
def accept(self):
    """Accept a connection. The socket must be bound to an address
    and listening for connections. The return value is a new
    socket object usable to send and receive data on the
    connection."""
    socket = Socket(self._llc, None)
    # Fix: use the same `_llc` attribute the line above proves exists; the
    # old code went through `self.llc`, inconsistent with this method.
    socket._tco = self._llc.accept(self._tco)
    return socket
|
java
|
/**
 * Visits every UIColumn child and each column's facets.
 *
 * @return true as soon as any visit reports completion, false otherwise
 */
private boolean visitColumnsAndColumnFacets(VisitContext context,
    VisitCallback callback,
    boolean visitRows) {
    // Reset the row index so columns are visited outside any row context.
    if (visitRows) {
        setRowIndex(-1);
    }
    if (getChildCount() > 0) {
        for (UIComponent child : getChildren()) {
            if (!(child instanceof UIColumn)) {
                continue;
            }
            // Visit the column component itself first.
            VisitResult outcome = context.invokeVisitCallback(child, callback);
            if (outcome == VisitResult.COMPLETE) {
                return true;
            }
            // Then descend into each of the column's facets, if any.
            if (child.getFacetCount() > 0) {
                for (UIComponent facet : child.getFacets().values()) {
                    if (facet.visitTree(context, callback)) {
                        return true;
                    }
                }
            }
        }
    }
    return false;
}
|
java
|
/**
 * Probes network reachability by fetching content from a well-known host.
 *
 * @return true when the content could be fetched, false on any I/O failure
 */
private static boolean isInternetReachable()
{
    HttpURLConnection urlConnect = null;
    try {
        final URL url = new URL("http://www.google.com");
        urlConnect = (HttpURLConnection) url.openConnection();
        urlConnect.setConnectTimeout(1000);
        // Also bound the read, so a stalled response cannot hang the caller.
        urlConnect.setReadTimeout(1000);
        urlConnect.getContent();
    } catch (IOException e) {
        // UnknownHostException is an IOException subclass; one catch suffices.
        return false;
    } finally {
        if (urlConnect != null) {
            // Release the connection even when the fetch failed (the original
            // only disconnected on success, leaking the connection on error).
            urlConnect.disconnect();
        }
    }
    return true;
}
|
python
|
def serialize_bytes(data):
    """Serialize *data* as a Telegram (TL) byte string.

    Short payloads (< 254 bytes) get a single length byte; longer ones get a
    0xFE marker followed by a 3-byte little-endian length. Both forms are
    zero-padded so the total length is a multiple of 4.

    :param data: ``bytes`` payload (``str`` is UTF-8 encoded first)
    :raises TypeError: for any other input type
    """
    if not isinstance(data, bytes):
        if not isinstance(data, str):
            raise TypeError(
                'bytes or str expected, not {}'.format(type(data)))
        data = data.encode('utf-8')

    length = len(data)
    if length < 254:
        header = bytes([length])
        # Header byte counts toward the alignment.
        pad = -(length + 1) % 4
    else:
        header = bytes([254, length % 256, (length >> 8) % 256,
                        (length >> 16) % 256])
        # The 4-byte header is already aligned; pad the payload alone.
        pad = -length % 4
    return header + data + bytes(pad)
|
java
|
/**
 * Normalizes a path: strips "classpath:" and "file:" prefixes, unifies
 * separators to '/', preserves a Windows drive prefix (e.g. "C:"), and
 * resolves "." and ".." segments.
 *
 * @param path the path to normalize; may be null
 * @return the normalized path, or null when the input was null
 */
public static String normalize(String path) {
    if (path == null) {
        return null;
    }
    // Strip a Spring-style "classpath:" prefix, case-insensitively.
    String pathToUse = StrUtil.removePrefixIgnoreCase(path, URLUtil.CLASSPATH_URL_PREFIX);
    // Strip a "file:" prefix.
    pathToUse = StrUtil.removePrefixIgnoreCase(pathToUse, URLUtil.FILE_URL_PREFIX);
    // Collapse runs of slashes/backslashes into single forward slashes.
    pathToUse = pathToUse.replaceAll("[/\\\\]{1,}", StrUtil.SLASH).trim();
    int prefixIndex = pathToUse.indexOf(StrUtil.COLON);
    String prefix = "";
    if (prefixIndex > -1) {
        // A colon suggests a Windows-style drive prefix.
        prefix = pathToUse.substring(0, prefixIndex + 1);
        if (StrUtil.startWith(prefix, StrUtil.C_SLASH)) {
            // Drop the leading slash from paths like "/C:".
            prefix = prefix.substring(1);
        }
        if (false == prefix.contains(StrUtil.SLASH)) {
            pathToUse = pathToUse.substring(prefixIndex + 1);
        } else {
            // A slash inside the prefix means this is not a Windows-style path.
            prefix = StrUtil.EMPTY;
        }
    }
    if (pathToUse.startsWith(StrUtil.SLASH)) {
        prefix += StrUtil.SLASH;
        pathToUse = pathToUse.substring(1);
    }
    List<String> pathList = StrUtil.split(pathToUse, StrUtil.C_SLASH);
    List<String> pathElements = new LinkedList<String>();
    int tops = 0;
    String element;
    // Walk segments right-to-left so ".." can cancel preceding elements.
    for (int i = pathList.size() - 1; i >= 0; i--) {
        element = pathList.get(i);
        if (StrUtil.DOT.equals(element)) {
            // Current-directory marker — discard.
        } else if (StrUtil.DOUBLE_DOT.equals(element)) {
            tops++;
        } else {
            if (tops > 0) {
                // A pending ".." consumes this element.
                tops--;
            } else {
                // Normal path element found.
                pathElements.add(0, element);
            }
        }
    }
    return prefix + CollUtil.join(pathElements, StrUtil.SLASH);
}
|
java
|
/**
 * Fills the XML element with the resource's structure id, root path and the
 * relation type derived from the element's xpath.
 */
protected void fillResource(CmsObject cms, Element element, CmsResource res) {
    // Strip everything before the GroupContainers node so the relation-type
    // lookup sees a path relative to that node.
    String relativePath = element.getPath();
    final String marker = "/" + XmlNode.GroupContainers.name() + "/";
    final int markerIndex = relativePath.lastIndexOf(marker);
    if (markerIndex > 0) {
        relativePath = relativePath.substring(markerIndex + 1);
    }
    CmsRelationType relationType = getHandler().getRelationType(relativePath);
    CmsXmlVfsFileValue.fillEntry(element, res.getStructureId(), res.getRootPath(), relationType);
}
|
java
|
/**
 * Collects the text content of every child element with the given tag name,
 * in document order.
 */
public static List<String> childrenText(Element parentElement, String tagname) {
    final List<String> texts = new ArrayList<String>();
    for (Element child : children(parentElement, tagname)) {
        texts.add(elementText(child));
    }
    return texts;
}
|
python
|
def _get_migrate_funcs(cls, orig_version, target_version):
    """
    >>> @Manager.register
    ... def v1_to_2(manager, doc):
    ...     doc['foo'] = 'bar'
    >>> @Manager.register
    ... def v2_to_1(manager, doc):
    ...     del doc['foo']
    >>> @Manager.register
    ... def v2_to_3(manager, doc):
    ...     doc['foo'] = doc['foo'] + ' baz'
    >>> funcs = list(Manager._get_migrate_funcs(1, 3))
    >>> len(funcs)
    2
    >>> funcs == [v1_to_2, v2_to_3]
    True
    >>> funcs = list(Manager._get_migrate_funcs(2, 1))
    >>> len(funcs)
    1
    >>> funcs == [v2_to_1]
    True
    >>> Manager._upgrade_funcs.clear()
    """
    # Walk version-by-version toward the target (up or down) and map each
    # adjacent (from, to) pair to its registered migration function.
    step = 1 if target_version > orig_version else -1
    version_chain = range(orig_version, target_version + step, step)
    return itertools.starmap(cls._get_func, recipes.pairwise(version_chain))
|
python
|
def _update_noise(self, peak_num):
"""
Update live noise parameters
"""
i = self.peak_inds_i[peak_num]
self.noise_amp_recent = (0.875*self.noise_amp_recent
+ 0.125*self.sig_i[i])
return
|
python
|
def setup(self):
    """Initialise delimiter patterns and escape handling from the config."""
    self.context_visible_first = self.config['context_visible_first']
    self.delimiters = []
    self.escapes = None
    self.line_endings = self.config['normalize_line_endings']
    for delimiter in self.config['delimiters']:
        if not isinstance(delimiter, dict):
            continue
        content = delimiter.get('content', DEFAULT_CONTENT)
        # Pick a capture-group name that cannot collide with any of the
        # user-supplied pattern fragments.
        group = util.random_name_gen()
        while (
            group in delimiter['open'] or
            group in delimiter['close'] or
            group in content
        ):
            group = util.random_name_gen()
        pattern = r'%s(?P<%s>%s)(?:%s|\Z)' % (
            delimiter['open'], group, content, delimiter['close']
        )
        self.delimiters.append((re.compile(pattern, re.M), group))
    escapes = self.config['escapes']
    if escapes:
        self.escapes = re.compile(escapes)
|
python
|
def set_display_name(self, display_name):
    """Sets a display name.
    A display name is required and if not set, will be set by the
    provider.
    arg: display_name (string): the new display name
    raise: InvalidArgument - ``display_name`` is invalid
    raise: NoAccess - ``Metadata.isReadonly()`` is ``true``
    raise: NullArgument - ``display_name`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Validation against the display-name metadata happens inside
    # _get_display_text, which raises the errors documented above.
    self._my_map['displayName'] = self._get_display_text(display_name, self.get_display_name_metadata())
|
python
|
def clean_sequences(self):
    """Removes reads/contigs that contain plasmids, and masks phage sequences."""
    logging.info('Removing plasmids and masking phages')
    plasmid_db = os.path.join(self.reffilepath, 'plasmidfinder', 'plasmid_database.fa')
    phage_db = os.path.join(self.reffilepath, 'prophages', 'combinedtargets.tfa')
    with progressbar(self.runmetadata.samples) as bar:
        for sample in bar:
            # First bbduk pass drops any read matching the plasmid database.
            # NOTE(review): commands are built by string formatting and run
            # with shell=True — fine for trusted local paths, but confirm the
            # paths are never user-controlled.
            plasmid_removal = 'bbduk.sh ref={} in={} out={} overwrite'\
                .format(plasmid_db, sample.general.combined, sample.general.combined.replace('.f', '_noplasmid.f'))
            subprocess.call(plasmid_removal, shell=True, stdout=self.devnull, stderr=self.devnull)
            # Second pass masks phage hits with Ns (kmask=N) instead of
            # dropping the reads.
            phage_masking = 'bbduk.sh ref={} in={} out={} kmask=N overwrite'\
                .format(phage_db, sample.general.combined.replace('.f', '_noplasmid.f'),
                        sample.general.combined.replace('.f', '_clean.f'))
            subprocess.call(phage_masking, shell=True, stdout=self.devnull, stderr=self.devnull)
            # Swap the cleaned output into place and drop the intermediate.
            os.remove(sample.general.combined)
            os.rename(sample.general.combined.replace('.f', '_clean.f'), sample.general.combined)
            os.remove(sample.general.combined.replace('.f', '_noplasmid.f'))
|
python
|
def network_profiles(self):
    """Instance depends on the API version:
    * 2018-08-01: :class:`NetworkProfilesOperations<azure.mgmt.network.v2018_08_01.operations.NetworkProfilesOperations>`
    """
    api_version = self._get_api_version('network_profiles')
    # Only one API version is currently wired up; fail loudly for others.
    if api_version != '2018-08-01':
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    from .v2018_08_01.operations import NetworkProfilesOperations as OperationClass
    return OperationClass(self._client, self.config,
                          Serializer(self._models_dict(api_version)),
                          Deserializer(self._models_dict(api_version)))
|
java
|
/**
 * Removes the given entity type from the resolved map.
 *
 * @return true when the type had been registered, false otherwise
 */
public boolean unresolveType(final Class<? extends Entity> type) {
    // A non-null previous mapping means the type was actually registered.
    final Object previous = resolved.remove(Objects.requireNonNull(type));
    return previous != null;
}
|
python
|
def wait_for_ready(self, instance_id, limit=14400, delay=10, pending=False):
    """Determine if a Server is ready.

    A server is ready when no transactions are running on it.

    :param int instance_id: The instance ID with the pending transaction
    :param int limit: The maximum amount of seconds to wait.
    :param int delay: The number of seconds to sleep before checks. Defaults to 10.
    :param bool pending: Passed through to ``utils.is_ready`` — presumably
        treats servers with pending transactions as not ready; confirm.
    :return: True when the server became ready within the limit, else False.
    """
    now = time.time()
    until = now + limit
    mask = "mask[id, lastOperatingSystemReload[id], activeTransaction, provisionDate]"
    instance = self.get_hardware(instance_id, mask=mask)
    while now <= until:
        if utils.is_ready(instance, pending):
            return True
        transaction = utils.lookup(instance, 'activeTransaction', 'transactionStatus', 'friendlyName')
        # Never sleep past the deadline.
        snooze = min(delay, until - now)
        LOGGER.info("%s - %d not ready. Auto retry in %ds", transaction, instance_id, snooze)
        time.sleep(snooze)
        # Refresh server state before re-checking readiness.
        instance = self.get_hardware(instance_id, mask=mask)
        now = time.time()
    LOGGER.info("Waiting for %d expired.", instance_id)
    return False
|
java
|
/**
 * Repairs the {@code tail} pointer: while tail references an unlinked
 * (null-item) node with a successor, traverse toward the last reachable
 * node and CAS {@code tail} to it, restarting whenever tail moves.
 */
private void updateTail() {
    // Either tail already points to an active node, or we keep
    // trying to cas it to the last node until it does.
    Node<E> t, p, q;
    restartFromTail:
    while ((t = tail).item == null && (p = t.next) != null) {
        for (;;) {
            if ((q = p.next) == null ||
                (q = (p = q).next) == null) {
                // It is possible that p is NEXT_TERMINATOR,
                // but if so, the CAS is guaranteed to fail.
                if (TAIL.compareAndSet(this, t, p))
                    return;
                else
                    continue restartFromTail;
            }
            else if (t != tail)
                // tail moved underneath us; restart from the new tail.
                continue restartFromTail;
            else
                // Hop two nodes per iteration toward the end of the list.
                p = q;
        }
    }
}
|
java
|
/**
 * Collects every resource belonging to the module, including the full
 * subtree beneath each folder resource.
 */
private static Set<CmsResource> getAllResourcesInModule(CmsObject cms, CmsModule module) throws CmsException {
    final Set<CmsResource> collected = new HashSet<>();
    for (CmsResource moduleResource : CmsModule.calculateModuleResources(cms, module)) {
        collected.add(moduleResource);
        // Folders contribute their entire recursive contents as well.
        if (moduleResource.isFolder()) {
            collected.addAll(cms.readResources(moduleResource, CmsResourceFilter.ALL, true));
        }
    }
    return collected;
}
|
java
|
/**
 * Appends the given images to this result's image list, creating the
 * backing list on first use.
 *
 * @return this object, for call chaining
 */
public DescribeWorkspaceImagesResult withImages(WorkspaceImage... images) {
    // Lazily create the backing list, sized to the incoming varargs.
    if (this.images == null) {
        setImages(new com.amazonaws.internal.SdkInternalList<WorkspaceImage>(images.length));
    }
    java.util.Collections.addAll(this.images, images);
    return this;
}
|
python
|
def get_all_build_configs_by_labels(self, label_selectors):
    """
    Returns all build configs matching a given set of label selectors. It is
    up to the calling function to filter the results.

    :param label_selectors: iterable of (field, value) pairs, joined with
        commas into a single Kubernetes label selector (logical AND)
    :return: the ``items`` list from the buildconfigs API response
    """
    labels = ['%s=%s' % (field, value) for field, value in label_selectors]
    labels = ','.join(labels)
    url = self._build_url("buildconfigs/", labelSelector=labels)
    return self._get(url).json()['items']
|
python
|
def generate_cache_key(value):
    """
    Produce a deterministic md5 hex digest for *value*.

    Non-bytes inputs are normalised recursively: text is encoded to bytes,
    scalars use their repr, dicts use their sorted (key, value) pairs, and
    list-likes/generators concatenate their items' keys.

    :raises TypeError: for unsupported value types
    """
    if is_bytes(value):
        return hashlib.md5(value).hexdigest()
    if is_text(value):
        return generate_cache_key(to_bytes(text=value))
    if is_boolean(value) or is_null(value) or is_number(value):
        return generate_cache_key(repr(value))
    if is_dict(value):
        # Sorting keys makes the digest independent of insertion order.
        items = ((key, value[key]) for key in sorted(value.keys()))
        return generate_cache_key(items)
    if is_list_like(value) or isinstance(value, collections.abc.Generator):
        return generate_cache_key("".join(
            generate_cache_key(item) for item in value
        ))
    raise TypeError("Cannot generate cache key for value {0} of type {1}".format(
        value,
        type(value),
    ))
|
java
|
/**
 * Maps a region of an existing file into memory in {@code READ_WRITE} mode.
 * Convenience overload delegating to the variant taking an explicit mode.
 *
 * @param location existing file to map
 * @param descriptionLabel label used in error messages
 * @param offset byte offset at which the mapping starts
 * @param length number of bytes to map
 * @return the mapped buffer
 */
public static MappedByteBuffer mapExistingFile(
    final File location, final String descriptionLabel, final long offset, final long length)
{
    return mapExistingFile(location, READ_WRITE, descriptionLabel, offset, length);
}
|
python
|
def collect_vocab(qp_pairs):
    '''
    Build the vocab from corpus: the set of every distinct word appearing
    in any question or passage token list.
    '''
    vocab = set()
    for pair in qp_pairs:
        vocab.update(token['word'] for token in pair['question_tokens'])
        vocab.update(token['word'] for token in pair['passage_tokens'])
    return vocab
|
python
|
def string_tokenizer(self, untokenized_string: str, include_blanks=False):
    """
    This function is based off CLTK's line tokenizer. Use this for strings
    rather than .txt files.
    input: '20. u2-sza-bi-la-kum\n1. a-na ia-as2-ma-ah-{d}iszkur#\n2.
    qi2-bi2-ma\n3. um-ma {d}utu-szi-{d}iszkur\n'
    output:['20. u2-sza-bi-la-kum', '1. a-na ia-as2-ma-ah-{d}iszkur#',
    '2. qi2-bi2-ma']
    :param untokenized_string: string to split into lines
    :param include_blanks: keep empty lines when True
    :return: lines as strings in list
    """
    assert isinstance(untokenized_string, str), \
        'Incoming argument must be a string.'
    if include_blanks:
        tokenized_lines = untokenized_string.splitlines()
    else:
        # Fix: drop genuinely empty lines. The old filter compared against
        # the literal r'\\n' (two characters plus 'n'), which splitlines()
        # never produces, so blanks were always kept.
        tokenized_lines = [line for line in untokenized_string.splitlines()
                           if line != '']
    line_output = []
    for line in tokenized_lines:
        if not self.damage:
            # Strip out damage characters. (A stray no-op `re.match` call
            # was removed here — its result was discarded.)
            line = ''.join(c for c in line if c not in "#[]?!*")
        line_output.append(line.rstrip())
    return line_output
|
python
|
def getTraitCovarStdErrors(self,term_i):
    """
    Returns standard errors on trait covariances from term_i (for the covariance estimate \see getTraitCovar)
    Args:
        term_i: index of the term we are interested in
    """
    assert self.init, 'GP not initialised'
    assert self.fast==False, 'Not supported for fast implementation'
    if self.P==1:
        # Single-trait case: delta method on the single scale parameter.
        out = (2*self.getScales()[term_i])**2*self._getLaplaceCovar()[term_i,term_i]
    else:
        C = self.vd.getTerm(term_i).getTraitCovar()
        n_params = C.getNumberParams()
        par_index = 0
        # NOTE(review): this loop reads getTerm(term_i) inside the body while
        # iterating range(term_i-1); it looks like it was meant to sum the
        # scale counts of the *preceding* terms (getTerm(term) over
        # range(term_i)). Left untouched — confirm against parameter layout.
        for term in range(term_i-1):
            par_index += self.vd.getTerm(term_i).getNumberScales()
        # Slice out the Laplace-covariance block for this term's parameters.
        Sigma1 = self._getLaplaceCovar()[par_index:(par_index+n_params),:][:,par_index:(par_index+n_params)]
        out = sp.zeros((self.P,self.P))
        for param_i in range(n_params):
            out += C.Kgrad_param(param_i)**2*Sigma1[param_i,param_i]
            for param_j in range(param_i):
                out += 2*abs(C.Kgrad_param(param_i)*C.Kgrad_param(param_j))*Sigma1[param_i,param_j]
        out = sp.sqrt(out)
    return out
|
java
|
/**
 * Returns the absolute path with forward slashes, appending a trailing
 * slash for directories.
 */
private String getAbsoluteName() {
    final File file = getAbsoluteFile();
    String name = file.getPath();
    // Directories must end with a slash.
    final boolean endsWithSeparator =
        name.charAt(name.length() - 1) == separatorChar;
    if (file.isDirectory() && !endsWithSeparator) {
        name = name + "/";
    }
    // Normalize platform separators to forward slashes.
    if (separatorChar != '/') {
        name = name.replace(separatorChar, '/');
    }
    return name;
}
|
java
|
/**
 * Sets the ResourceLoader and propagates it to the XML reader and the
 * scanner so all three components resolve resources consistently.
 */
public void setResourceLoader(ResourceLoader resourceLoader) {
    this.resourceLoader = resourceLoader;
    this.xmlReader.setResourceLoader(resourceLoader);
    this.scanner.setResourceLoader(resourceLoader);
}
|
python
|
def _set_base_dn(self):
    """Read the default naming context from cn=config and derive the
    container DNs (active/stage/preserved users, groups) from it.

    :raises Exception: when nsslapd-defaultnamingcontext cannot be read.
    """
    results = self._search(
        'cn=config',
        '(objectClass=*)',
        ['nsslapd-defaultnamingcontext'],
        scope=ldap.SCOPE_BASE
    )
    if results and isinstance(results, list):
        dn, attrs = results[0]
        base_dn = attrs['nsslapd-defaultnamingcontext'][0].decode('utf-8')
    else:
        # Previously a bare `raise Exception`; give callers a diagnosable
        # message (still an Exception, so existing handlers keep working).
        raise Exception(
            'Unable to determine base DN: nsslapd-defaultnamingcontext '
            'not found in cn=config')
    self._base_dn = base_dn
    self._active_user_base = 'cn=users,cn=accounts,' + self._base_dn
    self._stage_user_base = 'cn=staged users,cn=accounts,cn=provisioning,' + self._base_dn
    self._preserved_user_base = 'cn=deleted users,cn=accounts,cn=provisioning,' + self._base_dn
    self._groups_base = 'cn=groups,cn=accounts,' + self._base_dn
    log.debug('Base DN: %s' % self._base_dn)
|
java
|
/**
 * Returns true when the atom is a hydrogen that should not be drawn,
 * i.e. explicit-hydrogen display is disabled in the renderer model.
 */
protected boolean invisibleHydrogen(IAtom atom, RendererModel model) {
    return isHydrogen(atom) && !(Boolean) model.get(ShowExplicitHydrogens.class);
}
|
python
|
def from_json(cls, data: str, force_snake_case=True, force_cast: bool=False, restrict: bool=False) -> T:
    """From json string to instance
    :param data: Json string
    :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
    :param force_cast: Cast forcibly if True
    :param restrict: Prohibit extra parameters if True
    :return: Instance
    Usage:
        >>> from owlmixin.samples import Human
        >>> human: Human = Human.from_json('''{
        ...     "id": 1,
        ...     "name": "Tom",
        ...     "favorites": [
        ...         {"name": "Apple", "names_by_lang": {"en": "Apple", "de": "Apfel"}},
        ...         {"name": "Orange"}
        ...     ]
        ... }''')
        >>> human.id
        1
        >>> human.name
        'Tom'
        >>> human.favorites[0].names_by_lang.get()["de"]
        'Apfel'
    """
    # Parse first, then delegate all conversion policy to from_dict.
    parsed = util.load_json(data)
    return cls.from_dict(parsed,
                         force_snake_case=force_snake_case,
                         force_cast=force_cast,
                         restrict=restrict)
|
java
|
/**
 * Creates and registers a Job with the scheduler, wrapping the given
 * params into the job's JobDataMap.
 *
 * @param jobName unique name to register the job under
 * @param jobClass the Job implementation to instantiate at run time
 * @param params optional job data; may be null
 * @param isConcurrencyAllowed whether concurrent executions are permitted
 * @throws SundialSchedulerException if the underlying scheduler rejects the job
 */
public static void addJob(
    String jobName,
    Class<? extends Job> jobClass,
    Map<String, Object> params,
    boolean isConcurrencyAllowed)
    throws SundialSchedulerException {
    try {
        JobDataMap jobDataMap = new JobDataMap();
        if (params != null) {
            for (Entry<String, Object> entry : params.entrySet()) {
                jobDataMap.put(entry.getKey(), entry.getValue());
            }
        }
        JobDetail jobDetail =
            newJobBuilder(jobClass)
                .withIdentity(jobName)
                .usingJobData(jobDataMap)
                .isConcurrencyAllowed(isConcurrencyAllowed)
                .build();
        getScheduler().addJob(jobDetail);
    } catch (SchedulerException e) {
        // Logged AND rethrown wrapped, so callers see the failure too.
        logger.error("ERROR ADDING JOB!!!", e);
        throw new SundialSchedulerException("ERROR ADDING JOB!!!", e);
    }
}
|
python
|
def prior_tuples(self):
    """
    Collect every (name, Prior) pair owned by this object.

    Returns
    -------
    priors: [(String, Prior)]
        Pairs from tuple priors, then direct priors, then nested prior
        models, in that order.
    """
    from_tuples = [prior
                   for tuple_prior in self.tuple_prior_tuples
                   for prior in tuple_prior[1].prior_tuples]
    from_models = [prior
                   for prior_model in self.prior_model_tuples
                   for prior in prior_model[1].prior_tuples]
    return from_tuples + self.direct_prior_tuples + from_models
|
java
|
/**
 * Marshalls the notification's MccXml field via the protocol marshaller.
 *
 * @throws SdkClientException on a null argument or any marshalling failure
 */
public void marshall(EsamManifestConfirmConditionNotification esamManifestConfirmConditionNotification, ProtocolMarshaller protocolMarshaller) {
    // Fail fast on a null model object, mirroring the SDK convention.
    if (esamManifestConfirmConditionNotification == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(esamManifestConfirmConditionNotification.getMccXml(), MCCXML_BINDING);
    } catch (Exception e) {
        // Wrap everything so callers only see SdkClientException.
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
|
python
|
def disable(self):
    """Disables the entity at this endpoint.

    Issues a POST to the ``disable`` action and, if the service then
    reports a restart is required, restarts it (waiting up to 120s).

    :return: self, for call chaining.
    """
    self.post("disable")
    if self.service.restart_required:
        self.service.restart(120)
    return self
|
java
|
/**
 * Inserts {@code value} at the location named by a dotted key string,
 * creating intermediate SoyMapData/SoyListData containers as needed.
 *
 * @param keyStr dot-separated key path, e.g. {@code "a.0.b"}
 * @param value the value to store, validated via {@code ensureValidValue}
 * @throws SoyDataException if an intermediate key resolves to a
 *     non-collection value
 */
public void put(String keyStr, SoyData value) {
    List<String> keys = split(keyStr, '.');
    int numKeys = keys.size();
    CollectionData collectionData = this;
    // Walk/create every intermediate container; the last key is handled below.
    for (int i = 0; i <= numKeys - 2; ++i) {
        SoyData nextSoyData = collectionData.getSingle(keys.get(i));
        if (nextSoyData != null && !(nextSoyData instanceof CollectionData)) {
            throw new SoyDataException("Failed to evaluate key string \"" + keyStr + "\" for put().");
        }
        CollectionData nextCollectionData = (CollectionData) nextSoyData;
        if (nextCollectionData == null) {
            // Create the SoyData object that will be bound to keys.get(i). We need to check the first
            // part of keys[i+1] to know whether to create a SoyMapData or SoyListData (checking the
            // first char is sufficient).
            nextCollectionData =
                (Character.isDigit(keys.get(i + 1).charAt(0))) ? new SoyListData() : new SoyMapData();
            collectionData.putSingle(keys.get(i), nextCollectionData);
        }
        collectionData = nextCollectionData;
    }
    collectionData.putSingle(keys.get(numKeys - 1), ensureValidValue(value));
}
|
java
|
/**
 * Marks the child at {@code dataIndex} as measured when it exists in the
 * container, and returns it (or null when absent).
 */
public synchronized Widget measureChild(final int dataIndex, boolean calculateOffset) {
    Log.d(Log.SUBSYSTEM.LAYOUT, TAG, "measureChild dataIndex = %d", dataIndex);
    final Widget child = mContainer.get(dataIndex);
    if (child == null) {
        return null;
    }
    // Record the index as measured only for children the container holds.
    synchronized (mMeasuredChildren) {
        mMeasuredChildren.add(dataIndex);
    }
    return child;
}
|
java
|
/**
 * Returns the stored value at the start (first column) of the given row.
 *
 * @param rowIndex zero-based row index
 * @throws IllegalStateException if called after compact()
 * @throws IllegalArgumentException if rowIndex is out of range
 */
public int getRowStart(int rowIndex) {
    if (isCompacted) {
        throw new IllegalStateException(
            "Illegal Invocation of the method after compact()");
    }
    // NOTE(review): the check permits rowIndex == rows; if `rows` is the row
    // count (not the max index) this reads past the last row — confirm
    // whether the bound should be `rowIndex >= rows`.
    if (rowIndex < 0 || rowIndex > rows) {
        throw new IllegalArgumentException("rowIndex out of bound!");
    }
    return v[rowIndex * columns];
}
|
python
|
def handle_onchain_secretreveal(
    target_state: TargetTransferState,
    state_change: ContractReceiveSecretReveal,
    channel_state: NettingChannelState,
) -> TransitionResult[TargetTransferState]:
    """ Validates and handles a ContractReceiveSecretReveal state change. """
    if is_valid_secret_reveal(
        state_change=state_change,
        transfer_secrethash=target_state.transfer.lock.secrethash,
        secret=state_change.secret,
    ):
        # Record the on-chain secret on the channel and advance the target
        # state machine to the on-chain unlock phase.
        channel.register_onchain_secret(
            channel_state=channel_state,
            secret=state_change.secret,
            secrethash=state_change.secrethash,
            secret_reveal_block_number=state_change.block_number,
        )
        target_state.state = TargetTransferState.ONCHAIN_UNLOCK
        target_state.secret = state_change.secret
    return TransitionResult(target_state, list())
|
python
|
def decode(s):
    """Decode a folder name from IMAP modified UTF-7 encoding to unicode.
    Despite the function's name, the input may still be a unicode
    string. If the input is bytes, it's first decoded to unicode.
    """
    if isinstance(s, binary_type):
        s = s.decode('latin-1')
    if not isinstance(s, text_type):
        # Not text at all (e.g. None); return unchanged.
        return s
    r = []
    _in = []  # accumulator for the current '&...-' encoded run
    for c in s:
        if c == '&' and not _in:
            # Start of an encoded section.
            _in.append('&')
        elif c == '-' and _in:
            if len(_in) == 1:
                # '&-' is the escaped literal ampersand.
                r.append('&')
            else:
                # Decode the modified-BASE64 payload between '&' and '-'.
                r.append(modified_deutf7(''.join(_in[1:])))
            _in = []
        elif _in:
            _in.append(c)
        else:
            r.append(c)
    if _in:
        # Unterminated encoded section at end of string — decode what we have.
        r.append(modified_deutf7(''.join(_in[1:])))
    return ''.join(r)
|
python
|
def register_model(self, model):
    """
    Register ``model`` to this group, ignoring duplicates.

    :param model: model name (must be a str)
    :return: None
    """
    assert isinstance(model, str)
    if model in self.all_models:
        return
    self.all_models.append(model)
|
python
|
def get_configured_providers(self):
    '''
    Return the configured providers.

    Aliases backed by a single driver are returned bare; aliases with
    multiple drivers are expanded to one ``alias:driver`` entry per driver.
    '''
    providers = set()
    # dict.items() replaces six.iteritems — identical iteration semantics
    # on Python 3, dropping the compat-shim dependency from this function.
    for alias, drivers in self.opts['providers'].items():
        if len(drivers) > 1:
            for driver in drivers:
                providers.add('{0}:{1}'.format(alias, driver))
        else:
            providers.add(alias)
    return providers
|
java
|
/**
 * Fetches the keywords attached to a TV show.
 *
 * @param tvID the TMDb id of the show
 * @throws MovieDbException on any API failure
 */
public ResultList<Keyword> getTVKeywords(int tvID) throws MovieDbException {
    // Build the /tv/{id}/keywords request and unwrap the generic list.
    final TmdbParameters params = new TmdbParameters();
    params.add(Param.ID, tvID);
    final URL url = new ApiUrl(apiKey, MethodBase.TV).subMethod(MethodSub.KEYWORDS).buildUrl(params);
    final WrapperGenericList<Keyword> wrapper = processWrapper(getTypeReference(Keyword.class), url, "keywords");
    return wrapper.getResultsList();
}
|
python
|
def validate(request: Union[Dict, List], schema: dict) -> Union[Dict, List]:
    """
    Wraps jsonschema.validate, returning the same object passed in so the
    call can be used inline/chained.

    Args:
        request: The deserialized-from-json request.
        schema: The jsonschema schema to validate against.
    Returns:
        The *request* object, unchanged.
    Raises:
        jsonschema.ValidationError
    """
    jsonschema_validate(request, schema)
    return request
|
java
|
/**
 * Parses command-line args into the static option fields.
 *
 * @return true on success, false when the args could not be parsed
 */
private static boolean parseInputArgs(String[] args) {
    final CommandLineParser parser = new DefaultParser();
    final CommandLine cmd;
    try {
        cmd = parser.parse(OPTIONS, args);
    } catch (ParseException e) {
        System.out.println("Failed to parse input args: " + e);
        return false;
    }
    // Cache the parsed flags in the static fields read by the caller.
    sHelp = cmd.hasOption("help");
    sJournalDirectoryV0 = cmd.getOptionValue("journalDirectoryV0",
        ServerConfiguration.get(PropertyKey.MASTER_JOURNAL_FOLDER));
    return true;
}
|
python
|
def _get_attribute_tensors(onnx_model_proto): # type: (ModelProto) -> Iterable[TensorProto]
"""Create an iterator of tensors from node attributes of an ONNX model."""
for node in onnx_model_proto.graph.node:
for attribute in node.attribute:
if attribute.HasField("t"):
yield attribute.t
for tensor in attribute.tensors:
yield tensor
|
java
|
/**
 * Appends the given aggregate compliance counts to this result, lazily
 * creating the backing list on first use.
 * Use {@code setAggregateComplianceCounts} instead if you want to replace
 * the existing values.
 *
 * @param aggregateComplianceCounts the counts to append
 * @return this result object, for method chaining
 */
public GetAggregateConfigRuleComplianceSummaryResult withAggregateComplianceCounts(AggregateComplianceCount... aggregateComplianceCounts) {
    if (this.aggregateComplianceCounts == null) {
        // presize the lazily-created list to the incoming varargs length
        setAggregateComplianceCounts(new com.amazonaws.internal.SdkInternalList<AggregateComplianceCount>(aggregateComplianceCounts.length));
    }
    for (AggregateComplianceCount ele : aggregateComplianceCounts) {
        this.aggregateComplianceCounts.add(ele);
    }
    return this;
}
|
java
|
/**
 * Creates a pipeline wired with all watchers registered for the session.
 *
 * @param session the current Maven session
 * @param baseDir the project base directory
 * @param mojo the mojo that owns the pipeline
 * @param pomFileMonitoring whether the pom file itself should be monitored
 * @return the new pipeline
 */
public static Pipeline watchers(MavenSession session, File baseDir, Mojo mojo, boolean pomFileMonitoring) {
    return new Pipeline(mojo, baseDir, Watchers.all(session), pomFileMonitoring);
}
|
java
|
/**
 * Animates the widget with the given CSS animation, repeated {@code count}
 * times. Delegates to the full overload, passing -1 for the remaining two
 * parameters (their exact semantics are defined by that overload).
 *
 * @param widget the widget to animate
 * @param animation the animation name
 * @param count how many times the animation runs
 * @return the value returned by the delegated animate call
 */
public static <T extends UIObject> String animate(final T widget, final String animation, final int count) {
    return animate(widget, animation, count, -1, -1);
}
|
java
|
/**
 * Builds the JavaScript statement that opens a wiQuery warning dialog
 * displaying the given message.
 *
 * @param message the message to display (double-quoted/escaped for JS)
 * @return the JsStatement invoking $.ui.dialog.wiquery.warningDialog
 */
public JsStatement warningDialog(String message)
{
    JsStatement statement = new JsStatement();
    statement.append("$.ui.dialog.wiquery.warningDialog(");
    // unique per-session sequence number identifies this dialog instance
    statement.append("" + Session.get().nextSequenceValue() + ", ");
    // localized dialog texts for the current locale
    statement.append(DialogUtilsLanguages.getDialogUtilsLiteral(DialogUtilsLanguages
        .getDialogUtilsLanguages(getLocale())) + ", ");
    statement.append(JsUtils.doubleQuotes(message, true) + ", ");
    // URL of the warning icon resource
    statement.append(JsUtils.quotes(RequestCycle.get().urlFor(WARNING_IMG, null)) + ")");
    return statement;
}
|
java
|
/**
 * Serializes an object into a byte array using Java object serialization.
 *
 * @param obj the object to serialize; must implement {@link java.io.Serializable}
 * @return the serialized bytes
 * @throws IOException if serialization fails
 */
public static byte[] getBytes(Object obj) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    // try-with-resources guarantees the object stream is flushed and closed
    // even when writeObject throws; the original leaked it on failure.
    // (Closing the ByteArrayOutputStream is a no-op, so it is omitted.)
    try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
        oos.writeObject(obj);
    }
    return bos.toByteArray();
}
|
python
|
def rlmf_tictactoe():
    """Base set of hparams for model-free PPO on the TicTacToe environment.

    Starts from ``rlmf_original`` and overrides the game/environment,
    policy network, and algorithm-parameter settings.
    """
    hparams = rlmf_original()
    hparams.game = "tictactoe"
    hparams.rl_env_name = "T2TEnv-TicTacToeEnv-v0"
    # Since we don't have any no-op actions, otherwise we have to have an
    # attribute called `get_action_meanings`.
    hparams.eval_max_num_noops = 0
    hparams.max_num_noops = 0
    hparams.rl_should_derive_observation_space = False
    hparams.policy_network = "feed_forward_categorical_policy"
    hparams.base_algo_params = "ppo_ttt_params"
    # Number of last observations to feed to the agent
    hparams.frame_stack_size = 1
    return hparams
|
python
|
def _check_markers(task_ids, offset=10):
    """Check whether FuriousAsyncMarker entities exist for all task_ids.

    The ids are checked in batches of ``offset``. Returns a tuple
    ``(complete, has_errors)``: ``complete`` is True only when every task
    id has a marker -- the scan returns ``(False, None)`` as soon as a
    batch contains a missing marker. ``has_errors`` is True when any
    marker in any batch reports an unsuccessful result.
    """
    shuffle(task_ids)
    has_errors = False

    for index in xrange(0, len(task_ids), offset):
        keys = [ndb.Key(FuriousAsyncMarker, id)
                for id in task_ids[index:index + offset]]

        markers = ndb.get_multi(keys)

        if not all(markers):
            logging.debug("Not all Async's complete")
            return False, None

        # Did any of the aync's fail? Check the success property on the
        # AsyncResult. Accumulate across batches: the original overwrote
        # this flag each iteration, so failures in earlier batches were
        # silently lost.
        has_errors = has_errors or not all(
            marker.success for marker in markers)

    return True, has_errors
|
java
|
/**
 * Replays previously cached content for a portlet resource request
 * instead of executing the portlet.
 *
 * @param portletWindow the portlet window the request targets
 * @param httpServletRequest the current request
 * @param cacheState cache lookup state holding the cached resource data
 * @param portletOutputHandler sink the cached content is replayed into
 * @param baseExecutionTime execution time (ns) accumulated before replay
 * @return the total execution time in nanoseconds (base + replay)
 * @throws IOException if writing the cached content fails
 * @throws PortletDispatchException if the portlet requested cached content
 *         but none is actually cached (a portlet bug)
 */
protected Long doResourceReplayCachedContent(
    IPortletWindow portletWindow,
    HttpServletRequest httpServletRequest,
    CacheState<CachedPortletResourceData<Long>, Long> cacheState,
    PortletResourceOutputHandler portletOutputHandler,
    long baseExecutionTime)
    throws IOException {
    enforceConfigPermission(httpServletRequest, portletWindow);
    logger.debug("Replaying cached content for resource request to {}", portletWindow);
    final long renderStartTime = System.nanoTime();
    final CachedPortletResourceData<Long> cachedPortletResourceData =
        cacheState.getCachedPortletData();
    if (cachedPortletResourceData == null) {
        // CacheControl promised cached content but none exists
        throw new PortletDispatchException(
            "The portlet window '"
                + portletWindow
                + "' indicated via CacheControl#useCachedContent "
                + "that the portal should render cached content, however there is no cached content to return. "
                + "This is a portlet bug.",
            portletWindow);
    }
    cachedPortletResourceData.replay(portletOutputHandler);
    // total time = time accumulated before this call + replay duration
    final long executionTime = baseExecutionTime + (System.nanoTime() - renderStartTime);
    publishResourceEvent(portletWindow, httpServletRequest, executionTime, false, true);
    return executionTime;
}
|
java
|
/**
 * Determines whether a polygon and a multipoint satisfy the relation
 * described by the predicate string {@code scl}.
 * Cheap checks are tried first (envelope disjointness, then a rasterized
 * contains/disjoint test); only when both are inconclusive is the full
 * topology graph built via an EditShape.
 *
 * @param polygon_a the polygon operand
 * @param multipoint_b the multipoint operand
 * @param tolerance the cluster tolerance used for cracking/clustering
 * @param scl the relation predicate string to evaluate
 * @param progress_tracker optional progress tracker (may be null)
 * @return true when the computed matrix satisfies {@code scl}
 */
static boolean polygonRelateMultiPoint_(Polygon polygon_a,
    MultiPoint multipoint_b, double tolerance, String scl,
    ProgressTracker progress_tracker) {
    RelationalOperationsMatrix relOps = new RelationalOperationsMatrix();
    relOps.resetMatrix_();
    relOps.setPredicates_(scl);
    relOps.setAreaPointPredicates_();
    Envelope2D env_a = new Envelope2D(), env_b = new Envelope2D();
    polygon_a.queryEnvelope2D(env_a);
    multipoint_b.queryEnvelope2D(env_b);
    boolean bRelationKnown = false;
    // Fast path 1: disjoint bounding envelopes settle the matrix outright.
    boolean b_disjoint = RelationalOperations.envelopeDisjointEnvelope_(
        env_a, env_b, tolerance, progress_tracker);
    if (b_disjoint) {
        relOps.areaPointDisjointPredicates_(polygon_a);
        bRelationKnown = true;
    }
    if (!bRelationKnown) {
        // Quick rasterize test to see whether the geometries are
        // disjoint, or if one is contained in the other.
        int relation = RelationalOperations
            .tryRasterizedContainsOrDisjoint_(polygon_a, multipoint_b,
                tolerance, false);
        if (relation == RelationalOperations.Relation.disjoint) {
            relOps.areaPointDisjointPredicates_(polygon_a);
            bRelationKnown = true;
        } else if (relation == RelationalOperations.Relation.contains) {
            relOps.areaPointContainsPredicates_(polygon_a);
            bRelationKnown = true;
        }
    }
    if (!bRelationKnown) {
        // Slow path: crack & cluster both geometries in one EditShape and
        // derive the matrix from the resulting topology graph clusters.
        EditShape edit_shape = new EditShape();
        int geom_a = edit_shape.addGeometry(polygon_a);
        int geom_b = edit_shape.addGeometry(multipoint_b);
        relOps.setEditShapeCrackAndCluster_(edit_shape, tolerance,
            progress_tracker);
        relOps.computeMatrixTopoGraphClusters_(geom_a, geom_b);
        relOps.m_topo_graph.removeShape();
    }
    boolean bRelation = relationCompare_(relOps.m_matrix, relOps.m_scl);
    return bRelation;
}
|
python
|
def set_censor(self, character):
    """Replaces the original censor character '*' with ``character``.

    Any non-string value (int, float, ...) is coerced with ``str()`` so
    every str()-able value is accepted; this resolves the old TODO that
    only converted ints.
    """
    if not isinstance(character, str):
        character = str(character)
    self._censor_char = character
|
python
|
def prepare_for_submission(self, folder):
    """This method is called prior to job submission with a set of calculation input nodes.

    The inputs will be validated and sanitized, after which the necessary input files will be written to disk in a
    temporary folder. A CalcInfo instance will be returned that contains lists of files that need to be copied to
    the remote machine before job submission, as well as file lists that are to be retrieved after job completion.

    :param folder: an aiida.common.folders.Folder to temporarily write files on disk
    :returns: CalcInfo instance
    """
    from aiida_codtools.common.cli import CliParameters
    # The 'parameters' input node is optional; fall back to an empty dict.
    try:
        parameters = self.inputs.parameters.get_dict()
    except AttributeError:
        parameters = {}
    self._validate_resources()
    # Merge user parameters over the class defaults without mutating the
    # shared default dict.
    cli_parameters = copy.deepcopy(self._default_cli_parameters)
    cli_parameters.update(parameters)
    # Describe how the code is invoked: CLI flags plus stdin/stdout/stderr
    # redirection to the configured filenames.
    codeinfo = datastructures.CodeInfo()
    codeinfo.code_uuid = self.inputs.code.uuid
    codeinfo.cmdline_params = CliParameters.from_dictionary(cli_parameters).get_list()
    codeinfo.stdin_name = self.options.input_filename
    codeinfo.stdout_name = self.options.output_filename
    codeinfo.stderr_name = self.options.error_filename
    # Assemble the calculation info: copy the CIF file to the remote under
    # the input filename; retrieve stdout and stderr after completion.
    calcinfo = datastructures.CalcInfo()
    calcinfo.uuid = str(self.uuid)
    calcinfo.codes_info = [codeinfo]
    calcinfo.retrieve_list = [self.options.output_filename, self.options.error_filename]
    calcinfo.local_copy_list = [(self.inputs.cif.uuid, self.inputs.cif.filename, self.options.input_filename)]
    calcinfo.remote_copy_list = []
    return calcinfo
|
python
|
def coerce_many(schema=str):
    """Build a validator that coerces every item of a sequence to `schema`."""
    def validate(val):
        """Coerce each element of ``val`` and return the resulting list."""
        coerce_one = volup.Coerce(schema)
        return [coerce_one(x) for x in val]
    return validate
|
python
|
def data_url(contents, domain=DEFAULT_DOMAIN):
    """
    Return the URL for embedding the GeoJSON data in the URL hash

    Parameters
    ----------
    contents - string of GeoJSON
    domain - string, default http://geojson.io
    """
    encoded = urllib.parse.quote(contents)
    return '{0}#data=data:application/json,{1}'.format(domain, encoded)
|
python
|
def get_surface_as_bytes(self, order=None):
    """Return the surface area as a bytes-encoded RGB image buffer.

    Subclasses should override this if they can convert more efficiently
    than by first materializing a numpy array.
    """
    return self.get_surface_as_array(order=order).tobytes(order='C')
|
java
|
/**
 * Gets the configured budget delivery method.
 *
 * @return the delivery method, or null if it has not been set
 */
public com.google.api.ads.adwords.axis.v201809.cm.BudgetBudgetDeliveryMethod getDeliveryMethod() {
    return deliveryMethod;
}
|
java
|
/**
 * Computes the initial layout score as the sum over all interacting atom
 * pairs of 1 / max(d^2, MIN_SCORE), where d is the distance between the
 * two atoms' 2D points. As a side effect the symmetric
 * {@code contribution} matrix entries of every scored pair are set to
 * that same reciprocal value.
 * Pairs whose current contribution entry is negative are skipped.
 *
 * @return the accumulated score
 */
private double initScore() {
    double score = 0;
    final int n = atoms.length;
    for (int i = 0; i < n; i++) {
        final Point2d p1 = atoms[i].getPoint2d();
        for (int j = i + 1; j < n; j++) {
            // negative entries mark pairs excluded from scoring
            if (contribution[i][j] < 0) continue;
            final Point2d p2 = atoms[j].getPoint2d();
            final double x = p1.x - p2.x;
            final double y = p1.y - p2.y;
            final double len2 = x * x + y * y;
            // Math.max guards the denominator against near-zero distances
            // (assumes MIN_SCORE is a small positive constant -- confirm)
            score += contribution[j][i] = contribution[i][j] = 1 / Math.max(len2, MIN_SCORE);
        }
    }
    return score;
}
|
java
|
/**
 * Writes a double at the given absolute index, auto-expanding the buffer
 * so that 8 bytes are writable at that position.
 *
 * @param index absolute position to write at
 * @param value the value to store
 * @return this buffer, for method chaining
 */
@Override
public final IoBuffer putDouble(int index, double value) {
    autoExpand(index, 8);
    buf().putDouble(index, value);
    return this;
}
|
java
|
/**
 * Looks up job details by cluster and job id. Delegates to the
 * three-argument overload with its boolean flag set to false (the flag's
 * meaning is defined by that overload -- confirm there).
 *
 * @param cluster the cluster the job ran on
 * @param jobId the job id
 * @return the job details
 * @throws IOException if the lookup fails
 */
public JobDetails getJobByJobID(String cluster, String jobId)
    throws IOException {
    return getJobByJobID(cluster, jobId, false);
}
|
java
|
/**
 * Adds a new entry with the given name and contents to the underlying zip
 * output stream.
 * NOTE(review): closeEntry() is not called here; ZipOutputStream closes
 * the previous entry automatically on the next putNextEntry()/finish().
 *
 * @param zipEntry the entry name (path inside the archive)
 * @param data the entry contents
 * @throws IOException if writing to the stream fails
 */
public void addFile(String zipEntry, byte[] data) throws IOException {
    stream.putNextEntry(new ZipEntry(zipEntry));
    stream.write(data);
}
|
python
|
def refresh(func):
    """
    Decorator that can be applied to model method that forces a refresh of the model.

    Note this decorator ensures the state of the model is what is currently within
    the database and therefore overwrites any current field changes.

    For example, assume we have the following model:

    .. code-block:: python

        class MyModel(models.Model):
            counter = models.IntegerField()

            @refresh
            def my_method(self):
                print counter

    Then the following is performed:

    .. code-block:: python

        i = MyModel.objects.create(counter=1)
        i.counter = 3
        i.my_method()
        # prints 1

    This behavior is useful in a distributed system, such as celery, where
    "asserting the world is the responsibility of the task" - see http://celery.readthedocs.org/en/latest/userguide/tasks.html?highlight=model#state
    Note that the refresh of the model uses the approach outlined in https://github.com/planop/django/blob/ticket_901/django/db/models/base.py#L1012
    which was discovered after from https://code.djangoproject.com/ticket/901#comment:29
    which is a Django ticket which discusses a specific method 'refresh' on a
    model.
    """
    @wraps(func)
    def inner(self, *args, **kwargs):
        # Refresh the model instance - see https://github.com/planop/django/blob/ticket_901/django/db/models/base.py#L1012
        new_self = self.__class__._base_manager.using(self._state.db).get(pk=self.pk)
        # Copy every concrete field value from the fresh DB row onto this
        # instance, discarding any unsaved local modifications.
        for f in self.__class__._meta.fields:
            setattr(self, f.name, getattr(new_self, f.name))
        return func(self, *args, **kwargs)
    return inner
|
python
|
def restrict(self, ava, sp_entity_id, metadata=None):
    """ Filter identity attributes according to the IdP/AA rules.

    Identity attribute names are expected to be expressed in
    the local lingo (== friendlyName).

    :return: A filtered ava according to the IdPs/AAs rules and
        the list of required/optional attributes according to the SP.
        If the requirements can't be met an exception is raised.
    """
    required, optional = [], []
    if metadata:
        spec = metadata.attribute_requirement(sp_entity_id)
        if spec:
            required, optional = spec["required"], spec["optional"]
    return self.filter(ava, sp_entity_id, metadata, required, optional)
|
java
|
/**
 * Generates a timestamp sequence for the column: each value is the
 * previous one advanced by {@code step} milliseconds, starting at
 * {@code initial}.
 *
 * @param column the column name
 * @param initial the first timestamp of the sequence; must not be null
 * @param step increment between consecutive values, in milliseconds
 * @return this builder, for method chaining
 */
public DataSetBuilder sequence(String column, Timestamp initial, long step) {
    ensureArgNotNull(initial);
    return sequence(column, initial, ts -> new Timestamp(ts.getTime() + step));
}
|
java
|
/**
 * Creates a Jackson module whose deserializer modifier substitutes a
 * FlatteningDeserializer for every bean type annotated with
 * {@code @JsonFlatten}; all other types keep their default deserializer.
 *
 * @param mapper the object mapper handed to the flattening deserializer
 * @return the configured SimpleModule
 */
public static SimpleModule getModule(final ObjectMapper mapper) {
    SimpleModule module = new SimpleModule();
    module.setDeserializerModifier(new BeanDeserializerModifier() {
        @Override
        public JsonDeserializer<?> modifyDeserializer(DeserializationConfig config, BeanDescription beanDesc, JsonDeserializer<?> deserializer) {
            // only beans explicitly annotated with @JsonFlatten are wrapped
            if (beanDesc.getBeanClass().getAnnotation(JsonFlatten.class) != null) {
                return new FlatteningDeserializer(beanDesc.getBeanClass(), deserializer, mapper);
            }
            return deserializer;
        }
    });
    return module;
}
|
python
|
def xview(self, *args):
    """Update inplace widgets position when doing horizontal scroll.

    Schedules ``__updateWnds`` via ``after_idle`` so the embedded widgets
    are repositioned after the scroll settles, then delegates the actual
    scrolling to ``ttk.Treeview.xview``.
    """
    self.after_idle(self.__updateWnds)
    ttk.Treeview.xview(self, *args)
|
python
|
def GetDataAsObject(self):
    """Retrieves the data as an object.

    The raw value data is interpreted according to ``self._data_type``:
    string types are decoded as UTF-16-LE, REG_DWORD/REG_QWORD values are
    mapped through the matching integer byte-stream maps, and
    REG_MULTI_SZ is decoded and split on NUL into a list of strings.
    Any other type is returned as the raw data unchanged.

    Returns:
      object: data as a Python type or None if not available.

    Raises:
      WinRegistryValueError: if the value data cannot be read.
    """
    # Covers both "no data" and empty byte strings.
    if not self._data:
        return None
    if self._data_type in self._STRING_VALUE_TYPES:
        try:
            return self._data.decode('utf-16-le')
        # AttributeError is raised when self._data has no decode method.
        except AttributeError as exception:
            raise errors.WinRegistryValueError((
                'Unsupported data type: {0!s} of value: {1!s} with error: '
                '{2!s}').format(type(self._data), self._name, exception))
        except UnicodeError as exception:
            raise errors.WinRegistryValueError(
                'Unable to decode data of value: {0!s} with error: {1!s}'.format(
                    self._name, exception))
    # Fixed-size integers: only decode when the size matches exactly.
    elif (self._data_type == definitions.REG_DWORD and
          self._data_size == 4):
        return self._INT32_LITTLE_ENDIAN.MapByteStream(self._data)
    elif (self._data_type == definitions.REG_DWORD_BIG_ENDIAN and
          self._data_size == 4):
        return self._INT32_BIG_ENDIAN.MapByteStream(self._data)
    elif (self._data_type == definitions.REG_QWORD and
          self._data_size == 8):
        return self._INT64_LITTLE_ENDIAN.MapByteStream(self._data)
    elif self._data_type == definitions.REG_MULTI_SZ:
        try:
            utf16_string = self._data.decode('utf-16-le')
            # TODO: evaluate the use of filter here is appropriate behavior.
            return list(filter(None, utf16_string.split('\x00')))
        # AttributeError is raised when self._data has no decode method.
        except AttributeError as exception:
            raise errors.WinRegistryValueError((
                'Unsupported data type: {0!s} of value: {1!s} with error: '
                '{2!s}').format(type(self._data), self._name, exception))
        except UnicodeError as exception:
            raise errors.WinRegistryValueError(
                'Unable to read data from value: {0!s} with error: {1!s}'.format(
                    self._name, exception))
    # Unknown or size-mismatched types fall through to the raw data.
    return self._data
|
java
|
/**
 * Hook invoked before an update-all statement executes. This default
 * implementation performs no checks and always returns true (presumably
 * allowing the update to proceed -- confirm with the caller's contract).
 *
 * @param clazz entity class the update targets
 * @param sql the SQL about to be executed
 * @param customsSets custom SET clauses
 * @param customsParams parameters for the custom SET clauses
 * @param args remaining statement arguments
 * @return always true in this implementation
 */
public boolean beforeUpdateAll(Class<?> clazz, String sql,
    List<String> customsSets, List<Object> customsParams,
    List<Object> args) {
    return true;
}
|
java
|
/**
 * Removes the commerce price entry identified by company id and external
 * reference code, delegating to the persistence layer.
 *
 * @param companyId the company id
 * @param externalReferenceCode the external reference code
 * @return the removed price entry
 * @throws NoSuchPriceEntryException if no matching entry exists
 */
public static CommercePriceEntry removeByC_ERC(long companyId,
    String externalReferenceCode)
    throws com.liferay.commerce.price.list.exception.NoSuchPriceEntryException {
    return getPersistence().removeByC_ERC(companyId, externalReferenceCode);
}
|
java
|
/**
 * Highlights this element. On first use a highlighting border is created
 * and attached to the root panel; on subsequent calls the existing border
 * is merely repositioned over the element.
 */
public void highlightElement() {
    if (m_highlighting != null) {
        // border already attached: just move it over the current element
        m_highlighting.setPosition(CmsPositionBean.getBoundingClientRect(getElement()));
    } else {
        m_highlighting = new CmsHighlightingBorder(m_position, CmsHighlightingBorder.BorderColor.red);
        RootPanel.get().add(m_highlighting);
    }
}
|
java
|
/**
 * Extracts the current row from a column value selector.
 * Only DimensionSelector and NilColumnValueSelector are supported; any
 * other selector type is rejected with an ISE.
 */
private static IndexedInts getRow(ColumnValueSelector s)
{
    if (s instanceof DimensionSelector) {
        return ((DimensionSelector) s).getRow();
    }
    if (s instanceof NilColumnValueSelector) {
        // nil selector contributes an all-zero row
        return ZeroIndexedInts.instance();
    }
    throw new ISE(
        "ColumnValueSelector[%s], only DimensionSelector or NilColumnValueSelector is supported",
        s.getClass()
    );
}
|
python
|
def complex_median(complex_list):
    """ Get the median value of a list of complex numbers.

    The real and imaginary parts are reduced independently, so the result
    is ``median(reals) + 1j * median(imags)`` and need not be an element
    of the input.

    Parameters
    ----------
    complex_list: list
        List of complex numbers to calculate the median.

    Returns
    -------
    a + 1.j*b: complex number
        The median of the real and imaginary parts.
    """
    # One asarray plus the .real/.imag views avoids materializing two
    # intermediate Python lists as the original comprehensions did.
    arr = numpy.asarray(complex_list)
    return numpy.median(arr.real) + 1.j * numpy.median(arr.imag)
|
python
|
def updateSocialTone(user, socialTone, maintainHistory):
    """
    updateSocialTone updates the user with the social tones interpreted based on
    the specified thresholds

    @param user a json object representing user information (tone) to be used in
    conversing with the Conversation Service
    @param socialTone a json object containing the social tones in the payload
    returned by the Tone Analyzer
    @param maintainHistory when truthy, the per-tone interpretation objects are
    additionally appended to user['tone']['social']['current']
    """
    currentSocial = []
    currentSocialObject = []
    # Process each social tone and determine if it is high or low
    for tone in socialTone['tones']:
        if tone['score'] >= SOCIAL_HIGH_SCORE_THRESHOLD:
            currentSocial.append(tone['tone_name'].lower() + '_high')
            currentSocialObject.append({
                'tone_name': tone['tone_name'].lower(),
                'score': tone['score'],
                'interpretation': 'likely high'
            })
        elif tone['score'] <= SOCIAL_LOW_SCORE_THRESHOLD:
            currentSocial.append(tone['tone_name'].lower() + '_low')
            currentSocialObject.append({
                'tone_name': tone['tone_name'].lower(),
                'score': tone['score'],
                'interpretation': 'likely medium'  if False else 'likely low'
            })
        else:
            # mid-range scores get an interpretation entry but no tag
            currentSocialObject.append({
                'tone_name': tone['tone_name'].lower(),
                'score': tone['score'],
                'interpretation': 'likely medium'
            })
    # update user social tone
    user['tone']['social']['current'] = currentSocial
    if maintainHistory:
        # NOTE(review): this appends the interpretation objects into the
        # 'current' list that was just overwritten above; a dedicated
        # history key may have been intended -- confirm before changing.
        if not user['tone']['social']['current']:
            user['tone']['social']['current'] = []
        user['tone']['social']['current'].append(currentSocialObject)
|
java
|
/**
 * Parses raw command line arguments into (option, value) pairs stored in
 * {@code sortedArgs}. Arguments without a leading dash are bound, in
 * order, to the positional parameters in {@code this.params};
 * dash-prefixed arguments are matched against the declared options in
 * {@code this.args}.
 *
 * @param args the raw command line arguments
 * @return the accumulated {@code sortedArgs} list of option/value lists
 * @throws InvalidFormatException when a bare value appears with no
 *         positional parameter left, or a required option value is
 *         missing (followed by another option)
 */
@Deprecated
public List<List<String>> getParsedArgs(String[] args)
    throws InvalidFormatException {
    for (int i = 0; i < args.length; i++) {
        if (!args[i].startsWith("-")) {
            // Bare value: bind it to the next positional parameter.
            if (this.params.size() > 0) {
                List<String> option = new ArrayList<String>();
                option.add(this.params.get(0).longOption);
                this.params.remove(0);
                option.add(args[i]);
                sortedArgs.add(option);
            } else {
                throw new InvalidFormatException(
                    "Expected command line option, found " + args[i]
                        + " instead.");
            }
        } else {
            // Dash-prefixed: find the first declared option matching the flag.
            for (Argument option : this.args) {
                if (option.matchesFlag(args[i])) {
                    List<String> command = new ArrayList<String>();
                    command.add(noDashes(args[i]));
                    if (option.takesValue) {
                        try {
                            if (args[i + 1].startsWith("-")) {
                                if (option.valueRequired)
                                    throw new InvalidFormatException(
                                        "Invalid command line format: -"
                                            + option.option
                                            + " or --"
                                            + option.longOption
                                            + " requires a parameter, found "
                                            + args[i + 1]
                                            + " instead.");
                            } else {
                                // consume the following token as the value
                                command.add(args[++i]);
                            }
                        } catch (ArrayIndexOutOfBoundsException e) {
                            // Intentionally ignored: an option at end of input
                            // simply gets no value.
                            // NOTE(review): this also swallows the case of a
                            // *required* value missing at end of input.
                        }
                    }
                    sortedArgs.add(command);
                    break;
                }
            }
        }
    }
    return sortedArgs;
}
|
java
|
/**
 * Retrieves the health check identified by the given resource name,
 * delegating to the request-based overload.
 *
 * @param healthCheck the health check resource name; may be null
 * @return the health check returned by the API
 */
@BetaApi
public final HealthCheck getHealthCheck(ProjectGlobalHealthCheckName healthCheck) {
    GetHealthCheckHttpRequest request =
        GetHealthCheckHttpRequest.newBuilder()
            // null-safe conversion of the typed name to its string form
            .setHealthCheck(healthCheck == null ? null : healthCheck.toString())
            .build();
    return getHealthCheck(request);
}
|
python
|
def del_svc_comment(self, comment_id):
    """Delete a service comment

    Format of the line that triggers function call::

    DEL_SVC_COMMENT;<comment_id>

    :param comment_id: comment id to delete
    :type comment_id: int
    :return: None
    """
    # Search for the service owning the comment; a plain flag replaces the
    # for/else construct of the original implementation.
    deleted = False
    for svc in self.daemon.services:
        if comment_id in svc.comments:
            svc.del_comment(comment_id)
            self.send_an_element(svc.get_update_status_brok())
            deleted = True
            break
    if not deleted:
        self.send_an_element(make_monitoring_log(
            'warning', 'DEL_SVC_COMMENT: comment id: %s does not exist '
                       'and cannot be deleted.' % comment_id))
|
java
|
/**
 * Advances the parse index past blank characters and at most one comma.
 * Stops (leaving {@code idx} on the stopping character) when a
 * non-blank, non-comma character is reached, or when a second comma is
 * seen.
 *
 * @return true when a significant character (or second comma) was
 *         reached before end of string; false when input was exhausted
 */
private boolean skipBlankAndComma()
{
    boolean commaFound = false;
    while (!isEOS())
    {
        int c = str.charAt(idx);
        if (c == ',')
        {
            if (commaFound)
                // a second comma is significant: idx stays pointing at it
                return true;
            else
                commaFound = true;
        }
        else if (!isBlank(c))
        {
            return true;
        }
        ++idx;
    }
    return false;
}
|
java
|
/**
 * Updates the record in the given table, using the dialect's default
 * primary key column to identify the row.
 *
 * @param tableName the table to update
 * @param record the record holding the new column values
 * @return true if the update succeeded
 */
public boolean update(String tableName, Record record) {
    return update(tableName, config.dialect.getDefaultPrimaryKey(), record);
}
|
java
|
/**
 * Creates a SAXParserFactory preconfigured with this instance's
 * namespace-awareness flag and validation schema.
 *
 * @return a freshly created, configured factory
 */
private SAXParserFactory getSAXParserFactory() {
    final SAXParserFactory factory = SAXParserFactory.newInstance();
    factory.setNamespaceAware(this.namespaceAware);
    factory.setSchema(this.schema);
    return factory;
}
|
python
|
def get(self, section, key):
    """Read and return a config value.

    Parameters:
        * **section (string):** the section to look for the config value either - oxd, client
        * **key (string):** the key for the config value required

    Returns:
        **value (string):** the value of the key in the appropriate format
        if found, or None when the section or key could not be found (a
        warning is logged in that case).

    Example:
        config = Configurer(location)
        oxd_port = config.get('oxd', 'port')  # returns the port of the oxd
    """
    try:
        value = self.parser.get(section, key)
    except (NoOptionError, NoSectionError) as e:
        logger.warning("%s", e)
        return None
    return value
|
java
|
/**
 * Returns (creating on demand) the object sub-directory whose name is the
 * first {@code SUBDIR_POLICY} characters of the given id.
 * NOTE(review): the boolean result of mkdirs() is ignored, so a failed
 * creation is only detected later when the directory is used.
 *
 * @param id the object id; must be at least SUBDIR_POLICY characters long
 * @return the sub-directory for the id (existence not guaranteed)
 */
private File subDirForId(String id) {
    File subDir = new File(objs, id.substring(0, SUBDIR_POLICY));
    if (!subDir.exists()) {
        subDir.mkdirs();
    }
    return subDir;
}
|
python
|
def parse_qaml(self):
    """
    Parse the GenomeQAML report, and populate metadata objects.

    Reads ``self.qaml_report`` (CSV) with pandas, pivots it into a
    per-sample nested dictionary, then stores each sample's
    'Predicted_Class' on ``sample[self.analysistype].prediction``.
    """
    logging.info('Parsing GenomeQAML outputs')
    # A dictionary to store the parsed excel file in a more readable format
    nesteddictionary = dict()
    # Use pandas to read in the CSV file, and convert the pandas data frame to a dictionary (.to_dict())
    dictionary = pandas.read_csv(self.qaml_report).to_dict()
    # Iterate through the dictionary - each header from the CSV file
    for header in dictionary:
        # Sample is the primary key, and value is the value of the cell for that primary key + header combination
        for sample, value in dictionary[header].items():
            # Update the dictionary with the new data
            try:
                nesteddictionary[sample].update({header: value})
            # Create the nested dictionary if it hasn't been created yet
            except KeyError:
                nesteddictionary[sample] = dict()
                nesteddictionary[sample].update({header: value})
    # Get the results into the metadata object
    for sample in self.metadata:
        # Initialise the plasmid extractor genobject
        setattr(sample, self.analysistype, GenObject())
        # Initialise the list of all plasmids
        sample[self.analysistype].prediction = str()
        # Iterate through the dictionary of results
        # (linear scan per sample; fine for report-sized inputs)
        for line in nesteddictionary:
            # Extract the sample name from the dictionary
            name = nesteddictionary[line]['Sample']
            # Ensure that the names match
            if name == sample.name:
                # Append the plasmid name extracted from the dictionary to the list of plasmids
                sample[self.analysistype].prediction = nesteddictionary[line]['Predicted_Class']
|
python
|
def url_for(*args, **kw):
    """Build the URL for a target route.

    The target is the first positional argument, and can be any valid
    target for `Mapper.path`, which will be looked up on the current
    mapper object and used to build the URL for that route.

    Additionally, it can be one of:

    '.'
    : Builds the URL for the current route.

    '/'
    : Builds the URL for the root (top-most) mapper object.

    '/a', '/a:b', etc.
    : Builds the URL for a named route relative to the root mapper.

    '.a', '..a', '..a:b', etc.
    : Builds a URL for a named route relative to the current mapper.
      Each additional leading '.' after the first one starts one
      level higher in the hierarchy of nested mappers (i.e. '.a' is
      equivalent to 'a').

    Special keyword arguments:

    `_query`
    : Append a query string to the URL (dict or list of tuples)

    `_relative`
    : When True, build a relative URL (default: False)

    All other keyword arguments are treated as parameters for the URL
    template.
    """
    # Allow passing 'self' as named parameter
    self, target, args = args[0], args[1], list(args[2:])
    query = kw.pop('_query', None)
    relative = kw.pop('_relative', False)
    url = build_url(self._context, target, args, kw)
    if query:
        # Sort dict items so the query-string order is deterministic.
        if isinstance(query, dict):
            query = sorted(query.items())
        query_part = urllib.urlencode(query)  # NOTE(review): Python 2 API
        query_sep = '&' if '?' in url else '?'
        url = url + query_sep + query_part
    if relative:
        return url
    else:
        # Anchor the URL on the application URI (Python 2 urlparse module).
        return urlparse.urljoin(self.application_uri, url)
|
python
|
def _get_provisioning_state(self, response):
"""
Attempt to get provisioning state from resource.
:param requests.Response response: latest REST call response.
:returns: Status if found, else 'None'.
"""
if self._is_empty(response):
return None
body = response.json()
return body.get("properties", {}).get("provisioningState")
|
python
|
def waiver2mef(sciname, newname=None, convert_dq=True, writefits=True):
    """
    Converts a GEIS science file and its corresponding
    data quality file (if present) to MEF format.
    Writes out both files to disk.

    Despite the original summary, this returns the converted science image
    as a ``fits.HDUList`` (opened in update mode when ``writefits`` is
    True), or None when the input file could not be found.

    :param sciname: input filename, or an already-open ``fits.HDUList``
    :param newname: unused in this implementation -- TODO confirm callers
    :param convert_dq: also convert the companion ``_c1f.fits`` DQ file
    :param writefits: write the converted MEF file(s) to disk
    """
    if isinstance(sciname, fits.HDUList):
        filename = sciname.filename()
    else:
        filename = sciname
    try:
        # always overwrite existing outputs
        clobber = True
        fimg = convertwaiveredfits.convertwaiveredfits(filename)

        #check for the existence of a data quality file
        _dqname = fileutil.buildNewRootname(filename, extn='_c1f.fits')
        dqexists = os.path.exists(_dqname)
        if convert_dq and dqexists:
            try:
                dqfile = convertwaiveredfits.convertwaiveredfits(_dqname)
                dqfitsname = fileutil.buildNewRootname(_dqname, extn='_c1h.fits')
            except Exception:
                # NOTE(review): when this conversion fails, dqfile/dqfitsname
                # remain unbound while dqexists stays True, so the DQ write
                # below would raise NameError -- confirm intended behavior.
                print("Could not read data quality file %s" % _dqname)
        if writefits:
            # User wants to make a FITS copy and update it
            # using the filename they have provided
            rname = fileutil.buildNewRootname(filename)
            fitsname = fileutil.buildNewRootname(rname, extn='_c0h.fits')

            # Write out GEIS image as multi-extension FITS.
            fexists = os.path.exists(fitsname)
            if (fexists and clobber) or not fexists:
                print('Writing out WAIVERED as MEF to ', fitsname)
                # astropy renamed clobber -> overwrite in 1.3
                if ASTROPY_VER_GE13:
                    fimg.writeto(fitsname, overwrite=clobber)
                else:
                    fimg.writeto(fitsname, clobber=clobber)
                if dqexists:
                    print('Writing out WAIVERED as MEF to ', dqfitsname)
                    if ASTROPY_VER_GE13:
                        dqfile.writeto(dqfitsname, overwrite=clobber)
                    else:
                        dqfile.writeto(dqfitsname, clobber=clobber)
            # Now close input GEIS image, and open writable
            # handle to output FITS image instead...
            fimg.close()
            del fimg
            fimg = fits.open(fitsname, mode='update', memmap=False)

        return fimg
    except IOError:
        print('Warning: File %s could not be found' % sciname)
        return None
|
python
|
def endpoint_delete(endpoint_id):
    """
    Executor for `globus endpoint delete`

    Deletes the endpoint via the Globus transfer client and prints the
    service response's "message" field as raw text.
    """
    client = get_client()
    res = client.delete_endpoint(endpoint_id)
    formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
|
java
|
/**
 * Runs every task that was queued during execution of the command, then
 * clears the queue.
 */
private void runTasks() {
    if (tasks.isEmpty()) {
        return;
    }
    for (Runnable task : tasks) {
        log.trace("Executing task {}", task);
        task.run();
    }
    tasks.clear();
}
|
python
|
def set_layer(self, layer=None, keywords=None):
    """Set layer and update UI accordingly.

    :param layer: A QgsVectorLayer.
    :type layer: QgsVectorLayer

    :param keywords: Keywords for the layer.
    :type keywords: dict, None
    """
    # Tear down any previous field-mapping widget before rebuilding it.
    if self.field_mapping_widget is not None:
        self.field_mapping_widget.setParent(None)
        self.field_mapping_widget.close()
        self.field_mapping_widget.deleteLater()
        self.main_layout.removeWidget(self.field_mapping_widget)
        self.field_mapping_widget = None
    # Fall back to the combo-box selection when no layer was passed in.
    if layer:
        self.layer = layer
    else:
        self.layer = self.layer_combo_box.currentLayer()
    if not self.layer:
        return
    if keywords is not None:
        self.metadata = keywords
    else:
        # Always read from metadata file.
        try:
            self.metadata = self.keyword_io.read_keywords(self.layer)
        except (
                NoKeywordsFoundError,
                KeywordNotFoundError,
                MetadataReadError) as e:
            raise e
    # Ensure the keyword sub-dictionaries the widget relies on exist.
    if 'inasafe_default_values' not in self.metadata:
        self.metadata['inasafe_default_values'] = {}
    if 'inasafe_fields' not in self.metadata:
        self.metadata['inasafe_fields'] = {}
    self.field_mapping_widget = FieldMappingWidget(
        parent=self, iface=self.iface)
    self.field_mapping_widget.set_layer(self.layer, self.metadata)
    self.field_mapping_widget.show()
    self.main_layout.addWidget(self.field_mapping_widget)
    # Set header label
    group_names = [
        self.field_mapping_widget.tabText(i) for i in range(
            self.field_mapping_widget.count())]
    if len(group_names) == 0:
        header_text = tr(
            'There is no field group for this layer. Please select '
            'another layer.')
        self.header_label.setText(header_text)
        return
    elif len(group_names) == 1:
        pretty_group_name = group_names[0]
    elif len(group_names) == 2:
        pretty_group_name = group_names[0] + tr(' and ') + group_names[1]
    else:
        # Oxford-comma join of three or more group names.
        pretty_group_name = ', '.join(group_names[:-1])
        pretty_group_name += tr(', and {0}').format(group_names[-1])
    header_text = tr(
        'Please fill the information for every tab to determine the '
        'attribute for {0} group.').format(pretty_group_name)
    self.header_label.setText(header_text)
java
|
/**
 * Collects the site-relative paths of all resources locked below the given
 * resource that match the filter, sorted alphabetically.
 *
 * @param dbc the database context
 * @param resource the resource whose subtree is inspected
 * @param filter the lock filter to apply
 * @return sorted list of site-relative paths of locked resources
 * @throws CmsException if reading the locks fails
 */
public List<String> getLockedResources(CmsDbContext dbc, CmsResource resource, CmsLockFilter filter)
throws CmsException {

    List<String> lockedResources = new ArrayList<String>();
    // enhanced for-loop replaces the explicit Iterator of the original
    for (CmsLock lock : m_lockManager.getLocks(dbc, resource.getRootPath(), filter)) {
        lockedResources.add(dbc.removeSiteRoot(lock.getResourceName()));
    }
    Collections.sort(lockedResources);
    return lockedResources;
}
|
java
|
/**
 * Executes a JSP with the given content resource set up as the current
 * container element and returns the rendered output bytes.
 *
 * @param cms the initialized CmsObject
 * @param request the current request
 * @param response the current response
 * @param jsp the JSP (template) resource to execute
 * @param content the content resource made available to the JSP
 * @return the bytes produced by dumping the JSP through its loader
 * @throws Exception if loading or executing the JSP fails
 */
public static byte[] executeJsp(
    CmsObject cms,
    HttpServletRequest request,
    HttpServletResponse response,
    CmsResource jsp,
    CmsResource content) throws Exception {
    CmsTemplateLoaderFacade loaderFacade = new CmsTemplateLoaderFacade(
        OpenCms.getResourceManager().getLoader(jsp),
        content,
        jsp);
    CmsResource loaderRes = loaderFacade.getLoaderStartResource();
    // expose the CmsObject to the JSP standard context bean
    request.setAttribute(CmsJspStandardContextBean.ATTRIBUTE_CMS_OBJECT, cms);
    CmsJspStandardContextBean context = CmsJspStandardContextBean.getInstance(request);
    // wire content + jsp as the element the JSP renders
    CmsContainerElementBean element = new CmsContainerElementBean(
        content.getStructureId(),
        jsp.getStructureId(),
        Collections.<String, String> emptyMap(),
        false);
    context.setElement(element);
    return loaderFacade.getLoader().dump(
        cms,
        loaderRes,
        null,
        cms.getRequestContext().getLocale(),
        request,
        response);
}
|
python
|
def gen_schema(data, **options):
    """
    Generate a node represents JSON schema object with type annotation added
    for given object node.

    :param data: Configuration data object (dict[-like] or namedtuple)
    :param options: Other keyword options such as:

        - ac_schema_strict: True if more strict (precise) schema is needed
        - ac_schema_typemap: Type to JSON schema type mappings

    :return: A dict represents JSON schema of this node
    :raises ValueError: If ``data`` is of a type no schema can be generated
        for. (The original code left ``scm`` unassigned in that case and
        crashed with a confusing NameError instead.)
    """
    if data is None:
        return dict(type="null")

    _type = type(data)

    if _type in _SIMPLE_TYPES:
        typemap = options.get("ac_schema_typemap", _SIMPLETYPE_MAP)
        scm = dict(type=typemap[_type])
    elif anyconfig.utils.is_dict_like(data):
        scm = object_to_schema(data, **options)
    elif anyconfig.utils.is_list_like(data):
        scm = array_to_schema(data, **options)
    else:
        # Fail loudly with context instead of the NameError the original
        # raised when none of the branches assigned ``scm``.
        raise ValueError(
            "Unsupported data type for schema generation: %r" % _type)

    return scm
|
java
|
/**
 * Creates an M-by-N matrix whose entries are independent uniformly
 * distributed random values in [0, 1), drawn via Math.random().
 *
 * @param M number of rows
 * @param N number of columns
 * @return the randomly filled matrix
 */
public static Matrix random(int M, int N) {
    Matrix A = new Matrix(M, N);
    for (int row = 0; row < M; row++) {
        for (int col = 0; col < N; col++) {
            A.data[row][col] = Math.random();
        }
    }
    return A;
}
|
python
|
def update_probes(self, progress):
    """
    Update the probe tree with freshly read probe values.

    On the first call the (empty) tree is filled from scratch; afterwards
    only the value column of the existing items is refreshed. Optionally
    re-plots the selected probe and appends a CSV line to the probe log
    file when logging is enabled.

    :param progress: unused in the body; emitted by the probe-reader signal
    """
    new_values = self.read_probes.probes_values
    probe_count = len(self.read_probes.probes)
    if probe_count > self.tree_probes.topLevelItemCount():
        # when run for the first time, there are no probes in the tree, so we have to fill it first
        self.fill_treewidget(self.tree_probes, new_values)
    else:
        # refresh the value of every child item in place
        for x in range(probe_count):
            topLvlItem = self.tree_probes.topLevelItem(x)
            for child_id in range(topLvlItem.childCount()):
                child = topLvlItem.child(child_id)
                child.value = new_values[topLvlItem.name][child.name]
                child.setText(1, str(child.value))
    if self.probe_to_plot is not None:
        self.probe_to_plot.plot(self.matplotlibwidget_1.axes)
        self.matplotlibwidget_1.draw()
    if self.chk_probe_log.isChecked():
        # flatten all probe values into a single comma-separated CSV line
        data = ','.join(list(np.array([[str(p) for p in list(p_dict.values())] for instr, p_dict in new_values.items()]).flatten()))
        self.probe_file.write('{:s}\n'.format(data))
|
java
|
/**
 * Returns the textual name of this chronology in the given style and
 * locale. A throwaway TemporalAccessor is passed to the formatter that
 * supports no fields and answers only the chronology query, so
 * appendChronologyText resolves purely to this chronology's name.
 *
 * @param style the text style to use
 * @param locale the locale to localize for
 * @return the display name of this chronology
 */
public String getDisplayName(TextStyle style, Locale locale) {
    return new DateTimeFormatterBuilder().appendChronologyText(style).toFormatter(locale).format(new DefaultInterfaceTemporalAccessor() {
        @Override
        public boolean isSupported(TemporalField field) {
            // no temporal fields are supported by this accessor
            return false;
        }

        @Override
        public long getLong(TemporalField field) {
            throw new UnsupportedTemporalTypeException("Unsupported field: " + field);
        }

        @SuppressWarnings("unchecked")
        @Override
        public <R> R query(TemporalQuery<R> query) {
            // the chronology query is the only one answered locally
            if (query == TemporalQueries.chronology()) {
                return (R) Chronology.this;
            }
            return super.query(query);
        }
    });
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.