language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
|---|---|
python
|
def _get_tracker(self, resource):
"""
Return the resource tracker that is tracking ``resource``.
:param resource: A resource.
:return: A resource tracker.
:rtype: :class:`_ResourceTracker`
"""
with self._lock:
for rt in self._reference_queue:
if rt is not None and resource is rt.resource:
return rt
raise UnknownResourceError('Resource not created by pool')
|
python
|
def _next_trace_frames(
    self,
    session: Session,
    trace_frame: TraceFrameQueryResult,
    visited_ids: Set[int],
    backwards: bool = False,
) -> List[TraceFrameQueryResult]:
    """Finds all trace frames that the given trace_frame flows to.
    When backwards=True, the result will include the parameter trace_frame,
    since we are filtering on the parameter's callee.

    :param session: open database session used for the query
    :param trace_frame: the frame whose neighbours are being expanded
    :param visited_ids: ids of frames already expanded; such frames are
        excluded from the result to avoid revisiting
    :param backwards: when True, walk toward callers (predecessors)
        instead of callees (successors)
    :return: frames of the same run and kind, joined to their leaf
        associations, ordered by trace length then callee location, and
        restricted to frames whose leaves intersect the active
        sources/sinks filter
    """
    # Base query: frames of the current run with the same kind
    # (pre-/postcondition) as the frame being expanded.
    query = (
        session.query(
            TraceFrame.id,
            TraceFrame.caller_id,
            CallerText.contents.label("caller"),
            TraceFrame.caller_port,
            TraceFrame.callee_id,
            CalleeText.contents.label("callee"),
            TraceFrame.callee_port,
            TraceFrame.callee_location,
            TraceFrame.kind,
            FilenameText.contents.label("filename"),
            TraceFrameLeafAssoc.trace_length,
        )
        .filter(TraceFrame.run_id == self.current_run_id)
        .filter(TraceFrame.kind == trace_frame.kind)
        .join(CallerText, CallerText.id == TraceFrame.caller_id)
        .join(CalleeText, CalleeText.id == TraceFrame.callee_id)
        .join(FilenameText, FilenameText.id == TraceFrame.filename_id)
        .filter(
            TraceFrame.caller_id != TraceFrame.callee_id
        )  # skip recursive calls for now
    )
    # Link frames on matching (id, port) pairs: backwards matches this
    # frame's caller against candidate callees, forwards the reverse.
    if backwards:
        query = query.filter(TraceFrame.callee_id == trace_frame.caller_id).filter(
            TraceFrame.callee_port == trace_frame.caller_port
        )
    else:
        query = query.filter(TraceFrame.caller_id == trace_frame.callee_id).filter(
            TraceFrame.caller_port == trace_frame.callee_port
        )
    # Shortest traces first; group_by collapses duplicate rows created by
    # the one-to-many leaf association join.
    results = (
        query.join(
            TraceFrameLeafAssoc, TraceFrameLeafAssoc.trace_frame_id == TraceFrame.id
        )
        .group_by(TraceFrame.id)
        .order_by(TraceFrameLeafAssoc.trace_length, TraceFrame.callee_location)
    )
    # Postconditions flow from sources; everything else flows to sinks.
    filter_leaves = (
        self.sources if trace_frame.kind == TraceKind.POSTCONDITION else self.sinks
    )
    filtered_results = []
    for frame in results:
        # Keep only unvisited frames whose leaves intersect the filter.
        if int(frame.id) not in visited_ids and filter_leaves.intersection(
            set(
                self._get_leaves_trace_frame(
                    session,
                    int(frame.id),
                    self._trace_kind_to_shared_text_kind(frame.kind),
                )
            )
        ):
            filtered_results.append(frame)
    return filtered_results
|
java
|
/**
 * Builds an UPDATE statement for the entity behind the given mapped
 * statement: UPDATE &lt;table&gt; SET &lt;differing columns&gt; WHERE &lt;primary key&gt;.
 *
 * @param ms the MyBatis mapped statement identifying the entity
 * @return the assembled SQL string
 */
public String updateByDiffer(MappedStatement ms) {
    Class<?> entityClass = getEntityClass(ms);
    return new StringBuilder()
            .append(SqlHelper.updateTable(entityClass, tableName(entityClass)))
            .append(updateSetColumnsByDiffer(entityClass))
            .append(wherePKColumns(entityClass, true))
            .toString();
}
|
python
|
def dump_stack_trace(stack_trace, bits = None):
    """
    Dump a stack trace, as returned by L{Thread.get_stack_trace} with the
    C{bUseLabels} parameter set to C{False}.

    @type stack_trace: list( int, int, str )
    @param stack_trace: Stack trace as a list of tuples of
        ( return address, frame pointer, module filename )

    @type bits: int
    @param bits:
        (Optional) Number of bits of the target architecture.
        The default is platform dependent. See: L{HexDump.address_size}

    @rtype: str
    @return: Text suitable for logging.
    """
    if not stack_trace:
        return ''
    table = Table()
    table.addRow('Frame', 'Origin', 'Module')
    for frame_pointer, return_address, module in stack_trace:
        table.addRow(HexDump.address(frame_pointer, bits),
                     HexDump.address(return_address, bits),
                     module)
    return table.getOutput()
|
python
|
def consume(self, callback, queue):
    """
    Register a message consumer that executes the provided callback when
    messages are received.

    The queue must exist prior to calling this method. If a consumer
    already exists for the given queue, the callback is simply updated and
    any new messages for that consumer use the new callback.

    If :meth:`resumeProducing` has not been called when this method is
    called, it will be called for you.

    Args:
        callback (callable): The callback to invoke when a message is received.
        queue (str): The name of the queue to consume from.

    Returns:
        fedora_messaging.twisted.protocol.Consumer: A namedtuple that
        identifies this consumer.

    Raises:
        NoFreeChannels: If there are no available channels on this connection.
            If this occurs, you can either reduce the number of consumers on
            this connection or create an additional connection.
    """
    if queue in self._consumers and self._consumers[queue].channel.is_open:
        # Already consuming from this queue on a live channel: swap in the
        # new callback while keeping the existing tag and channel.
        consumer = Consumer(
            tag=self._consumers[queue].tag,
            queue=queue,
            callback=callback,
            channel=self._consumers[queue].channel,
        )
        self._consumers[queue] = consumer
        defer.returnValue(consumer)

    channel = yield self._allocate_channel()
    consumer = Consumer(
        tag=str(uuid.uuid4()), queue=queue, callback=callback, channel=channel
    )
    self._consumers[queue] = consumer
    if not self._running:
        # resumeProducing registers every known consumer (including this
        # new one) with the broker, so we are done after it runs.
        yield self.resumeProducing()
        defer.returnValue(consumer)

    queue_object, _ = yield consumer.channel.basic_consume(
        queue=consumer.queue, consumer_tag=consumer.tag
    )
    deferred = self._read(queue_object, consumer)
    # BUGFIX: the errback was previously registered as
    # ``addErrback(lambda f: _legacy_twisted_log.msg, "...", c=..., ...)``
    # which never invoked ``msg`` and passed the extra arguments to the
    # one-argument lambda, raising TypeError when the errback fired.
    deferred.addErrback(
        lambda failure: _legacy_twisted_log.msg(
            "_read failed on consumer {c}", c=consumer, logLevel=logging.ERROR
        )
    )
    _legacy_twisted_log.msg("Successfully registered AMQP consumer {c}", c=consumer)
    defer.returnValue(consumer)
|
java
|
void addGuardParameters( Map<String, Expression> parameters, boolean isDefault ) {
    // Entering a guard clears any default-function bookkeeping.
    guardDefault = isDefault;
    wasDefaultFunction = false;
    isGuard = true;
    if( parameters == null ) {
        return;
    }
    addMixin( null, parameters, null );
}
|
python
|
def _print_results_to_stdout(
        self,
        classifications,
        crossmatches):
    """*print the classification and crossmatch results for a single transient object to stdout*

    **Key Arguments:**
        - ``crossmatches`` -- the unranked crossmatch classifications
        - ``classifications`` -- the classifications assigned to the transients post-crossmatches (dictionary of rank ordered list of classifications)

    Output is suppressed entirely at verbosity 0; verbosity 2 adds the
    extended (per-filter magnitude) columns to the table.

    .. todo ::
        - update key arguments values and definitions with defaults
        - update return values and definitions
        - update usage examples and text
        - update docstring text
        - check sublime snippet exists
        - clip any useful text to docs mindmap
        - regenerate the docs and check redendering of this docstring
    """
    self.log.debug('starting the ``_print_results_to_stdout`` method')
    # Verbosity 0 means silent operation.
    if self.verbose == 0:
        return
    # Headline: top-ranked classification, or ORPHAN if nothing matched.
    if self.name in classifications:
        headline = self.name + "'s Predicted Classification: " + \
            classifications[self.name][0]
    else:
        headline = self.name + "'s Predicted Classification: ORPHAN"
    print headline
    print
    print "Suggested Associations:"
    # REPORT ONLY THE MOST PREFERED MAGNITUDE VALUE
    basic = ["association_type", "rank", "rankScore", "catalogue_table_name", "catalogue_object_id", "catalogue_object_type", "catalogue_object_subtype",
             "raDeg", "decDeg", "separationArcsec", "physical_separation_kpc", "direct_distance", "distance", "z", "photoZ", "photoZErr", "Mag", "MagFilter", "MagErr", "classificationReliability", "merged_rank"]
    verbose = ["search_name", "catalogue_view_name", "original_search_radius_arcsec", "direct_distance_modulus", "distance_modulus", "direct_distance_scale", "major_axis_arcsec", "scale", "U", "UErr",
               "B", "BErr", "V", "VErr", "R", "RErr", "I", "IErr", "J", "JErr", "H", "HErr", "K", "KErr", "_u", "_uErr", "_g", "_gErr", "_r", "_rErr", "_i", "_iErr", "_z", "_zErr", "_y", "G", "GErr", "_yErr", "unkMag"]
    # Columns that should be reported verbatim, not rounded to 2 d.p.
    dontFormat = ["decDeg", "raDeg", "rank",
                  "catalogue_object_id", "catalogue_object_subtype", "merged_rank"]
    if self.verbose == 2:
        basic = basic + verbose
    # Pick the single preferred magnitude per crossmatch, following the
    # configured filter preference order.
    for c in crossmatches:
        for f in self.filterPreference:
            if f in c and c[f]:
                c["Mag"] = c[f]
                c["MagFilter"] = f.replace("_", "").replace("Mag", "")
                if f + "Err" in c:
                    c["MagErr"] = c[f + "Err"]
                else:
                    c["MagErr"] = None
                break
    # Normalise all rows to the same key set so the table is rectangular.
    allKeys = []
    for c in crossmatches:
        for k, v in c.iteritems():
            if k not in allKeys:
                allKeys.append(k)
    for c in crossmatches:
        for k in allKeys:
            if k not in c:
                c[k] = None
    printCrossmatches = []
    for c in crossmatches:
        ordDict = collections.OrderedDict(sorted({}.items()))
        for k in basic:
            if k in c:
                if k == "catalogue_table_name":
                    # Human-readable catalogue name.
                    c[k] = c[k].replace("tcs_cat_", "").replace("_", " ")
                if k == "classificationReliability":
                    # Map numeric reliability codes to their labels.
                    if c[k] == 1:
                        c["classification reliability"] = "synonym"
                    elif c[k] == 2:
                        c["classification reliability"] = "association"
                    elif c[k] == 3:
                        c["classification reliability"] = "annotation"
                    k = "classification reliability"
                if k == "catalogue_object_subtype" and "sdss" in c["catalogue_table_name"]:
                    # SDSS encodes object type numerically: 6=galaxy, 3=star.
                    if c[k] == 6:
                        c[k] = "galaxy"
                    elif c[k] == 3:
                        c[k] = "star"
                columnName = k.replace("tcs_cat_", "").replace("_", " ")
                value = c[k]
                if k not in dontFormat:
                    # Round numeric values to 2 d.p.; non-numerics fall
                    # through unchanged.
                    try:
                        ordDict[columnName] = "%(value)0.2f" % locals()
                    except:
                        ordDict[columnName] = value
                else:
                    ordDict[columnName] = value
        printCrossmatches.append(ordDict)
    from fundamentals.renderer import list_of_dictionaries
    dataSet = list_of_dictionaries(
        log=self.log,
        listOfDictionaries=printCrossmatches
    )
    tableData = dataSet.table(filepath=None)
    print tableData
    self.log.debug('completed the ``_print_results_to_stdout`` method')
    return None
|
java
|
/**
 * Fetches a scan run by its typed resource name.
 *
 * @param name the resource name of the scan run; may be null
 * @return the requested {@code ScanRun}
 */
public final ScanRun getScanRun(ScanRunName name) {
    // Convert the typed name into a raw request and delegate.
    String resourceName = (name == null) ? null : name.toString();
    return getScanRun(GetScanRunRequest.newBuilder().setName(resourceName).build());
}
|
java
|
/**
 * Lists the users registered under a face group.
 *
 * @param groupId id of the group to query
 * @param options optional extra request parameters; may be null
 * @return the server's JSON response
 */
public JSONObject getGroupUsers(String groupId, HashMap<String, String> options) {
    AipRequest request = new AipRequest();
    preOperation(request);
    // Mandatory field first, then any caller-supplied extras.
    request.addBody("group_id", groupId);
    if (options != null) {
        request.addBody(options);
    }
    request.setBodyFormat(EBodyFormat.RAW_JSON);
    request.setUri(FaceConsts.GROUP_GETUSERS);
    postOperation(request);
    return requestServer(request);
}
|
python
|
def is_parent_of_objective_bank(self, id_, objective_bank_id):
    """Tests if an ``Id`` is a direct parent of an objective bank.

    arg:    id (osid.id.Id): an ``Id``
    arg:    objective_bank_id (osid.id.Id): the ``Id`` of an
            objective bank
    return: (boolean) - ``true`` if this ``id`` is a parent of
            ``objective_bank_id,`` ``false`` otherwise
    raise:  NotFound - ``objective_bank_id`` is not found
    raise:  NullArgument - ``id`` or ``objective_bank_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    *implementation notes*: If ``id`` not found return ``false``.

    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.is_parent_of_bin
    catalog_session = self._catalog_session
    if catalog_session is None:
        # Fall back to the raw hierarchy service.
        return self._hierarchy_session.is_parent(id_=objective_bank_id, parent_id=id_)
    return catalog_session.is_parent_of_catalog(id_=id_, catalog_id=objective_bank_id)
|
java
|
/**
 * Compacts every segment group: each group is compacted into a single
 * segment, released entries are merged into it, and the old group's
 * segments are deleted.
 */
private void compactGroups() {
    for (int groupIndex = 0; groupIndex < groups.size(); groupIndex++) {
        // groups and predicates are parallel lists indexed together.
        List<Segment> group = groups.get(groupIndex);
        List<OffsetPredicate> groupPredicates = predicates.get(groupIndex);
        Segment compacted = compactGroup(group, groupPredicates);
        mergeReleased(group, groupPredicates, compacted);
        deleteGroup(group);
    }
}
|
java
|
/**
 * Returns the localized ENDPOINT_UNKNOWN_PARAMS message with the three
 * arguments substituted into the message template.
 */
public static String ENDPOINT_UNKNOWN_PARAMS(Object arg0, Object arg1, Object arg2) {
    return localizer.localize(localizableENDPOINT_UNKNOWN_PARAMS(arg0, arg1, arg2));
}
|
python
|
def supervised_to_dict(dataset, text2self):
    """Turns a supervised dataset into a dataset with a feature dictionary.

    if text2self, then the features dictionary contains a "targets" key.
    else, the features dictionary contains "inputs" and "targets" keys.

    Args:
        dataset: a tf.data.Dataset
        text2self: a boolean
    Returns:
        a tf.data.Dataset
    """
    def to_feature_dict(inputs, targets):
        # Language-modeling style tasks (text2self) carry no separate inputs.
        if text2self:
            return {"targets": targets}
        return {"inputs": inputs, "targets": targets}
    return dataset.map(to_feature_dict,
                       num_parallel_calls=tf.data.experimental.AUTOTUNE)
|
python
|
def find_by_opcode( checked_ops, opcode ):
    """
    Given all previously-accepted operations in this block,
    find the ones that are of a particular opcode.

    @opcode can be one opcode, or a list of opcodes

    >>> find_by_opcode([{'op': '+'}, {'op': '>'}], 'NAME_UPDATE')
    [{'op': '+'}]
    >>> find_by_opcode([{'op': '+'}, {'op': '>'}], ['NAME_UPDATE', 'NAME_TRANSFER'])
    [{'op': '+'}, {'op': '>'}]
    >>> find_by_opcode([{'op': '+'}, {'op': '>'}], ':')
    []
    >>> find_by_opcode([], ':')
    []
    """
    # Normalize the single-opcode form to a list.
    opcodes = opcode if type(opcode) == list else [opcode]
    return [opdata for opdata in checked_ops
            if op_get_opcode_name(opdata['op']) in opcodes]
|
java
|
/**
 * Builds the request parameter map for a proxied method call.
 *
 * <p>Header-only parameters are skipped; parameters annotated with
 * {@code @Expensive} are serialized to JSON and flattened into the map;
 * all other parameters are added under their {@code @Param} name (or the
 * reflective parameter name when the annotation is absent).
 *
 * @param method the invoked interface method
 * @param args   the call arguments; may be null
 * @return a multimap of request parameter names to values (possibly empty)
 */
static MultiMap<String> resolveParameters(Method method, Object[] args) {
    MultiMap<String> parameterMap = new MultiValueMap<>(Optional.ofNullable(args).map(ary -> ary.length).orElse(0));
    if (args == null) {
        return parameterMap;
    }
    Parameter[] parameters = method.getParameters();
    for (int i = 0; i < parameters.length; i++) {
        Parameter parameter = parameters[i];
        // Skip header-only parameters (unless also marked as a request param).
        if ((parameter.isAnnotationPresent(Header.class) || parameter.isAnnotationPresent(ParamHeader.class))
                && !parameter.isAnnotationPresent(Param.class)) {
            continue;
        }
        if (parameter.isAnnotationPresent(Expensive.class)) {
            // Flatten the object's JSON fields into individual parameters.
            Object obj = args[i];
            JSONObject jsonObject = (JSONObject) JSON.toJSON(obj);
            // FIX: Optional.ofNullable(value).orElse(null) was a no-op
            // wrapper identical to the raw value.
            jsonObject.forEach(parameterMap::put);
        } else {
            Param param = parameter.getAnnotation(Param.class);
            String parameterName = Optional.ofNullable(param).map(Param::value)
                    .orElse(parameter.getName());
            parameterMap.put(parameterName, args[i]);
        }
    }
    return parameterMap;
}
|
java
|
/**
 * Strips any package prefix from a fully-qualified class name.
 *
 * @param className the (possibly qualified) class name; may be null
 * @return the simple name after the last '.', the input unchanged when it
 *         contains no dot, or null when the input is null
 */
public static String extractName(String className) {
    if (className == null) {
        return null;
    }
    int lastDot = className.lastIndexOf('.');
    return (lastDot < 0) ? className : className.substring(lastDot + 1);
}
|
python
|
def finish_data(self, layers):
    """
    Modify data before it is drawn out by the geom

    Parameters
    ----------
    layers : list
        List of layers
    """
    # Each layer's dataframe is post-processed in place by the facet.
    for current_layer in layers:
        current_layer.data = self.facet.finish_data(current_layer.data, self)
|
python
|
def create(vm_):
    '''
    Create a single VM from a data dict.

    Fires the standard salt-cloud creating/created events, requests the
    instance from Azure ARM, waits for a reachable IP on the configured
    bootstrap interface, bootstraps the minion, and returns the combined
    bootstrap result and instance details.
    '''
    # Abort early when the profile exists but is mis-configured.
    # AttributeError is expected when called directly without the cloud
    # profile machinery in place, so it is deliberately ignored.
    try:
        if vm_['profile'] and config.is_profile_configured(
            __opts__,
            __active_provider_name__ or 'azurearm',
            vm_['profile'],
            vm_=vm_
        ) is False:
            return False
    except AttributeError:
        pass
    # Default to bootstrapping over the public interface.
    if vm_.get('bootstrap_interface') is None:
        vm_['bootstrap_interface'] = 'public'
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'creating', vm_, ['name', 'profile', 'provider', 'driver']
        ),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'azurearm', vm_['driver']
    )
    if not vm_.get('location'):
        vm_['location'] = get_location(kwargs=vm_)
    log.info('Creating Cloud VM %s in %s', vm_['name'], vm_['location'])
    vm_request = request_instance(vm_=vm_)
    if not vm_request or 'error' in vm_request:
        err_message = 'Error creating VM {0}! ({1})'.format(vm_['name'], six.text_type(vm_request))
        log.error(err_message)
        raise SaltCloudSystemExit(err_message)
    def _query_node_data(name, bootstrap_interface):
        '''
        Query node data.

        Returns the first IP on the requested interface, or False while
        the node has no data / no address yet (wait_for_ip retries on
        a falsey return).
        '''
        data = show_instance(name, call='action')
        if not data:
            return False
        ip_address = None
        if bootstrap_interface == 'public':
            ip_address = data['public_ips'][0]
        if bootstrap_interface == 'private':
            ip_address = data['private_ips'][0]
        if ip_address is None:
            return False
        return ip_address
    # Poll until the node exposes an IP, honouring configurable timeout,
    # interval, and backoff multiplier.
    try:
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(vm_['name'], vm_['bootstrap_interface'],),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
            interval_multiplier=config.get_cloud_config_value(
                'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
        )
    except (
        SaltCloudExecutionTimeout,
        SaltCloudExecutionFailure,
        SaltCloudSystemExit
    ) as exc:
        try:
            log.warning(exc)
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))
    # data is now the IP address returned by _query_node_data.
    vm_['ssh_host'] = data
    if not vm_.get('ssh_username'):
        vm_['ssh_username'] = config.get_cloud_config_value(
            'ssh_username', vm_, __opts__
        )
    vm_['password'] = config.get_cloud_config_value(
        'ssh_password', vm_, __opts__
    )
    # Bootstrap the salt minion onto the new node.
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    data = show_instance(vm_['name'], call='action')
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'],
        pprint.pformat(data)
    )
    ret.update(data)
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'created',
            vm_, ['name', 'profile', 'provider', 'driver']
        ),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
|
python
|
def simplify_for_Union(type_list):
    """Removes types that are subtypes of other elements in the list.
    Does not return a copy, but instead modifies the given list.
    Intended for preprocessing of types to be combined into a typing.Union.
    Subtypecheck is backed by pytypes.is_subtype, so this differs from
    typing.Union's own simplification efforts.
    E.g. this also considers numeric tower like described in
    https://www.python.org/dev/peps/pep-0484/#the-numeric-tower
    (treats int as subtype of float as subtype of complex)
    Use pytypes.apply_numeric_tower flag to switch off numeric tower support.
    """
    # For each pivot type_list[i], remove every other entry that is a
    # subtype of it. Deleting entries shifts indices, hence the manual
    # while-loop bookkeeping instead of a for-loop.
    i = 0
    while i < len(type_list):
        # Pass 1: drop earlier entries subsumed by the pivot.
        j = 0
        while j < i:
            if _issubclass(type_list[j], type_list[i]):
                del type_list[j]
                # Pivot moved one slot to the left after the deletion.
                i -= 1
            else:
                j += 1
        # Pass 2: drop later entries subsumed by the pivot.
        j = i+1
        while j < len(type_list):
            if _issubclass(type_list[j], type_list[i]):
                del type_list[j]
            else:
                j += 1
        i += 1
|
python
|
def ack_message(self, message):
    """Acknowledge the message on the broker and log the ack

    :param message: The message to acknowledge
    :type message: rejected.data.Message

    """
    channel = message.channel
    if channel.is_closed:
        # The broker connection is gone; the ack can never be delivered.
        LOGGER.warning('Can not ack message, channel is closed')
        self.counters[self.CLOSED_ON_COMPLETE] += 1
        return
    channel.basic_ack(delivery_tag=message.delivery_tag)
    self.counters[self.ACKED] += 1
    self.measurement.set_tag(self.ACKED, True)
|
java
|
/**
 * Loads a logging provider descriptor from the given URL and registers it
 * when its declared API version is supported. I/O failures are logged and
 * swallowed so one bad descriptor does not abort provider discovery.
 *
 * @param url location of the provider properties file
 * @param cl  class loader used to instantiate the provider
 */
protected static void loadProvider(final URL url, final ClassLoader cl) {
    try {
        final Properties providerProps = PropertiesUtil.loadClose(url.openStream(), url);
        // Silently skip descriptors written against an unsupported API version.
        if (!validVersion(providerProps.getProperty(API_VERSION))) {
            return;
        }
        final Provider provider = new Provider(providerProps, url, cl);
        PROVIDERS.add(provider);
        LOGGER.debug("Loaded Provider {}", provider);
    } catch (final IOException e) {
        LOGGER.error("Unable to open {}", url, e);
    }
}
|
java
|
/**
 * Maps every load/store position to its backtrack position
 * (this or the next zero-position, per {@code findBacktrackPosition}).
 *
 * @param loadStorePositions bytecode positions of load/store instructions
 * @return the corresponding set of backtrack positions
 */
Set<Integer> findLoadStoreBacktrackPositions(final Set<Integer> loadStorePositions) {
    // go to this or next zero-position
    return loadStorePositions.stream().map(this::findBacktrackPosition).collect(Collectors.toSet());
}
|
python
|
def add_commands(cmds):
    """
    Adds one or more commands to the module namespace.

    Commands must end in "Command" to be added.

    Example (see tests/parser.py):
        sievelib.commands.add_commands(MytestCommand)

    :param cmds: a single Command Object or list of Command Objects
    """
    # Accept either a single command object or any iterable of them.
    commands = cmds if isinstance(cmds, Iterable) else [cmds]
    for command in commands:
        name = command.__name__
        # Only names following the *Command convention are exported.
        if name.endswith("Command"):
            globals()[name] = command
|
java
|
/**
 * Wraps the given task in a new thread whose name is prefixed so
 * embedded-server threads are easy to identify in thread dumps.
 */
private Thread createServerThread(ServerTask r) {
    Thread serverThread = new Thread(r);
    serverThread.setName("EmbeddedLibertyServer-" + serverThread.getName());
    return serverThread;
}
|
java
|
/**
 * Adds the given sink to this data stream and registers it with the
 * execution environment.
 *
 * @param sinkFunction the user-defined sink
 * @return the resulting {@code DataStreamSink}
 */
public DataStreamSink<T> addSink(SinkFunction<T> sinkFunction) {
    // Reading the output type of the input transform coaxes out errors
    // about MissingTypeInfo before the sink is wired up.
    transformation.getOutputType();
    if (sinkFunction instanceof InputTypeConfigurable) {
        // Give type-aware sinks a chance to configure themselves.
        ((InputTypeConfigurable) sinkFunction).setInputType(getType(), getExecutionConfig());
    }
    StreamSink<T> operator = new StreamSink<>(clean(sinkFunction));
    DataStreamSink<T> streamSink = new DataStreamSink<>(this, operator);
    getExecutionEnvironment().addOperator(streamSink.getTransformation());
    return streamSink;
}
|
java
|
/**
 * Initializes the query filter from the JPQL where-clause: with no
 * filter only the discriminator clause is considered; otherwise the
 * where-clause is traversed and typed parameters are resolved.
 */
private void initFilter() {
    EntityMetadata metadata = KunderaMetadataManager.getEntityMetadata(kunderaMetadata, entityClass);
    Metamodel metaModel = kunderaMetadata.getApplicationMetadata().getMetamodel(getPersistenceUnit());
    EntityType entityType = metaModel.entity(entityClass);
    if (null == filter) {
        // NOTE(review): `clauses` is populated but never read after this
        // call — presumably addDiscriminatorClause mutates it for a side
        // effect elsewhere, or this is dead code; confirm before changing.
        List<String> clauses = new ArrayList<String>();
        addDiscriminatorClause(clauses, entityType);
        return;
    }
    WhereClause whereClause = KunderaQueryUtils.getWhereClause(getJpqlExpression());
    KunderaQueryUtils.traverse(whereClause.getConditionalExpression(), metadata, kunderaMetadata, this, false);
    // Resolve typed parameters for every non-string filter clause.
    for (Object filterClause : filtersQueue) {
        if (!(filterClause instanceof String)) {
            onTypedParameter(((FilterClause) filterClause));
        }
    }
    addDiscriminatorClause(null, entityType);
}
|
java
|
/**
 * Parses a date string with the given formatter and returns it as a
 * ZonedDateTime at the start of day in the requested zone, or null when
 * the string cannot be parsed at all.
 */
static ZonedDateTime getDateFromStringToZoneId(String date, ZoneId zoneId, DateTimeFormatter formatter) throws DateTimeParseException {
    try {
        // Noticed that the date was not parsed with a non-US locale; this
        // explicit LocalDate parse works around that.
        LocalDate localDate = LocalDate.parse(date, formatter);
        ZonedDateTime usDate = localDate.atStartOfDay(zoneId);
        // NOTE(review): withZoneSameInstant(zoneId) after atStartOfDay(zoneId)
        // appears to be a no-op since the value is already in that zone —
        // confirm before simplifying.
        return usDate.withZoneSameInstant(zoneId);
    } catch (Exception e) {
        // Fallback: anchor the parse in the Sherdog time zone, then
        // convert to the requested zone.
        try {
            ZonedDateTime usDate = LocalDate.parse(date, formatter).atStartOfDay(ZoneId.of(Constants.SHERDOG_TIME_ZONE));
            return usDate.withZoneSameInstant(zoneId);
        } catch (DateTimeParseException e2) {
            // Unparseable in both attempts.
            return null;
        }
    }
}
|
java
|
/**
 * Convenience wrapper around {@code AppConsume}: sends the request map
 * and converts the raw response string back into a key/value map.
 *
 * @param reqData request fields for the app-consume transaction
 * @return the parsed response fields
 */
public static Map<String, String> AppConsumeByMap(Map<String, String> reqData) {
    return SDKUtil.convertResultStringToMap(AppConsume(reqData));
}
|
java
|
/**
 * Returns the script that implements this client behavior, delegating to
 * the registered {@code ClientBehaviorRenderer} when one exists, or null
 * when no renderer is available for this behavior's renderer type.
 *
 * @param behaviorContext context of the component requesting the script
 * @throws NullPointerException if {@code behaviorContext} is null
 */
public String getScript(ClientBehaviorContext behaviorContext)
{
    if (behaviorContext == null)
    {
        throw new NullPointerException("behaviorContext");
    }
    ClientBehaviorRenderer behaviorRenderer = getRenderer(behaviorContext.getFacesContext());
    if (behaviorRenderer == null)
    {
        // No BehaviorRenderer registered for this renderer type.
        return null;
    }
    // Delegate to BehaviorRenderer.getScript, making sure the cached
    // FacesContext is cleared even if the renderer throws.
    try
    {
        setCachedFacesContext(behaviorContext.getFacesContext());
        return behaviorRenderer.getScript(behaviorContext, this);
    }
    finally
    {
        setCachedFacesContext(null);
    }
}
|
java
|
/**
 * Registers this instance as an OSGi {@code FileMonitor} service watching
 * the given paths at the given polling interval.
 *
 * @param paths           files/directories to watch
 * @param monitorInterval polling interval in milliseconds
 * @return the resulting service registration
 */
public ServiceRegistration<FileMonitor> monitorFiles(Collection<String> paths, long monitorInterval) {
    final Hashtable<String, Object> props = new Hashtable<String, Object>();
    props.put(FileMonitor.MONITOR_FILES, paths);
    props.put(FileMonitor.MONITOR_INTERVAL, monitorInterval);
    return actionable.getBundleContext().registerService(FileMonitor.class, this, props);
}
|
python
|
def children(self, val: list):
    """ Sets children

    :param val: List of citation children
    """
    accepted = []
    if val is not None:
        for citation in val:
            # None entries are silently dropped.
            if citation is None:
                continue
            if not isinstance(citation, (BaseCitation, type(self))):
                raise TypeError("Citation children should be Citation")
            # Children share the root of their parent; a BaseCitation
            # parent forwards its own root, otherwise it *is* the root.
            citation.root = self.root if isinstance(self, BaseCitation) else self
            accepted.append(citation)
    self._children = accepted
|
python
|
def get(self, session):
    '''taobao.aftersale.get — query the user's after-sale service templates.

    Returns only the title and id of each template the user has configured.

    :param session: Taobao API session key used to authorize the request.
    :return: the after-sales templates parsed onto this object.
    '''
    request = TOPRequest('taobao.aftersale.get')
    self.create(self.execute(request, session))
    return self.after_sales
|
python
|
def get_psf_sky(self, ra, dec):
    """
    Determine the local psf at a given sky location.
    The psf is returned in degrees.

    Parameters
    ----------
    ra, dec : float
        The sky position (degrees).

    Returns
    -------
    a, b, pa : float
        The psf semi-major axis, semi-minor axis, and position angle in (degrees).
        If a psf is defined then it is the psf that is returned, otherwise the image
        restoring beam is returned.
    """
    # Without a psf map, fall back to the restoring beam from the fits
    # header (including ZA scaling).
    if self.data is None:
        beam = self.wcshelper.get_beam(ra, dec)
        return beam.a, beam.b, beam.pa
    x, y = self.sky2pix([ra, dec])
    # Interpolation is left to whoever made the psf map; clamping the
    # pixel coordinates to the map bounds just makes sense.
    max_x = self.data.shape[1] - 1
    max_y = self.data.shape[2] - 1
    x = int(np.clip(x, 0, max_x))
    y = int(np.clip(y, 0, max_y))
    return self.data[:, x, y]
|
python
|
def parse_fn(fn):
    """ This parses the file name and returns the coordinates of the tile

    Parameters
    -----------
    fn : str
        Filename of a GEOTIFF, expected to look like
        ``N43o0_W110o5_<anything>.tif`` where 'o' stands in for the
        decimal point.

    Returns
    --------
    coords = [LLC.lat, LLC.lon, URC.lat, URC.lon]
        On any parsing failure a list of four NaNs is returned instead.
    """
    try:
        # Basename without extension; 'o' was used as a stand-in for '.'.
        parts = os.path.splitext(os.path.split(fn)[-1])[0].replace('o', '.')\
            .split('_')[:2]
        # Split on the hemisphere letters; the leading empty string from
        # re.split is discarded with [1:].
        coords = [float(crds)
                  for crds in re.split('[NSEW]', parts[0] + parts[1])[1:]]
    except Exception:
        # FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit. Any malformed name yields NaNs.
        coords = [np.nan] * 4
    return coords
|
java
|
/**
 * Removes the mailbox registered under the given HSId, if any.
 */
public void removeMailbox(long hsId) {
    synchronized (m_mapLock) {
        // ImmutableMap has no remove(): rebuild the map without the entry.
        ImmutableMap.Builder<Long, Mailbox> rebuilt = ImmutableMap.builder();
        for (Map.Entry<Long, Mailbox> entry : m_siteMailboxes.entrySet()) {
            if (!entry.getKey().equals(hsId)) {
                rebuilt.put(entry.getKey(), entry.getValue());
            }
        }
        m_siteMailboxes = rebuilt.build();
    }
}
|
java
|
/**
 * Opens and returns a {@link JarFile} for the backing path.
 *
 * @return a freshly opened JarFile (caller is responsible for closing it)
 * @throws FileNotFoundException if the backing path is not a regular file
 * @throws IOException if the jar cannot be opened (logged at FINE first)
 */
private JarFile getJarFile()
    throws IOException
{
    JarFile jarFile = null;
    // NOTE(review): return value ignored — presumably called for its
    // cache-refresh side effect (updating _backingIsFile); confirm.
    isCacheValid();
    if (! _backingIsFile) {
        throw new FileNotFoundException(getBacking().getNativePath());
    }
    try {
        jarFile = new JarFile(getBacking().getNativePath());
    }
    catch (IOException ex) {
        // Log at FINE for diagnostics, then let the caller handle it.
        if (log.isLoggable(Level.FINE))
            log.log(Level.FINE, L.l("Error opening jar file '{0}'", getBacking().getNativePath()));
        throw ex;
    }
    return jarFile;
}
|
java
|
/**
 * Parses a lambda initialiser expression of the form
 * {@code &[captures](params -> body)}, starting at the ampersand.
 *
 * @param scope      the enclosing scope at the point of the lambda
 * @param terminated whether the enclosing expression is known to be
 *                   terminated (unused here; the body is always
 *                   terminated by the closing brace)
 * @return the parsed lambda declaration annotated with its source span
 */
private Expr parseLambdaInitialiser(EnclosingScope scope, boolean terminated) {
    int start = index;
    match(Ampersand);
    // First parse the captured lifetimes with the original scope
    Tuple<Identifier> captures = parseOptionalCapturedLifetimes(scope);
    // Now we create a new scope for this lambda expression.
    // It keeps all variables but only the given captured lifetimes.
    // But it keeps all unavailable names, i.e. unaccessible lifetimes
    // from the outer scope cannot be redeclared.
    scope = scope.newEnclosingScope(captures);
    // Parse the optional lifetime parameters
    Tuple<Identifier> lifetimeParameters = parseOptionalLifetimeParameters(scope);
    Tuple<Decl.Variable> parameters = parseParameters(scope,MinusGreater);
    // NOTE: expression guaranteed to be terminated by ')'
    Expr body = parseExpression(scope, true);
    match(RightBrace);
    return annotateSourceLocation(new Decl.Lambda(new Tuple<>(), new Identifier(""), parameters, captures,
        lifetimeParameters, body, new Type.Unknown()), start);
}
|
java
|
/**
 * Converts a literal string into the corresponding
 * {@code IfcLampTypeEnum} value.
 *
 * @throws IllegalArgumentException if the literal is not a valid enumerator
 */
public IfcLampTypeEnum createIfcLampTypeEnumFromString(EDataType eDataType, String initialValue) {
    IfcLampTypeEnum parsed = IfcLampTypeEnum.get(initialValue);
    if (parsed != null) {
        return parsed;
    }
    throw new IllegalArgumentException(
            "The value '" + initialValue + "' is not a valid enumerator of '" + eDataType.getName() + "'");
}
|
python
|
def indent_lines(lines, output, branch_method, leaf_method, pass_syntax, flush_left_syntax, flush_left_empty_line,
                 indentation_method, get_block):
    """Returns None.

    The way this function produces output is by adding strings to the
    list that's passed in as the second parameter.

    Parameters
    ----------
    lines : list of basestring's
        Each string is a line of a SHPAML source code
        (trailing newlines not included).
    output : empty list
        Explained earlier...

    The remaining parameters are exactly the same as in the indent
    function:

    * branch_method -- called for multi-line blocks, receives ``output``,
      the block's (prefix, line) pairs, and ``recurse`` for re-entry
    * leaf_method -- transforms a single non-special line
    * pass_syntax -- lines equal to this are dropped entirely
    * flush_left_syntax -- prefix marking a line to emit flush-left
    * flush_left_empty_line -- prefix marking an empty flush-left line
    * indentation_method -- maps a raw line to a (prefix, line) pair
    * get_block -- returns the number of leading pairs forming one block
    """
    append = output.append
    def recurse(prefix_lines):
        # Consume (prefix, line) pairs from the front until exhausted.
        while prefix_lines:
            prefix, line = prefix_lines[0]
            if line == '':
                # Blank lines pass through unchanged.
                prefix_lines.pop(0)
                append('')
                continue
            block_size = get_block(prefix_lines)
            if block_size == 1:
                # Single-line block: handle the special syntaxes inline.
                prefix_lines.pop(0)
                if line == pass_syntax:
                    pass
                elif line.startswith(flush_left_syntax):
                    append(line[len(flush_left_syntax):])
                elif line.startswith(flush_left_empty_line):
                    append('')
                else:
                    append(prefix + leaf_method(line))
            else:
                # Multi-line block: delegate to branch_method, which may
                # call recurse() on the block's interior.
                block = prefix_lines[:block_size]
                prefix_lines = prefix_lines[block_size:]
                branch_method(output, block, recurse)
        return
    prefix_lines = list(map(indentation_method, lines))
    recurse(prefix_lines)
|
python
|
def GetCountStopTimes(self):
    """Return the number of stops made by this trip."""
    # Let SQLite do the counting rather than fetching all rows.
    row = self._schedule._connection.cursor().execute(
        'SELECT count(*) FROM stop_times WHERE trip_id=?', (self.trip_id,)).fetchone()
    return row[0]
|
java
|
/**
 * Returns a comparator over token ids that orders tokens by start
 * position, breaking ties by start offset when both offsets are known,
 * and finally by token value.
 */
public Comparator<Integer> getCompByName() {
    return new Comparator<Integer>() {
        @Override
        public int compare(Integer t1, Integer t2) {
            Integer p1 = tokenCollection.get(t1).getPositionStart();
            Integer p2 = tokenCollection.get(t2).getPositionStart();
            // Positions are required; offsets may legitimately be null.
            assert p1 != null : "no position for " + tokenCollection.get(t1);
            assert p2 != null : "no position for " + tokenCollection.get(t2);
            if (p1.equals(p2)) {
                // Same position: fall back to offsets, then values.
                Integer o1 = tokenCollection.get(t1).getOffsetStart();
                Integer o2 = tokenCollection.get(t2).getOffsetStart();
                if (o1 != null && o2 != null) {
                    if (o1.equals(o2)) {
                        return tokenCollection.get(t1).getValue()
                                .compareTo(tokenCollection.get(t2).getValue());
                    } else {
                        return o1.compareTo(o2);
                    }
                } else {
                    // Missing offset on either side: compare values directly.
                    return tokenCollection.get(t1).getValue()
                            .compareTo(tokenCollection.get(t2).getValue());
                }
            }
            return p1.compareTo(p2);
        }
    };
}
|
python
|
def htmlSetMetaEncoding(self, encoding):
    """Set the current encoding in the META tags.

    NOTE: this will not change the document content encoding, just
    the META flag associated with it."""
    # Thin wrapper over the underlying libxml2 binding.
    return libxml2mod.htmlSetMetaEncoding(self._o, encoding)
|
java
|
/**
 * Returns this plugin descriptor's configurations, resolving the EMF
 * feature lazily (the {@code true} flag requests resolution).
 */
@SuppressWarnings("unchecked")
@Override
public EList<PluginConfiguration> getConfigurations() {
    return (EList<PluginConfiguration>) eGet(StorePackage.Literals.PLUGIN_DESCRIPTOR__CONFIGURATIONS, true);
}
|
java
|
/**
 * Converts a duration between time units via an intermediate value in
 * minutes. "Elapsed" units use fixed calendar arithmetic (60*24 minutes
 * per day, 7-day weeks, 30-day months, 52-week years) while working
 * units use the supplied calendar settings. A zero calendar setting for
 * the target unit yields a zero duration rather than dividing by zero.
 *
 * @param duration       value expressed in {@code fromUnits}
 * @param fromUnits      unit of the input value
 * @param toUnits        unit of the returned value
 * @param minutesPerDay  working minutes per day
 * @param minutesPerWeek working minutes per week
 * @param daysPerMonth   working days per month
 * @return the converted duration in {@code toUnits}
 */
public static Duration convertUnits(double duration, TimeUnit fromUnits, TimeUnit toUnits, double minutesPerDay, double minutesPerWeek, double daysPerMonth)
{
   // Step 1: normalise the input to minutes (unlisted units, e.g.
   // minutes, pass through unchanged).
   switch (fromUnits)
   {
      case YEARS:
      {
         duration *= (minutesPerWeek * 52);
         break;
      }

      case ELAPSED_YEARS:
      {
         duration *= (60 * 24 * 7 * 52);
         break;
      }

      case MONTHS:
      {
         duration *= (minutesPerDay * daysPerMonth);
         break;
      }

      case ELAPSED_MONTHS:
      {
         duration *= (60 * 24 * 30);
         break;
      }

      case WEEKS:
      {
         duration *= minutesPerWeek;
         break;
      }

      case ELAPSED_WEEKS:
      {
         duration *= (60 * 24 * 7);
         break;
      }

      case DAYS:
      {
         duration *= minutesPerDay;
         break;
      }

      case ELAPSED_DAYS:
      {
         duration *= (60 * 24);
         break;
      }

      case HOURS:
      case ELAPSED_HOURS:
      {
         duration *= 60;
         break;
      }

      default:
      {
         break;
      }
   }

   // Step 2: convert from minutes to the target unit (skip when the
   // target already is minutes).
   if (toUnits != TimeUnit.MINUTES && toUnits != TimeUnit.ELAPSED_MINUTES)
   {
      switch (toUnits)
      {
         case HOURS:
         case ELAPSED_HOURS:
         {
            duration /= 60;
            break;
         }

         case DAYS:
         {
            // Guard against division by zero from an unset calendar.
            if (minutesPerDay != 0)
            {
               duration /= minutesPerDay;
            }
            else
            {
               duration = 0;
            }
            break;
         }

         case ELAPSED_DAYS:
         {
            duration /= (60 * 24);
            break;
         }

         case WEEKS:
         {
            if (minutesPerWeek != 0)
            {
               duration /= minutesPerWeek;
            }
            else
            {
               duration = 0;
            }
            break;
         }

         case ELAPSED_WEEKS:
         {
            duration /= (60 * 24 * 7);
            break;
         }

         case MONTHS:
         {
            if (minutesPerDay != 0 && daysPerMonth != 0)
            {
               duration /= (minutesPerDay * daysPerMonth);
            }
            else
            {
               duration = 0;
            }
            break;
         }

         case ELAPSED_MONTHS:
         {
            duration /= (60 * 24 * 30);
            break;
         }

         case YEARS:
         {
            if (minutesPerWeek != 0)
            {
               duration /= (minutesPerWeek * 52);
            }
            else
            {
               duration = 0;
            }
            break;
         }

         case ELAPSED_YEARS:
         {
            duration /= (60 * 24 * 7 * 52);
            break;
         }

         default:
         {
            break;
         }
      }
   }

   return (Duration.getInstance(duration, toUnits));
}
|
java
|
/**
 * Fluent setter for the training parameters.
 *
 * @param parameters algorithm-specific training parameters
 * @return this request, for call chaining
 */
public CreateMLModelRequest withParameters(java.util.Map<String, String> parameters) {
    setParameters(parameters);
    return this;
}
|
python
|
def load_fasttext_format(cls, path, ctx=cpu(), **kwargs):
    """Create an instance of the class and load weights.

    Load the weights from the fastText binary format created by
    https://github.com/facebookresearch/fastText

    Parameters
    ----------
    path : str
        Path to the .bin model file.
    ctx : mx.Context, default mx.cpu()
        Context to initialize the weights on.
    kwargs : dict
        Keyword arguments are passed to the class initializer.

    Returns
    -------
    An initialized instance of ``cls`` with the loaded embedding matrix.
    """
    # Read header, vocabulary and the dense embedding matrix from the
    # binary file in fastText's own layout.
    with open(path, 'rb') as f:
        new_format, dim, bucket, minn, maxn, = cls._read_model_params(f)
        idx_to_token = cls._read_vocab(f, new_format)
        dim, matrix = cls._read_vectors(f, new_format, bucket,
                                        len(idx_to_token))
    token_to_idx = {token: idx for idx, token in enumerate(idx_to_token)}
    if len(token_to_idx) != len(idx_to_token):
        # If multiple tokens with invalid encoding were collapsed in a
        # single token due to replacement of invalid bytes with Unicode
        # replacement character
        warnings.warn(
            'There are duplicate tokens in the embedding file. '
            'This is likely due to decoding errors for some tokens, '
            'where invalid bytes were replaced by '
            'the Unicode replacement character. '
            'This affects {} tokens.'.format(
                len(idx_to_token) - len(token_to_idx)))
        for _ in range(len(token_to_idx), len(idx_to_token)):
            # Add pseudo tokens to make sure length is the same
            token_to_idx[object()] = -1
    assert len(token_to_idx) == len(idx_to_token)
    # The remaining rows of the matrix beyond the vocabulary belong to
    # the n-gram hash buckets handled by the subword function.
    subword_function = create_subword_function(
        'NGramHashes', num_subwords=matrix.shape[0] - len(idx_to_token),
        ngrams=list(range(minn, maxn + 1)), special_tokens={'</s>'})
    self = cls(token_to_idx, subword_function, output_dim=dim, **kwargs)
    self.initialize(ctx=ctx)
    self.weight.set_data(nd.array(matrix))
    return self
|
java
|
public PdfPatternPainter createPattern(float width, float height, float xstep, float ystep, Color color) {
    // A pattern tile cannot repeat with a zero step in either direction.
    checkWriter();
    if (xstep == 0.0f || ystep == 0.0f) {
        throw new RuntimeException("XStep or YStep can not be ZERO.");
    }
    final PdfPatternPainter pattern = new PdfPatternPainter(writer, color);
    pattern.setWidth(width);
    pattern.setHeight(height);
    pattern.setXStep(xstep);
    pattern.setYStep(ystep);
    writer.addSimplePattern(pattern);
    return pattern;
}
|
python
|
def _get_namespace_filter(
taxon_filter: Optional[int]=None,
category_filter: Optional[str]=None) -> Union[None, str]:
"""
Given either a taxon and/or category, return the correct namespace
:raises ValueError: If category is provided without a taxon
"""
namespace_filter = None
taxon_category_default = {
10090: 'gene',
9606: 'disease',
7227: 'gene',
6239: 'gene',
7955: 'gene'
}
if category_filter is not None and taxon_filter is None:
raise ValueError("Must provide taxon filter along with category")
elif category_filter is None and taxon_filter is not None:
category_filter = taxon_category_default[taxon_filter]
else:
return namespace_filter
return OwlSim2Api.TAX_TO_NS[taxon_filter][category_filter.lower()]
|
python
|
def cds_length_of_associated_transcript(effect):
    """
    Length of coding sequence of transcript associated with effect,
    if there is one (otherwise return 0).
    """
    def _coding_length(transcript):
        # Incomplete transcripts or those without a coding sequence count as 0.
        if transcript.complete and transcript.coding_sequence:
            return len(transcript.coding_sequence)
        return 0
    return apply_to_transcript_if_exists(
        effect=effect,
        fn=_coding_length,
        default=0)
|
java
|
public Graph<T> reduce(Graph<T> g) {
    // Transitive reduction relative to a required subgraph g: g must be a
    // subgraph of this graph (same-or-fewer nodes, same-or-fewer edges).
    boolean subgraph = nodes.containsAll(g.nodes) &&
            g.edges.keySet().stream()
                .allMatch(u -> adjacentNodes(u).containsAll(g.adjacentNodes(u)));
    if (!subgraph) {
        throw new IllegalArgumentException(g + " is not a subgraph of " + this);
    }
    Builder<T> builder = new Builder<>();
    nodes.stream()
        .forEach(u -> {
            builder.addNode(u);
            // filter the edge if there exists a path from u to v in the given g
            // or there exists another path from u to v in this graph
            edges.get(u).stream()
                .filter(v -> !g.pathExists(u, v) && !pathExists(u, v, false))
                .forEach(v -> builder.addEdge(u, v));
        });
    // add the overlapped edges from this graph and the given g
    g.edges().keySet().stream()
        .forEach(u -> g.adjacentNodes(u).stream()
            .filter(v -> isAdjacent(u, v))
            .forEach(v -> builder.addEdge(u, v)));
    // Recurse until a fixed point is reached and no further edges drop out.
    return builder.build().reduce();
}
|
java
|
public static Map.Entry<String, Map<String, ?>> networkSpeedCommand(
        NetworkSpeed networkSpeed) {
    // Build the "netspeed" console argument from the enum constant's name.
    final Map<String, ?> arguments =
            prepareArguments("netspeed", networkSpeed.name().toLowerCase());
    return new AbstractMap.SimpleEntry<>(NETWORK_SPEED, arguments);
}
|
java
|
/**
 * Parse an Inkscape SVG document from the stream into a Diagram.
 * External entities are resolved to empty input so no network/file access
 * happens during parsing.
 */
private Diagram loadDiagram(InputStream in, boolean offset)
        throws SlickException {
    try {
        DocumentBuilderFactory factory = DocumentBuilderFactory
                .newInstance();
        factory.setValidating(false);
        factory.setNamespaceAware(true);
        DocumentBuilder builder = factory.newDocumentBuilder();
        builder.setEntityResolver(new EntityResolver() {
            public InputSource resolveEntity(String publicId,
                    String systemId) throws SAXException, IOException {
                // Short-circuit DTD/entity fetches with empty content.
                return new InputSource(
                        new ByteArrayInputStream(new byte[0]));
            }
        });
        Document doc = builder.parse(in);
        Element root = doc.getDocumentElement();
        // Strip a trailing unit suffix (e.g. "px", "mm") from width/height.
        String widthString = root.getAttribute("width");
        while (Character.isLetter(widthString
                .charAt(widthString.length() - 1))) {
            widthString = widthString.substring(0, widthString.length() - 1);
        }
        String heightString = root.getAttribute("height");
        while (Character.isLetter(heightString
                .charAt(heightString.length() - 1))) {
            heightString = heightString.substring(0,heightString.length() - 1);
        }
        float docWidth = Float.parseFloat(widthString);
        float docHeight = Float.parseFloat(heightString);
        diagram = new Diagram(docWidth, docHeight);
        if (!offset) {
            // No vertical offset requested: translate by zero below.
            docHeight = 0;
        }
        loadChildren(root, Transform
                .createTranslateTransform(0, -docHeight));
        return diagram;
    } catch (Exception e) {
        throw new SlickException("Failed to load inkscape document", e);
    }
}
|
python
|
def _read_data(fname):
"""Reads data from file.
Reads the data in 'fname' into a list where each list entry contains
[energy predicted, energy calculated, list of concentrations].
Parameters
----------
fname : str
The name and path to the data file.
Returns
-------
energy : list of lists of floats
A list of the energies and the concentrations.
"""
energy = []
with open(fname,'r') as f:
for line in f:
CE = abs(float(line.strip().split()[0]))
VASP = abs(float(line.strip().split()[1]))
conc = [i for i in line.strip().split()[2:]]
conc_f = []
for c in conc:
if '[' in c and ']' in c:
conc_f.append(int(c[1:-1]))
elif '[' in c:
conc_f.append(int(c[1:-1]))
elif ']' in c or ',' in c:
conc_f.append(int(c[:-1]))
else:
conc_f.append(int(c))
energy.append([CE,VASP,conc_f])
return energy
|
python
|
def stream_user(self, listener, run_async=False, timeout=__DEFAULT_STREAM_TIMEOUT, reconnect_async=False, reconnect_async_wait_sec=__DEFAULT_STREAM_RECONNECT_WAIT_SEC):
    """
    Streams events that are relevant to the authorized user, i.e. home
    timeline and notifications.

    Thin wrapper over the generic streaming entry point; all connection
    behaviour (async mode, timeout, reconnect policy) is forwarded as-is.
    Defaults come from the class-private constants (note the name-mangled
    ``__DEFAULT_*`` identifiers resolve against the enclosing class).
    """
    return self.__stream('/api/v1/streaming/user', listener, run_async=run_async, timeout=timeout, reconnect_async=reconnect_async, reconnect_async_wait_sec=reconnect_async_wait_sec)
|
python
|
def redact_image(
    self,
    parent,
    inspect_config=None,
    image_redaction_configs=None,
    include_findings=None,
    byte_item=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Redacts potentially sensitive info from an image.
    This method has limits on input size, processing time, and output size.
    See https://cloud.google.com/dlp/docs/redacting-sensitive-data-images to
    learn more.
    When no InfoTypes or CustomInfoTypes are specified in this request, the
    system will automatically choose what detectors to run. By default this may
    be all types, but may change over time as detectors are updated.
    Example:
        >>> from google.cloud import dlp_v2
        >>>
        >>> client = dlp_v2.DlpServiceClient()
        >>>
        >>> parent = client.project_path('[PROJECT]')
        >>>
        >>> response = client.redact_image(parent)
    Args:
        parent (str): The parent resource name, for example projects/my-project-id.
        inspect_config (Union[dict, ~google.cloud.dlp_v2.types.InspectConfig]): Configuration for the inspector.
            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.dlp_v2.types.InspectConfig`
        image_redaction_configs (list[Union[dict, ~google.cloud.dlp_v2.types.ImageRedactionConfig]]): The configuration for specifying what content to redact from images.
            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.dlp_v2.types.ImageRedactionConfig`
        include_findings (bool): Whether the response should include findings along with the redacted
            image.
        byte_item (Union[dict, ~google.cloud.dlp_v2.types.ByteContentItem]): The content must be PNG, JPEG, SVG or BMP.
            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.dlp_v2.types.ByteContentItem`
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.
    Returns:
        A :class:`~google.cloud.dlp_v2.types.RedactImageResponse` instance.
    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    # The wrapped callable is cached on first use so retry/timeout config
    # is only resolved once per client.
    if "redact_image" not in self._inner_api_calls:
        self._inner_api_calls[
            "redact_image"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.redact_image,
            default_retry=self._method_configs["RedactImage"].retry,
            default_timeout=self._method_configs["RedactImage"].timeout,
            client_info=self._client_info,
        )
    request = dlp_pb2.RedactImageRequest(
        parent=parent,
        inspect_config=inspect_config,
        image_redaction_configs=image_redaction_configs,
        include_findings=include_findings,
        byte_item=byte_item,
    )
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    # Attach a routing header so the backend can route by resource name.
    try:
        routing_header = [("parent", parent)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)
    return self._inner_api_calls["redact_image"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
|
python
|
def p_assignment_delay(self, p):
    # NOTE: the string below is a PLY (yacc) grammar production, not prose
    # documentation -- it is parsed by the generator and must not be edited.
    'assignment : ASSIGN delays lvalue EQUALS delays rvalue SEMICOLON'
    # p[3]=lvalue, p[6]=rvalue, p[2]=delay before '=', p[5]=delay after '='.
    p[0] = Assign(p[3], p[6], p[2], p[5], lineno=p.lineno(1))
    # Propagate the line number of the leading ASSIGN token to the result.
    p.set_lineno(0, p.lineno(1))
|
python
|
def get_created_date_metadata(self):
    """Gets the metadata for the asset creation date.

    return: (osid.Metadata) - metadata for the created date
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
    metadata = dict(
        self._mdata['created_date'],
        existing_date_time_values=self._my_map['createdDate'])
    return Metadata(**metadata)
|
python
|
def _add_or_remove_flag(self, flag, add):
"""
Add the given `flag` if `add` is True, remove it otherwise.
"""
meth = self.add_flag if add else self.remove_flag
meth(flag)
|
java
|
private MutationStatus incrCoords(KeySpec ks) {
    final StorageVBucketCoordinates curCoord;
    // Only the array-slot read is guarded here; the subsequent increment
    // presumably relies on curCoord's own internal synchronization
    // (incrSeqno) -- NOTE(review): confirm StorageVBucketCoordinates is
    // thread-safe for concurrent incrSeqno/getUuid calls.
    synchronized (vbCoords) {
        curCoord = vbCoords[ks.vbId];
    }
    long seq = curCoord.incrSeqno();
    long uuid = curCoord.getUuid();
    // Snapshot the (uuid, seqno) pair for the mutation result.
    VBucketCoordinates coord = new BasicVBucketCoordinates(uuid, seq);
    return new MutationStatus(coord);
}
|
java
|
/**
 * Resolve {@code file} to a PathResource, enforcing case-sensitive path
 * matching when configured. When a symlink base is given, the resource is
 * accepted only if the symlink-relative path matches the real (resolved)
 * path with identical case; otherwise a simple per-file case check is used.
 * Returns {@code null} when the case does not match (treated as not found).
 */
protected PathResource getFileResource(final Path file, final String path, final Path symlinkBase, String normalizedFile) throws IOException {
    if (this.caseSensitive) {
        if (symlinkBase != null) {
            String relative = symlinkBase.relativize(file.normalize()).toString();
            String fileResolved = file.toRealPath().toString();
            String symlinkBaseResolved = symlinkBase.toRealPath().toString();
            // Real path escaping the resolved symlink base means the
            // request path does not correspond to this resource.
            if (!fileResolved.startsWith(symlinkBaseResolved)) {
                log.tracef("Rejected path resource %s from path resource manager with base %s, as the case did not match actual case of %s", path, base, normalizedFile);
                return null;
            }
            // Compare the symlink-relative request path against the real
            // on-disk remainder, ignoring a leading separator on either.
            String compare = fileResolved.substring(symlinkBaseResolved.length());
            if(compare.startsWith(fileSystem.getSeparator())) {
                compare = compare.substring(fileSystem.getSeparator().length());
            }
            if(relative.startsWith(fileSystem.getSeparator())) {
                relative = relative.substring(fileSystem.getSeparator().length());
            }
            if (relative.equals(compare)) {
                log.tracef("Found path resource %s from path resource manager with base %s", path, base);
                return new PathResource(file, this, path, eTagFunction.generate(file));
            }
            log.tracef("Rejected path resource %s from path resource manager with base %s, as the case did not match actual case of %s", path, base, normalizedFile);
            return null;
        } else if (isFileSameCase(file, normalizedFile)) {
            log.tracef("Found path resource %s from path resource manager with base %s", path, base);
            return new PathResource(file, this, path, eTagFunction.generate(file));
        } else {
            log.tracef("Rejected path resource %s from path resource manager with base %s, as the case did not match actual case of %s", path, base, normalizedFile);
            return null;
        }
    } else {
        // Case-insensitive mode: accept the resolved file as-is.
        log.tracef("Found path resource %s from path resource manager with base %s", path, base);
        return new PathResource(file, this, path, eTagFunction.generate(file));
    }
}
|
java
|
public static Set<String> listDeployments(Resource deploymentRootResource, Set<String> runtimeNames) {
    // Compile each wildcard runtime-name expression into a regex pattern,
    // then delegate the tree walk to listDeploymentNames.
    final Set<Pattern> patterns = new HashSet<>();
    for (String expression : runtimeNames) {
        patterns.add(DeploymentOverlayIndex.getPattern(expression));
    }
    return listDeploymentNames(deploymentRootResource, patterns);
}
|
python
|
def clientUpdated(self, *args, **kwargs):
    """
    Client Updated Messages

    Message that a new client has been updated.

    This exchange outputs: ``v1/client-message.json#``This exchange takes the following keys:

    * reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
    """
    # Single multi-word routing-key slot, reserved for future use.
    reserved_entry = {
        'multipleWords': True,
        'name': 'reserved',
    }
    ref = {
        'exchange': 'client-updated',
        'name': 'clientUpdated',
        'routingKey': [reserved_entry],
        'schema': 'v1/client-message.json#',
    }
    return self._makeTopicExchange(ref, *args, **kwargs)
|
java
|
public static String getModifierSuffix(String fullName, String baseName) {
    // Identical names mean no modifier suffix is present.
    if (fullName.equals(baseName)) {
        return null;
    }
    // Everything from the opening modifier token onward is the suffix.
    return fullName.substring(fullName.indexOf(MODIFIER_OPENING_TOKEN));
}
|
java
|
/**
 * Build an inline &lt;style&gt; element for report output: the VFS report.css
 * if it can be read, otherwise a hard-coded default stylesheet.
 */
public static String generateCssStyle(CmsObject cms) {
    StringBuffer result = new StringBuffer(128);
    result.append("<style type='text/css'>\n");
    String contents = "";
    try {
        contents = new String(
            cms.readFile(CmsWorkplace.VFS_PATH_COMMONS + "style/report.css").getContents(),
            OpenCms.getSystemInfo().getDefaultEncoding());
    } catch (Exception e) {
        // ignore
    }
    if (CmsStringUtil.isEmpty(contents)) {
        // css file not found, create default styles
        result.append(
            "body { box-sizing: border-box; -moz-box-sizing: border-box; padding: 2px; margin: 0; color: /*begin-color WindowText*/#000000/*end-color*/; background-color: /*begin-color Window*/#ffffff/*end-color*/; font-family: Verdana, Arial, Helvetica, sans-serif; font-size: 11px; }\n");
        result.append(
            "div.main { box-sizing: border-box; -moz-box-sizing: border-box; color: /*begin-color WindowText*/#000000/*end-color*/; white-space: nowrap; }\n");
        result.append("span.head { color: #000099; font-weight: bold; }\n");
        result.append("span.note { color: #666666; }\n");
        result.append("span.ok { color: #009900; }\n");
        result.append("span.warn { color: #990000; padding-left: 40px; }\n");
        result.append("span.err { color: #990000; font-weight: bold; padding-left: 40px; }\n");
        result.append("span.throw { color: #990000; font-weight: bold; }\n");
        result.append("span.link1 { color: #666666; }\n");
        result.append("span.link2 { color: #666666; padding-left: 40px; }\n");
        // NOTE(review): "span.link2" is declared twice; the second rule
        // overrides the first's color. Possibly a typo for another selector
        // (e.g. span.link3) -- confirm against the report renderer.
        result.append("span.link2 { color: #990000; }\n");
    } else {
        result.append(contents);
    }
    result.append("</style>\n");
    return result.toString();
}
|
python
|
def get_snapshot(self, snapshot_id_or_uri, volume_id_or_uri=None):
    """
    Gets a snapshot of a volume.

    Args:
        volume_id_or_uri:
            Can be either the volume ID or the volume URI. It is optional if it is passed a snapshot URI,
            but required if it passed a snapshot ID.
        snapshot_id_or_uri:
            Can be either the snapshot ID or the snapshot URI.

    Returns:
        dict: The snapshot.
    """
    snapshot_uri = self.__build_volume_snapshot_uri(
        volume_id_or_uri, snapshot_id_or_uri)
    return self._client.get(snapshot_uri)
|
java
|
/**
 * Async create-or-update of a role definition at the given scope.
 * Delegates to the ServiceResponse variant and adapts it to a
 * ServiceFuture, notifying {@code serviceCallback} on completion.
 */
public ServiceFuture<RoleDefinitionInner> createOrUpdateAsync(String scope, String roleDefinitionId, RoleDefinitionProperties properties, final ServiceCallback<RoleDefinitionInner> serviceCallback) {
    return ServiceFuture.fromResponse(createOrUpdateWithServiceResponseAsync(scope, roleDefinitionId, properties), serviceCallback);
}
|
python
|
def sort(self):
    """Sort variants from most correct to consume, to least.

    Sort rules:

    version_priority:
    - sort by highest versions of packages shared with request;
    - THEN least number of additional packages added to solve;
    - THEN highest versions of additional packages;
    - THEN alphabetical on name of additional packages;
    - THEN variant index.

    intersection_priority:
    - sort by highest number of packages shared with request;
    - THEN sort according to version_priority

    Note:
        In theory 'variant.index' should never factor into the sort unless
        two variants are identical (which shouldn't happen) - this is just
        here as a safety measure so that sorting is guaranteed repeatable
        regardless.
    """
    # Idempotent: sorting is done at most once per instance.
    if self.sorted:
        return

    def key(variant):
        # Packages shared with the request: later requests weigh less (-i),
        # and higher version ranges sort later (reverse=True puts them first).
        requested_key = []
        names = set()
        for i, request in enumerate(self.solver.request_list):
            if not request.conflict:
                req = variant.requires_list.get(request.name)
                if req is not None:
                    requested_key.append((-i, req.range))
                    names.add(req.name)

        # Packages the variant pulls in beyond the request.
        additional_key = []
        for request in variant.requires_list:
            if not request.conflict and request.name not in names:
                additional_key.append((request.range, request.name))

        if (VariantSelectMode[config.variant_select_mode] ==
                VariantSelectMode.version_priority):
            k = (requested_key,
                 -len(additional_key),
                 additional_key,
                 variant.index)
        else:  # VariantSelectMode.intersection_priority
            # Count of shared packages dominates, then the same ordering.
            k = (len(requested_key),
                 requested_key,
                 -len(additional_key),
                 additional_key,
                 variant.index)
        return k

    self.variants.sort(key=key, reverse=True)
    self.sorted = True
|
java
|
/**
 * Collect the element for each flow-analyzed environment -- class symbols
 * for class declarations, package symbols for top-level units -- and queue
 * every environment for code generation.
 */
private void handleFlowResults(Queue<Env<AttrContext>> queue, ListBuffer<Element> elems) {
    for (Env<AttrContext> env: queue) {
        switch (env.tree.getTag()) {
            case CLASSDEF:
                JCClassDecl cdef = (JCClassDecl) env.tree;
                if (cdef.sym != null)
                    elems.append(cdef.sym);
                break;
            case TOPLEVEL:
                JCCompilationUnit unit = (JCCompilationUnit) env.tree;
                if (unit.packge != null)
                    elems.append(unit.packge);
                break;
        }
    }
    // All environments proceed to generation even if no symbol was appended.
    genList.addAll(queue);
}
|
java
|
/**
 * Variant of TO(Class) that resolves the target type from its name at
 * runtime and wraps the result as a generic DomainObject match.
 */
public DomainObjectMatch<DomainObject> TO_GENERIC(String domainObjectTypeName) {
    Boolean br_old = null;
    try {
        TraversalExpression te = (TraversalExpression)this.astObject;
        InternalDomainAccess iAccess = te.getQueryExecutor().getMappingInfo().getInternalDomainAccess();
        iAccess.loadDomainInfoIfNeeded();
        Class<?> clazz = iAccess.getClassForName(domainObjectTypeName);
        // Suppress query recording around the inner TO(...) call so only
        // the TO_GENERIC assignment below is recorded.
        br_old = QueryRecorder.blockRecording.get();
        QueryRecorder.blockRecording.set(Boolean.TRUE);
        DomainObjectMatch<?> delegate = TO(clazz);
        QueryRecorder.blockRecording.set(br_old);
        DomainObjectMatch<DomainObject> ret = APIAccess.createDomainObjectMatch(DomainObject.class, delegate);
        QueryRecorder.recordAssignment(this, "TO_GENERIC", delegate, QueryRecorder.literal(domainObjectTypeName));
        return ret;
    } catch (Throwable e) {
        if (e instanceof RuntimeException)
            throw (RuntimeException)e;
        else
            throw new RuntimeException(e);
    } finally {
        // Restore the previous recording state even on exception paths.
        if (br_old != null)
            QueryRecorder.blockRecording.set(br_old);
    }
}
|
java
|
public String getValue(char name, String defaultValue) {
    // Delegate to the String-keyed overload using the one-character name.
    final String key = String.valueOf(name);
    return getValue(key, defaultValue);
}
|
java
|
public static String getPrimitiveDefault(Class type) {
    // Source-literal default for a boxed primitive: "false" for Boolean,
    // the NUL character for Character, and "0" for every numeric type.
    if (Boolean.class.equals(type)) {
        return "false";
    }
    if (Character.class.equals(type)) {
        return String.valueOf((char) 0);
    }
    return "0";
}
|
python
|
def query_accession():
    # NOTE: the docstring below is a flasgger/Swagger API specification --
    # it is parsed to generate the API docs and must keep its YAML shape.
    """
    Returns list of accession numbers by query query parameters
    ---
    tags:
      - Query functions
    parameters:
      - name: accession
        in: query
        type: string
        required: false
        description: UniProt accession number
        default: P05067
      - name: entry_name
        in: query
        type: string
        required: false
        description: UniProt entry name
        default: A4_HUMAN
      - name: limit
        in: query
        type: integer
        required: false
        description: limit of results numbers
        default: 10
    """
    # Whitelist the accepted query-string arguments, coercing 'limit' to int.
    args = get_args(
        request_args=request.args,
        allowed_str_args=['accession', 'entry_name'],
        allowed_int_args=['limit']
    )
    return jsonify(query.accession(**args))
|
java
|
public static String rawTypeToString(TypeMirror type, char innerClassSeparator) {
    // Only declared (class/interface) types have a raw-type rendering.
    if (!(type instanceof DeclaredType)) {
        throw new IllegalArgumentException("Unexpected type: " + type);
    }
    final DeclaredType declaredType = (DeclaredType) type;
    final StringBuilder out = new StringBuilder();
    rawTypeToString(out, (TypeElement) declaredType.asElement(), innerClassSeparator);
    return out.toString();
}
|
java
|
/**
 * Drive the full parse of a document: read the input in chunks into a
 * pooled buffer, grow or compact the buffer as needed between reads, feed
 * each chunk to {@code parseBuffer}, and flush any trailing text at EOF.
 * The reader and buffer are always released, even on failure.
 */
void parseDocument(
        final Reader reader, final int suggestedBufferSize,
        final IMarkupHandler handler, final ParseStatus status)
        throws ParseException {

    final long parsingStartTimeNanos = System.nanoTime();

    char[] buffer = null;

    try {

        handler.handleDocumentStart(parsingStartTimeNanos, 1, 1);

        int bufferSize = suggestedBufferSize;
        buffer = this.pool.allocateBuffer(bufferSize);

        int bufferContentSize = reader.read(buffer);

        boolean cont = (bufferContentSize != -1);

        status.offset = -1;
        status.line = 1;
        status.col = 1;
        status.inStructure = false;
        status.parsingDisabled = true;
        status.parsingDisabledLimitSequence = null;
        status.autoCloseRequired = null;
        status.autoCloseLimits = null;

        while (cont) {

            parseBuffer(buffer, 0, bufferContentSize, handler, status);

            int readOffset = 0;
            int readLen = bufferSize;

            if (status.offset == 0) {

                if (bufferContentSize == bufferSize) {
                    // Buffer is not big enough, double it!

                    char[] newBuffer = null;
                    try {
                        bufferSize *= 2;

                        newBuffer = this.pool.allocateBuffer(bufferSize);
                        System.arraycopy(buffer, 0, newBuffer, 0, bufferContentSize);

                        this.pool.releaseBuffer(buffer);

                        buffer = newBuffer;

                    } catch (final Exception ignored) {
                        // Allocation failed: release the half-built buffer
                        // and keep parsing with the old (smaller) one.
                        this.pool.releaseBuffer(newBuffer);
                    }

                }

                // it's possible for two reads to occur in a row and 1) read less than the bufferSize and 2)
                // still not find the next tag/end of structure
                readOffset = bufferContentSize;
                readLen = bufferSize - readOffset;

            } else if (status.offset < bufferContentSize) {

                // Compact: move the unconsumed tail to the front of the
                // buffer so the next read appends after it.
                System.arraycopy(buffer, status.offset, buffer, 0, bufferContentSize - status.offset);

                readOffset = bufferContentSize - status.offset;
                readLen = bufferSize - readOffset;

                status.offset = 0;

                bufferContentSize = readOffset;

            }

            final int read = reader.read(buffer, readOffset, readLen);
            if (read != -1) {
                bufferContentSize = readOffset + read;
            } else {
                cont = false;
            }

        }

        // Iteration done, now it's time to clean up in case we still have some text to be notified

        int lastLine = status.line;
        int lastCol = status.col;

        final int lastStart = status.offset;
        final int lastLen = bufferContentSize - lastStart;

        if (lastLen > 0) {

            if (status.inStructure) {
                throw new ParseException(
                        "Incomplete structure: \"" + new String(buffer, lastStart, lastLen) + "\"", status.line, status.col);
            }

            handler.handleText(buffer, lastStart, lastLen, status.line, status.col);

            // As we have produced an additional text event, we need to fast-forward the
            // lastLine and lastCol position to include the last text structure.
            for (int i = lastStart; i < (lastStart + lastLen); i++) {
                final char c = buffer[i];
                if (c == '\n') {
                    lastLine++;
                    lastCol = 1;
                } else {
                    lastCol++;
                }
            }

        }

        final long parsingEndTimeNanos = System.nanoTime();
        handler.handleDocumentEnd(parsingEndTimeNanos, (parsingEndTimeNanos - parsingStartTimeNanos), lastLine, lastCol);

    } catch (final ParseException e) {
        throw e;
    } catch (final Exception e) {
        throw new ParseException(e);
    } finally {
        // Always return the buffer to the pool and close the reader.
        this.pool.releaseBuffer(buffer);
        try {
            reader.close();
        } catch (final Throwable ignored) {
            // This exception can be safely ignored
        }
    }

}
|
java
|
public int getStatus() throws SystemException
{
    // Without an active transaction, report STATUS_NO_TRANSACTION.
    final Transaction tx = registry.getTransaction();
    return (tx == null) ? Status.STATUS_NO_TRANSACTION : tx.getStatus();
}
|
python
|
def point_on_line(ab, c):
    '''
    point_on_line((a,b), c) yields True if point c is on line (a,b) and False otherwise.
    '''
    (a,b) = ab
    abc = [np.asarray(u) for u in (a,b,c)]
    # Flatten any 2D inputs into (dims, n) stacks so the math broadcasts.
    if any(len(u.shape) == 2 for u in abc): (a,b,c) = [np.reshape(u,(len(u),-1)) for u in abc]
    else: (a,b,c) = abc
    # Vectors from c to each endpoint, and along the segment itself.
    vca = a - c
    vcb = b - c
    # BUGFIX: vba was referenced below but never defined (NameError).
    vba = b - a
    uba = czdivide(vba, np.sqrt(np.sum(vba**2, axis=0)))
    uca = czdivide(vca, np.sqrt(np.sum(vca**2, axis=0)))
    # On-line iff c coincides with a or b, or c->a is (anti)parallel to b->a.
    return (np.isclose(np.sqrt(np.sum(vca**2, axis=0)), 0) |
            np.isclose(np.sqrt(np.sum(vcb**2, axis=0)), 0) |
            np.isclose(np.abs(np.sum(uba*uca, axis=0)), 1))
|
java
|
/**
 * Fetch one page of certificate versions and recursively chain the
 * following pages: each emitted page is concatenated with the observable
 * for its {@code nextPageLink}, stopping when the link is null.
 */
public Observable<ServiceResponse<Page<CertificateItem>>> getCertificateVersionsNextWithServiceResponseAsync(final String nextPageLink) {
    return getCertificateVersionsNextSinglePageAsync(nextPageLink)
        .concatMap(new Func1<ServiceResponse<Page<CertificateItem>>, Observable<ServiceResponse<Page<CertificateItem>>>>() {
            @Override
            public Observable<ServiceResponse<Page<CertificateItem>>> call(ServiceResponse<Page<CertificateItem>> page) {
                String nextPageLink = page.body().nextPageLink();
                if (nextPageLink == null) {
                    // Last page: stop the recursion.
                    return Observable.just(page);
                }
                return Observable.just(page).concatWith(getCertificateVersionsNextWithServiceResponseAsync(nextPageLink));
            }
        });
}
|
python
|
def add_filter_by_regex(self, regex_expression, filter_type=DefaultFilterType):
    """
    Add a files filter by regex to this iterator.

    :param regex_expression: regex string to apply.
    :return: this iterator, so calls can be chained.
    """
    regex_filter = FilterRegex(regex_expression)
    self.add_filter(regex_filter, filter_type)
    return self
|
java
|
/**
 * Lazily resolve and cache the proxy interface class from {@code interfaceId}.
 * The id must name an interface (not an implementation class) and must be set.
 */
@Override
public Class<?> getProxyClass() {
    if (proxyClass != null) {
        // Already resolved: reuse the cached class.
        return proxyClass;
    }
    try {
        if (StringUtils.isNotBlank(interfaceId)) {
            this.proxyClass = ClassUtils.forName(interfaceId);
            if (!proxyClass.isInterface()) {
                throw ExceptionUtils.buildRuntime("service.interfaceId",
                    interfaceId, "interfaceId must set interface class, not implement class");
            }
        } else {
            throw ExceptionUtils.buildRuntime("service.interfaceId",
                "null", "interfaceId must be not null");
        }
    } catch (SofaRpcRuntimeException e) {
        throw e;
    } catch (Throwable e) {
        // Wrap class-loading failures in the framework runtime exception.
        throw new SofaRpcRuntimeException(e.getMessage(), e);
    }
    return proxyClass;
}
|
python
|
def start_processes(self, max_restarts=-1):
    """
    Start processes and check their status. When some process crashes,
    start it again. *max_restarts* is maximum amount of the restarts
    across all processes. *processes* is a :class:`list` of the
    :class:`ProcessWrapper` instances.
    """
    # Supervision loop: poll each wrapper, restarting dead processes until
    # the restart budget is exhausted.  NOTE(review): `not process` relies
    # on ProcessWrapper's truthiness (presumably "is running") -- confirm
    # against the wrapper's __bool__/__nonzero__ implementation.
    while 1:
        for process in self.processes:
            if not process:
                # When process has not been started, start it
                if not process.has_started:
                    process.start()
                continue
            # When process has stopped, start it again
            exitcode = process.exitcode
            if exitcode != 0:
                # Process has been signaled or crashed
                if exitcode > 0:
                    self.logger.error(
                        "Process '%s' with pid %d died with exitcode "
                        "%d", process.name, process.pid, exitcode
                    )
                else:
                    # Negative exit codes map to the terminating signal.
                    self.logger.error(
                        "Process '%s' with pid %d died due to %s",
                        process.name, process.pid,
                        SIGNALS_TO_NAMES_DICT[abs(exitcode)]
                    )
                # Max restarts has been reached, exit
                if not max_restarts:
                    self.logger.fatal("Too many child restarts")
                    break
                # Start process again
                process.start()
                # Decrement max_restarts counter
                if max_restarts > 0:
                    max_restarts -= 1
            else:
                # Process has stopped without error
                self.logger.info(
                    "Process '%s' with pid %d has stopped",
                    process.name, process.pid
                )
                # Start process again
                process.start()
                self.logger.info(
                    "Process '%s' has been started with pid %d",
                    process.name, process.pid
                )
        else:
            # for-else: no break occurred, so keep supervising.
            time.sleep(0.25)
            continue
        # A break inside the for loop (restart budget exhausted) lands here
        # and terminates the supervision loop.
        break
|
java
|
/**
 * Look up the rel by (uuid, groupId); unlike fetchByUUID_G, a miss raises
 * NoSuchPriceListUserSegmentEntryRelException instead of returning null.
 */
@Override
public CommercePriceListUserSegmentEntryRel findByUUID_G(String uuid,
    long groupId) throws NoSuchPriceListUserSegmentEntryRelException {
    CommercePriceListUserSegmentEntryRel commercePriceListUserSegmentEntryRel =
        fetchByUUID_G(uuid, groupId);
    if (commercePriceListUserSegmentEntryRel == null) {
        // NOTE(review): the opening brace for the trailing "}" is presumably
        // part of _NO_SUCH_ENTITY_WITH_KEY -- confirm the constant's value.
        StringBundler msg = new StringBundler(6);
        msg.append(_NO_SUCH_ENTITY_WITH_KEY);
        msg.append("uuid=");
        msg.append(uuid);
        msg.append(", groupId=");
        msg.append(groupId);
        msg.append("}");
        if (_log.isDebugEnabled()) {
            _log.debug(msg.toString());
        }
        throw new NoSuchPriceListUserSegmentEntryRelException(msg.toString());
    }
    return commercePriceListUserSegmentEntryRel;
}
|
python
|
def setTextEdit( self, textEdit ):
    """
    Sets the text edit that this find widget will use to search.

    :param textEdit | <QTextEdit>
    """
    # Detach the shared find action from the previous editor, if any.
    previous = self._textEdit
    if previous:
        previous.removeAction(self._findAction)
    self._textEdit = textEdit
    # Attach the find action to the new editor when one is provided.
    if textEdit:
        textEdit.addAction(self._findAction)
|
java
|
@Override
public void buttonClick(final ClickEvent event) {
    // Close the dialog if it is still attached; isImplicitClose marks the
    // close as programmatic -- presumably so a close listener can tell it
    // apart from a user-initiated close (NOTE(review): confirm).
    if (window.getParent() != null) {
        isImplicitClose = true;
        UI.getCurrent().removeWindow(window);
    }
    // Report true exactly when the OK button triggered the event.
    callback.response(event.getSource().equals(okButton));
}
|
java
|
public static void enrich(Throwable t, String updateMessage) {
    // Compose a replacement message from the throwable's current message,
    // the update, and its simple class name, then swap it in place.
    final String enriched =
            createMessage(t.getMessage(), updateMessage, t.getClass().getSimpleName());
    replace(t, enriched);
}
|
python
|
def _load_nonlink_level(handler, level, pathtable, pathname):
    """
    Loads level and builds appropriate type, without handling softlinks.

    Dispatches on the PyTables node type: Groups become dicts / lists /
    tuples / SimpleNamespaces / sparse matrices / DataFrames depending on
    the group's title and attributes; VLArrays and Arrays become pickled
    objects, strings, or numpy arrays.
    """
    if isinstance(level, tables.Group):
        # Decide the container type from the group's title/attributes.
        if _sns and (level._v_title.startswith('SimpleNamespace:') or
                     DEEPDISH_IO_ROOT_IS_SNS in level._v_attrs):
            val = SimpleNamespace()
            dct = val.__dict__
        elif level._v_title.startswith('list:'):
            dct = {}
            val = []
        else:
            dct = {}
            val = dct

        # in case of recursion, object needs to be put in pathtable
        # before trying to fully load it
        pathtable[pathname] = val

        # Load sub-groups
        for grp in level:
            lev = _load_level(handler, grp, pathtable)
            n = grp._v_name
            # Check if it's a complicated pair or a string-value pair
            if n.startswith('__pair'):
                dct[lev['key']] = lev['value']
            else:
                dct[n] = lev

        # Load attributes
        for name in level._v_attrs._f_list():
            if name.startswith(DEEPDISH_IO_PREFIX):
                # Internal bookkeeping attributes are not user data.
                continue
            v = level._v_attrs[name]
            dct[name] = v

        if level._v_title.startswith('list:'):
            # List items were stored as 'i0', 'i1', ... entries.
            N = int(level._v_title[len('list:'):])
            for i in range(N):
                val.append(dct['i{}'.format(i)])
            return val
        elif level._v_title.startswith('tuple:'):
            N = int(level._v_title[len('tuple:'):])
            lst = []
            for i in range(N):
                lst.append(dct['i{}'.format(i)])
            return tuple(lst)
        elif level._v_title.startswith('nonetype:'):
            return None
        elif is_pandas_dataframe(level):
            assert _pandas, "pandas is required to read this file"
            store = _HDFStoreWithHandle(handler)
            return store.get(level._v_pathname)
        elif level._v_title.startswith('sparse:'):
            # Rebuild the scipy sparse matrix from its stored components.
            frm = level._v_attrs.format
            if frm in ('csr', 'csc', 'bsr'):
                shape = tuple(level.shape[:])
                cls = {'csr': sparse.csr_matrix,
                       'csc': sparse.csc_matrix,
                       'bsr': sparse.bsr_matrix}
                matrix = cls[frm](shape)
                matrix.data = level.data[:]
                matrix.indices = level.indices[:]
                matrix.indptr = level.indptr[:]
                matrix.maxprint = level._v_attrs.maxprint
                return matrix
            elif frm == 'dia':
                shape = tuple(level.shape[:])
                matrix = sparse.dia_matrix(shape)
                matrix.data = level.data[:]
                matrix.offsets = level.offsets[:]
                matrix.maxprint = level._v_attrs.maxprint
                return matrix
            elif frm == 'coo':
                shape = tuple(level.shape[:])
                matrix = sparse.coo_matrix(shape)
                matrix.data = level.data[:]
                matrix.col = level.col[:]
                matrix.row = level.row[:]
                matrix.maxprint = level._v_attrs.maxprint
                return matrix
            else:
                raise ValueError('Unknown sparse matrix type: {}'.format(frm))
        else:
            return val

    elif isinstance(level, tables.VLArray):
        if level.shape == (1,):
            # Single-entry VLArrays hold pickled Python objects.
            return _load_pickled(level)
        else:
            return level[:]

    elif isinstance(level, tables.Array):
        if 'zeroarray_dtype' in level._v_attrs:
            # Unpack zero-size arrays (shape is stored in an HDF5 array and
            # type is stored in the attibute 'zeroarray_dtype')
            dtype = level._v_attrs.zeroarray_dtype
            sh = level[:]
            return np.zeros(tuple(sh), dtype=dtype)

        if 'strtype' in level._v_attrs:
            # String arrays were stored as raw bytes; reinterpret them.
            strtype = level._v_attrs.strtype
            itemsize = level._v_attrs.itemsize
            if strtype == b'unicode':
                return level[:].view(dtype=(np.unicode_, itemsize))
            elif strtype == b'ascii':
                return level[:].view(dtype=(np.string_, itemsize))

        # This serves two purposes:
        # (1) unpack big integers: the only time we save arrays like this
        # (2) unpack non-deepdish "scalars"
        if level.shape == ():
            return level[()]

        return level[:]
|
python
|
def preprocess_user_variables(userinput):
    """
    <Purpose>
      Command parser for user variables. Takes the raw userinput and replaces
      each user variable with its set value.

    <Arguments>
      userinput: A raw user string

    <Side Effects>
      Each user variable will be replaced by the value that it was previously
      set to.

    <Exceptions>
      UserError: User typed an unrecognized or invalid variable name

    <Returns>
      The preprocessed string
    """
    pieces = []
    while '$' in userinput:
        prefix, _, userinput = userinput.partition('$')
        pieces.append(prefix)
        # '$$' escapes a literal '$'; a trailing '$' is literal as well.
        if not userinput or userinput.startswith('$'):
            pieces.append('$')
            userinput = userinput[1:]
            continue
        # The variable name runs up to the nearest space or '$'; when
        # neither occurs, the rest of the string is the name.
        space_pos = userinput.find(' ')
        dollar_pos = userinput.find('$')
        if space_pos == -1:
            space_pos = len(userinput)
        if dollar_pos == -1:
            dollar_pos = len(userinput)
        name_end = min(space_pos, dollar_pos)
        variable_name = userinput[:name_end]
        # Skip past the delimiter character itself.
        userinput = userinput[name_end + 1:]
        # User may type in a variable that has not yet been defined
        try:
            pieces.append(uservariables[variable_name])
        except KeyError:
            raise seash_exceptions.UserError("Variable does not exist: "+variable_name)
        # A space delimiter is kept after the substituted value, e.g.
        # 'loadkeys $myname as awesome' -> 'loadkeys theusername as awesome'
        if space_pos < dollar_pos:
            pieces.append(' ')
    # Append whatever remains after the final variable reference.
    pieces.append(userinput)
    return ''.join(pieces)
|
python
|
def range(self, axis=None):
    """Return the (min, max) pair of this object along the given axis."""
    lo = self.min(axis=axis)
    hi = self.max(axis=axis)
    return (lo, hi)
|
python
|
def policy_assignment_get(name, scope, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Fetch a single Azure policy assignment and return it as a dictionary.

    :param name: Name of the policy assignment to look up.

    :param scope: Scope at which the policy assignment exists.

    On a cloud error the error is logged and a dict of the form
    ``{'error': <message>}`` is returned instead of the assignment.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_resource.policy_assignment_get testassign \
        /subscriptions/bc75htn-a0fhsi-349b-56gh-4fghti-f84852
    '''
    polconn = __utils__['azurearm.get_client']('policy', **kwargs)
    try:
        assignment = polconn.policy_assignments.get(
            policy_assignment_name=name,
            scope=scope
        )
        return assignment.as_dict()
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
        return {'error': str(exc)}
|
python
|
def get_cmd_line(self):
    """
    Build the full command line used when DAGman runs this node.

    Each tuple from get_cmd_tuple_list() is joined with spaces, and every
    joined piece is followed by a trailing space, matching the original
    accumulation behavior.
    """
    parts = self.get_cmd_tuple_list()
    return "".join(" ".join(part) + " " for part in parts)
|
python
|
def size(self):
    """Total unfiltered size of the view."""
    if self._check_hash_view():
        return 1
    return self.engine.open().view.size(xmlrpc.NOHASH, self.viewname)
|
python
|
def selection_pos(self):
    """Return the start and end positions of the visual selection as a pair."""
    vim_buffer = self._vim.current.buffer
    return vim_buffer.mark('<'), vim_buffer.mark('>')
|
java
|
// ANTLR-generated syntactic predicate: succeeds when the next two tokens are
// both '>' (token type 29), i.e. the '>>' operator split into two tokens.
// Do not edit by hand; regenerated from InternalPureXbase.g.
public final void synpred16_InternalPureXbase_fragment() throws RecognitionException {
    // InternalPureXbase.g:1679:6: ( ( '>' '>' ) )
    // InternalPureXbase.g:1679:7: ( '>' '>' )
    {
    // InternalPureXbase.g:1679:7: ( '>' '>' )
    // InternalPureXbase.g:1680:7: '>' '>'
    {
    // Match the two consecutive '>' tokens; bail out silently on failure
    // (state.failed is how ANTLR predicates report a non-match).
    match(input,29,FOLLOW_26); if (state.failed) return ;
    match(input,29,FOLLOW_2); if (state.failed) return ;
    }
    }
}
|
java
|
/**
 * Builds a unique location under the journal's temporary directory for a
 * checkpoint file that is still being written.
 *
 * @param journal the journal whose temporary directory is used
 * @return a URI for a fresh, randomly named temporary checkpoint file
 */
static URI encodeTemporaryCheckpointFileLocation(UfsJournal journal) {
    String uniqueName = UUID.randomUUID().toString();
    return URIUtils.appendPathOrDie(journal.getTmpDir(), uniqueName);
}
|
java
|
@Override
public void handle(CommandContext ctx) throws CommandLineException {
    recognizeArguments(ctx);
    // --help short-circuits everything else, including availability checks.
    final boolean helpRequested = helpArg.isPresent(ctx.getParsedCommandLine());
    if (helpRequested) {
        printHelp(ctx);
        return;
    }
    if (!isAvailable(ctx)) {
        throw new CommandFormatException("The command is not available in the current context (e.g. required subsystems or connection to the controller might be unavailable).");
    }
    doHandle(ctx);
}
|
python
|
def doBandTrapz(Aein, lambnew, fc, kin, lamb, ver, z, br):
    """
    ver dimensions: wavelength, altitude, time
    A and lambda dimensions:
    axis 0 is upper state vib. level (nu')
    axis 1 is bottom state vib level (nu'')
    there is a Franck-Condon parameter (variable fc) for each upper state nu''
    """
    # tau: inverse of the Einstein coefficients summed over lower states
    # (NaN entries are ignored by nansum).
    tau = 1 / np.nansum(Aein, axis=1)
    # Per-line weights, flattened column-major to match the line ordering.
    weights = (Aein * tau[:, None] * fc[:, None]).ravel(order='F')
    vnew = weights[None, None, :] * kin.values[..., None]
    return catvl(z, ver, vnew, lamb, lambnew, br)
|
python
|
def get_topics(yaml_info):
    '''Return the names of all of the topics in the bag.

    Reads the 'topics' entry of the bag's YAML info and collects each
    entry's 'topic' field, preserving order.
    '''
    return [entry['topic'] for entry in yaml_info['topics']]
|
java
|
/**
 * Sets the ACL of a bucket.  Exactly one of the three ACL representations on
 * the request is used, checked in this order: canned ACL (header only),
 * structured access-control list (serialized to JSON here), or a raw JSON
 * ACL string supplied by the caller.
 *
 * @param request carries the target bucket and the ACL to apply; must not be
 *                null and must carry at least one of the three ACL forms
 */
public void setBucketAcl(SetBucketAclRequest request) {
    checkNotNull(request, "request should not be null.");
    InternalRequest internalRequest = this.createRequest(request, HttpMethodName.PUT);
    // The "acl" query parameter (no value) selects the bucket's ACL sub-resource.
    internalRequest.addParameter("acl", null);
    if (request.getCannedAcl() != null) {
        // Canned ACL: expressed entirely via a header; the body is empty.
        internalRequest.addHeader(Headers.BCE_ACL, request.getCannedAcl().toString());
        this.setZeroContentLength(internalRequest);
    } else if (request.getAccessControlList() != null) {
        // Structured ACL: serialize the grants to the JSON shape
        // {"accessControlList": [{"grantee": [{"id": ...}, ...],
        //                          "permission": [...]}, ...]}.
        byte[] json = null;
        List<Grant> grants = request.getAccessControlList();
        StringWriter writer = new StringWriter();
        try {
            JsonGenerator jsonGenerator = JsonUtils.jsonGeneratorOf(writer);
            jsonGenerator.writeStartObject();
            jsonGenerator.writeArrayFieldStart("accessControlList");
            for (Grant grant : grants) {
                jsonGenerator.writeStartObject();
                jsonGenerator.writeArrayFieldStart("grantee");
                for (Grantee grantee : grant.getGrantee()) {
                    jsonGenerator.writeStartObject();
                    jsonGenerator.writeStringField("id", grantee.getId());
                    jsonGenerator.writeEndObject();
                }
                jsonGenerator.writeEndArray();
                jsonGenerator.writeArrayFieldStart("permission");
                for (Permission permission : grant.getPermission()) {
                    jsonGenerator.writeString(permission.toString());
                }
                jsonGenerator.writeEndArray();
                jsonGenerator.writeEndObject();
            }
            jsonGenerator.writeEndArray();
            jsonGenerator.writeEndObject();
            jsonGenerator.close();
        } catch (IOException e) {
            throw new BceClientException("Fail to generate json", e);
        }
        try {
            json = writer.toString().getBytes(DEFAULT_ENCODING);
        } catch (UnsupportedEncodingException e) {
            throw new BceClientException("Fail to get UTF-8 bytes", e);
        }
        internalRequest.addHeader(Headers.CONTENT_LENGTH, String.valueOf(json.length));
        internalRequest.addHeader(Headers.CONTENT_TYPE, "application/json");
        internalRequest.setContent(RestartableInputStream.wrap(json));
    } else if (request.getJsonAcl() != null) {
        // Raw JSON ACL: the caller already built the document; just encode it.
        byte[] json = null;
        try {
            json = request.getJsonAcl().getBytes(DEFAULT_ENCODING);
        } catch (UnsupportedEncodingException e) {
            throw new BceClientException("Fail to get UTF-8 bytes", e);
        }
        internalRequest.addHeader(Headers.CONTENT_LENGTH, String.valueOf(json.length));
        internalRequest.addHeader(Headers.CONTENT_TYPE, "application/json");
        internalRequest.setContent(RestartableInputStream.wrap(json));
    } else {
        // No ACL form supplied at all.
        // NOTE(review): checkNotNull(null, ...) unconditionally throws a
        // NullPointerException carrying this message; it reads oddly but is
        // the deliberate fail-fast path here.
        checkNotNull(null, "request.acl should not be null.");
    }
    this.invokeHttpClient(internalRequest, BosResponse.class);
}
|
python
|
def addDiscreteOutcomeConstantMean(distribution, x, p, sort = False):
    '''
    Insert a new discrete outcome x with probability p into a distribution,
    rescaling the existing outcomes so that relative probabilities and the
    overall mean are preserved.

    Parameters
    ----------
    distribution : [np.array]
        Two element list containing a list of probabilities and a list of
        outcomes.
    x : float
        The new value to be added to the distribution.
    p : float
        The probability of the discrete outcome x occurring.
    sort: bool
        Whether or not to sort X before returning it

    Returns
    -------
    X : np.array
        Discrete points for discrete probability mass function.
    pmf : np.array
        Probability associated with each point in X.

    Written by Matthew N. White
    Latest update: 08 December 2015 by David Low
    '''
    # Shrink the old outcomes so that the mean is unchanged after mixing in
    # the new point with weight p.
    adjusted_outcomes = distribution[1] * (1 - p * x) / (1 - p)
    X = np.append(x, adjusted_outcomes)
    # Old probabilities are downweighted by (1-p); the new point takes p.
    pmf = np.append(p, distribution[0] * (1 - p))
    if sort:
        order = np.argsort(X)
        X = X[order]
        pmf = pmf[order]
    return [pmf, X]
|
java
|
/**
 * Resolves the transform for a preference property from the property's
 * declared type.
 *
 * @param property the annotated property whose element type drives the lookup
 * @return the transform registered for that type name
 */
public static PrefsTransform lookup(PrefsProperty property) {
    return lookup(typeName(property.getElement().asType()));
}
|
python
|
def load_nii(strPathIn, varSzeThr=5000.0):
    """
    Load nii file.

    Parameters
    ----------
    strPathIn : str
        Path to nii file to load.
    varSzeThr : float
        If the nii file is larger than this threshold (in MB), the file is
        loaded volume-by-volume in order to prevent memory overflow. Default
        threshold is 5000 MB.

    Returns
    -------
    aryNii : np.array
        Array containing nii data. 32 bit floating point precision.
    objHdr : header object
        Header of nii file.
    aryAff : np.array
        Array containing 'affine', i.e. information about spatial positioning
        of nii data.

    Notes
    -----
    If the nii file is larger than the specified threshold (`varSzeThr`), the
    file is loaded volume-by-volume in order to prevent memory overflow. The
    reason for this is that nibabel imports data at float64 precision, which
    can lead to a memory overflow even for relatively small files.
    """
    # Load nii file (this does not load the data into memory yet):
    objNii = nb.load(strPathIn)

    # Size of the file on disk, converted from bytes to MB:
    varNiiSze = float(os.path.getsize(strPathIn)) / 1000000.0

    # Load volume-by-volume or all at once, depending on file size:
    if varNiiSze > float(varSzeThr):
        # Large file: read one volume at a time so that only float32 data is
        # ever held, avoiding nibabel's float64 materialisation of the whole
        # image.
        print(('---------Large file size ('
               + str(np.around(varNiiSze))
               + ' MB), reading volume-by-volume'))
        # Get image dimensions:
        tplSze = objNii.shape
        # Pre-allocate the output array at float32 precision:
        aryNii = np.zeros(tplSze, dtype=np.float32)
        # Loop through volumes (assumes a 4D image; last axis is volumes):
        for idxVol in range(tplSze[3]):
            aryNii[..., idxVol] = np.asarray(
                objNii.dataobj[..., idxVol]).astype(np.float32)
    else:
        # Small file: load all data at once.  (The redundant second
        # nb.load() call of the original implementation was removed; the
        # image proxy obtained above is reused.)
        aryNii = np.asarray(objNii.dataobj).astype(np.float32)

    # Get headers:
    objHdr = objNii.header
    # Get 'affine':
    aryAff = objNii.affine

    # Output nii data (as numpy array), header, and 'affine':
    return aryNii, objHdr, aryAff
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.