language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
|---|---|
python
|
def generate_features( self, sentence_text, wid ):
        ''' Generates and returns a list of strings, containing tab-separated
            features ID, FORM, LEMMA, CPOSTAG, POSTAG, FEATS of the word
            (the word with index *wid* from the given *sentence_text*).

            Parameters
            -----------
            sentence_text : estnltk.text.Text
                Text object corresponding to a single sentence.
                Words of the sentence, along with their morphological analyses,
                should be accessible via the layer WORDS.
                And each word should be a dict, containing morphological features
                in ANALYSIS part;
            wid : int
                Index of the word/token, whose features need to be generated;

            Returns
            --------
            list of str
                Feature values interleaved with tab separators, in the order:
                ID, FORM, LEMMA, CPOSTAG, POSTAG, FEATS (CoNLL-style columns).
        '''
        assert WORDS in sentence_text and len(sentence_text[WORDS])>0, \
               " (!) 'words' layer missing or empty in given Text!"
        sentence = sentence_text[WORDS]
        assert -1 < wid and wid < len(sentence), ' (!) Invalid word id: '+str(wid)
        # 1) Pre-process (if required)
        # Sentence-level features are computed once, on the first word, and
        # cached on self for the remaining words of the same sentence.
        # NOTE(review): the caches are only refreshed when the corresponding
        # flag is on -- if a flag is toggled off between sentences, a stale
        # cache from a previous sentence could be reused; confirm callers
        # never change flags mid-run.
        if wid == 0:
            # *** Add adposition (_K_) type
            if self.kSubCatRelsLex:
                self.kFeatures = \
                    _findKsubcatFeatures( sentence, self.kSubCatRelsLex, addFeaturesToK = True )
            # *** Add verb chain info
            if self.addVerbcGramm or self.addNomAdvVinf:
                self.vcFeatures = generate_verb_chain_features( sentence_text, \
                                                                addGrammPred=self.addVerbcGramm, \
                                                                addNomAdvVinf=self.addNomAdvVinf )
            # *** Add sentence ending saying verbs
            if self.addSeSayingVerbs:
                self.sayingverbs = detect_sentence_ending_saying_verbs( sentence_text )
            # *** Add clause boundary info
            if self.addClauseBound:
                self.clbFeatures = []
                for tag in sentence_text.clause_annotations:
                    if not tag:
                        self.clbFeatures.append( [] )
                    elif tag == EMBEDDED_CLAUSE_START:
                        self.clbFeatures.append( ['emb_cl_start'] )
                    elif tag == EMBEDDED_CLAUSE_END:
                        self.clbFeatures.append( ['emb_cl_end'] )
                    elif tag == CLAUSE_BOUNDARY:
                        self.clbFeatures.append (['clb'] )
        # 2) Generate the features
        estnltkWord = sentence[wid]
        # Pick the first analysis (ambiguity beyond POS is ignored here)
        firstAnalysis = estnltkWord[ANALYSIS][0]
        strForm = []
        # *** ID  (1-based token index, per CoNLL convention)
        strForm.append( str(wid+1) )
        strForm.append( '\t' )
        # *** FORM  (spaces replaced so the token cannot break the TSV format)
        word_text = estnltkWord[TEXT]
        word_text = word_text.replace(' ', '_')
        strForm.append( word_text )
        strForm.append( '\t' )
        # *** LEMMA  (empty roots are replaced with a placeholder)
        word_root = firstAnalysis[ROOT]
        word_root = word_root.replace(' ', '_')
        if len(word_root) == 0:
            word_root = "??"
        strForm.append( word_root )
        strForm.append( '\t' )
        # *** CPOSTAG  (coarse POS = the first analysis' POS tag)
        strForm.append( firstAnalysis[POSTAG] )
        strForm.append( '\t' )
        # *** POSTAG  (optionally joins all ambiguous POS tags, sorted, with '_')
        finePos = firstAnalysis[POSTAG]
        if self.addAmbiguousPos and len(estnltkWord[ANALYSIS]) > 1:
            pos_tags = sorted(list(set([ a[POSTAG] for a in estnltkWord[ANALYSIS] ])))
            finePos   = '_'.join(pos_tags)
        #if self.kFeatures and wid in self.kFeatures:
        #    finePos += '_'+self.kFeatures[wid]
        strForm.append( finePos )
        strForm.append( '\t' )
        # *** FEATS (grammatical categories, joined with '|'; '_' when empty)
        grammCats = []
        if len(firstAnalysis[FORM]) != 0:
            forms = firstAnalysis[FORM].split()
            grammCats.extend( forms )
        # add features from verb chains:
        if self.vcFeatures and self.vcFeatures[wid]:
            grammCats.extend( self.vcFeatures[wid] )
        # add features from clause boundaries:
        if self.addClauseBound and self.clbFeatures[wid]:
            grammCats.extend( self.clbFeatures[wid] )
        # add adposition type ("post" or "pre")
        if self.kFeatures and wid in self.kFeatures:
            grammCats.extend( [self.kFeatures[wid]] )
        # add saying verb features
        if self.sayingverbs and wid in self.sayingverbs:
            grammCats.extend( [self.sayingverbs[wid]] )
        # wrap up
        if not grammCats:
            grammCats = '_'
        else:
            grammCats = '|'.join( grammCats )
        strForm.append( grammCats )
        strForm.append( '\t' )
        return strForm
|
java
|
/**
 * Fetches the current API rate-limit information from the OneLogin service.
 *
 * On a 200 response the payload is parsed into a {@link RateLimit};
 * otherwise the instance error fields are populated and {@code null}
 * is returned.
 *
 * @return the parsed rate limit, or {@code null} on a non-200 response
 * @throws OAuthSystemException on OAuth transport failures
 * @throws OAuthProblemException on OAuth protocol failures
 */
public RateLimit getRateLimit() throws OAuthSystemException, OAuthProblemException {
    cleanError();
    prepareToken();

    OneloginURLConnectionClient connectionClient = new OneloginURLConnectionClient();
    OAuthClient client = new OAuthClient(connectionClient);
    OAuthClientRequest request = new OAuthBearerClientRequest(settings.getURL(Constants.GET_RATE_URL))
        //.setAccessToken(accessToken) // 'Authorization' => 'Bearer xxxx' not accepted right now
        .buildHeaderMessage();

    Map<String, String> authHeaders = getAuthorizedHeader();
    request.setHeaders(authHeaders);

    OneloginOAuthJSONResourceResponse response =
        client.resource(request, OAuth.HttpMethod.GET, OneloginOAuthJSONResourceResponse.class);

    if (response.getResponseCode() != 200) {
        error = response.getError();
        errorDescription = response.getErrorDescription();
        return null;
    }

    JSONObject data = response.getData();
    return data == null ? null : new RateLimit(data);
}
|
java
|
/**
 * Encodes the given object via the wrapped transcoder and records the
 * resulting payload size in the statistics registry.
 *
 * @param object the object to encode
 * @return the encoded cached data produced by the delegate
 */
public CachedData encode( final Object object ) {
    final CachedData encoded = _delegate.encode( object );
    final int payloadLength = encoded.getData().length;
    _statistics.register( StatsType.CACHED_DATA_SIZE, payloadLength );
    return encoded;
}
|
java
|
@Override
protected Object doExec(Element element, Object scope, String format, Object... arguments) throws IOException
{
    // Write the current ordered-collection numbering as text content.
    // A numbering operator is only meaningful inside an ordered collection,
    // so an empty index stack is a template error.
    Stack<Index> indexes = this.serializer.getIndexes();
    if (indexes.isEmpty()) {
        throw new TemplateException("Required ordered collection index is missing. Numbering operator cancel execution.");
    }
    // Reuse the stack we already fetched instead of calling the getter again.
    this.serializer.writeTextContent(getNumbering(indexes, format));
    return null;
}
|
java
|
private void learn(T[] data, RNNSearch<T,T> nns, int k, double radius, int[] y) {
    // Refines an initial hard assignment y[] by iteratively moving each
    // sample to the dominant cluster of its radius-neighborhood, until the
    // neighborhood-entropy objective stops decreasing; finally renumbers
    // clusters so empty ones are removed.
    // NOTE(review): this looks like a minimum-(conditional-)entropy
    // clustering pass -- confirm against the enclosing class documentation.
    //
    // Parameters:
    //   data   - the samples
    //   nns    - range-search structure over data
    //   k      - initial number of clusters (>= 2)
    //   radius - neighborhood radius (> 0)
    //   y      - in/out: initial cluster labels, refined in place
    if (k < 2) {
        throw new IllegalArgumentException("Invalid k: " + k);
    }
    if (radius <= 0.0) {
        throw new IllegalArgumentException("Invalid radius: " + radius);
    }
    this.k = k;
    this.nns = nns;
    this.radius = radius;
    this.y = y;
    this.size = new int[k];
    int n = data.length;
    // Count initial cluster sizes.
    for (int i = 0; i < n; i++) {
        size[y[i]]++;
    }
    // Initialize the neighborhood list for each sample.
    // px[i] = fraction of all samples falling inside sample i's radius.
    double[] px = new double[n];
    // Neighbors of each sample.
    ArrayList<ArrayList<Integer>> neighbors = new ArrayList<>();
    for (int i = 0; i < n; i++) {
        ArrayList<Neighbor<T,T>> list = new ArrayList<>();
        nns.range(data[i], radius, list);
        ArrayList<Integer> neighbor = new ArrayList<>(list.size());
        neighbors.add(neighbor);
        for (Neighbor<T,T> nt : list) {
            neighbor.add(nt.index);
        }
        px[i] = (double) list.size() / n;
    }
    // Initialize a posterior probabilities.
    // The count of each cluster in the neighborhood.
    int[][] neighborhoodClusterSize = new int[n][k];
    // The most significant cluster in the neighborhood.
    int[] mostSignificantNeighborhoodCluster = new int[n];
    for (int i = 0; i < n; i++) {
        for (int j : neighbors.get(i)) {
            neighborhoodClusterSize[i][y[j]]++;
        }
    }
    // Determine, per sample, the cluster that dominates its neighborhood.
    for (int i = 0; i < n; i++) {
        int max = 0;
        for (int j = 0; j < k; j++) {
            if (neighborhoodClusterSize[i][j] > max) {
                mostSignificantNeighborhoodCluster[i] = j;
                max = neighborhoodClusterSize[i][j];
            }
        }
    }
    // The number of samples with nonzero conditional entropy.
    // Initial objective: sum over samples of px[i] * H(cluster | neighborhood_i).
    entropy = 0.0;
    for (int i = 0; i < n; i++) {
        if (!neighbors.get(i).isEmpty()) {
            int ni = neighbors.get(i).size();
            double m = 0.0;
            for (int j = 0; j < k; j++) {
                double r = ((double) neighborhoodClusterSize[i][j]) / ni;
                if (r > 0) {
                    m -= r * Math.log2(r);
                }
            }
            m *= px[i];
            entropy += m;
        }
    }
    // Greedy reassignment loop: repeat until the objective decrease is tiny.
    double eps = 1.0;
    while (eps >= 1E-7) {
        for (int i = 0; i < n; i++) {
            if (mostSignificantNeighborhoodCluster[i] != y[i]) {
                // Evaluate the entropy change of moving sample i from y[i]
                // to the dominant neighborhood cluster, over i's neighbors.
                double oldMutual = 0.0;
                double newMutual = 0.0;
                for (int neighbor : neighbors.get(i)) {
                    double nk = neighbors.get(neighbor).size();
                    double r1 = (double) neighborhoodClusterSize[neighbor][y[i]] / nk;
                    double r2 = (double) neighborhoodClusterSize[neighbor][mostSignificantNeighborhoodCluster[i]] / nk;
                    if (r1 > 0) {
                        oldMutual -= r1 * Math.log2(r1) * px[neighbor];
                    }
                    if (r2 > 0) {
                        oldMutual -= r2 * Math.log2(r2) * px[neighbor];
                    }
                    r1 = (neighborhoodClusterSize[neighbor][y[i]] - 1.0) / nk;
                    r2 = (neighborhoodClusterSize[neighbor][mostSignificantNeighborhoodCluster[i]] + 1.0) / nk;
                    if (r1 > 0) {
                        newMutual -= r1 * Math.log2(r1) * px[neighbor];
                    }
                    if (r2 > 0) {
                        newMutual -= r2 * Math.log2(r2) * px[neighbor];
                    }
                }
                if (newMutual < oldMutual) {
                    // Commit the move: update neighborhood counts and, where
                    // the target cluster now dominates, the dominant label.
                    for (int neighbor : neighbors.get(i)) {
                        --neighborhoodClusterSize[neighbor][y[i]];
                        ++neighborhoodClusterSize[neighbor][mostSignificantNeighborhoodCluster[i]];
                        int mi = mostSignificantNeighborhoodCluster[i];
                        int mk = mostSignificantNeighborhoodCluster[neighbor];
                        if (neighborhoodClusterSize[neighbor][mi] > neighborhoodClusterSize[neighbor][mk]) {
                            mostSignificantNeighborhoodCluster[neighbor] = mostSignificantNeighborhoodCluster[i];
                        }
                    }
                    size[y[i]]--;
                    size[mostSignificantNeighborhoodCluster[i]]++;
                    y[i] = mostSignificantNeighborhoodCluster[i];
                }
            }
        }
        // Recompute the objective and measure the improvement.
        double prevObj = entropy;
        entropy = 0.0;
        for (int i = 0; i < n; i++) {
            if (!neighbors.get(i).isEmpty()) {
                int ni = neighbors.get(i).size();
                double m = 0.0;
                for (int j = 0; j < k; j++) {
                    double r = ((double) neighborhoodClusterSize[i][j]) / ni;
                    if (r > 0) {
                        m -= r * Math.log2(r);
                    }
                }
                m *= px[i];
                entropy += m;
            }
        }
        eps = prevObj - entropy;
    }
    // Collapse clusters by removing clusters with no samples.
    int nk = 0;
    for (int i = 0; i < k; i++) {
        if (size[i] > 0) {
            nk++;
        }
    }
    // size[] temporarily holds the old-index -> new-index mapping here.
    int[] count = new int[nk];
    for (int i = 0, j = 0; i < k; i++) {
        if (size[i] > 0) {
            count[j] = size[i];
            size[i] = j++;
        }
    }
    for (int i = 0; i < n; i++) {
        y[i] = size[y[i]];
    }
    this.k = nk;
    size = count;
}
|
python
|
def get_gene_source_set(gtf):
    """
    Return a dictionary mapping each gene_id to the set of all sources
    (GTF column 2 values) observed for that gene.

    :param gtf: path to the GTF file (handed to get_gtf_db)
    :return: dict of gene_id -> set of source strings
    """
    gene_to_source = {}
    db = get_gtf_db(gtf)
    for feature in complete_features(db):
        gene_id = feature['gene_id'][0]
        # setdefault avoids allocating a fresh set + union per feature.
        gene_to_source.setdefault(gene_id, set()).add(feature.source)
    return gene_to_source
|
java
|
/**
 * Returns the page-object representation of the given view class, caching
 * it in the current scenario globals when available.
 *
 * @param viewClass the page-object class to instantiate/look up
 *                  (renamed from {@code T}, which confusingly shadowed the
 *                  type parameter; Java callers are unaffected by parameter
 *                  names)
 * @param forceRefresh when {@code true}, re-initialize the view even if a
 *                     cached instance exists
 * @return the cached or freshly initialized view instance
 */
public <T> T getViewRepresentation(Class<T> viewClass, boolean forceRefresh) {
    ScenarioGlobals currentScenarioGlobals = SenBotContext.getSenBotContext().getCucumberManager().getCurrentScenarioGlobals();
    // Outside a running scenario there are no globals, so nothing is cached.
    T foundView = currentScenarioGlobals == null ? null : (T) currentScenarioGlobals.getAttribute(viewClass.getName());
    if (foundView == null || forceRefresh) {
        foundView = PageFactory.initElements(getAssociatedTestEnvironment().getWebDriver(), viewClass);
        if (currentScenarioGlobals != null) {
            currentScenarioGlobals.setAttribute(viewClass.getName(), foundView);
        }
    }
    return foundView;
}
|
java
|
/**
 * Reads the named attribute of the tag as a constant boolean.
 *
 * @param tag the tag carrying the attribute
 * @param attrName the attribute name to read
 * @return the boolean value of the attribute
 * @throws EvaluatorException if the attribute is not a constant boolean
 */
public static Boolean getAttributeBoolean(Tag tag, String attrName) throws EvaluatorException {
    final Boolean value = getAttributeLiteral(tag, attrName).getBoolean(null);
    if (value != null) {
        return value;
    }
    throw new EvaluatorException("attribute [" + attrName + "] must be a constant boolean value");
}
|
python
|
def pairwise_point_combinations(xs, ys, anchors):
    """
    Does an in-place addition of the points that can be composed by
    combining each coordinate from one list with the extremes (min/max)
    of the other, appending them to the given list of anchors.

    :param xs: sequence of x coordinates (non-empty)
    :param ys: sequence of y coordinates (non-empty)
    :param anchors: list of (x, y) tuples, extended in place
    :return: None (mutates *anchors*)
    """
    # Hoist the extremes out of the loops: the original recomputed
    # max()/min() on every iteration (accidental O(n^2)).
    max_y, min_y = max(ys), min(ys)
    max_x, min_x = max(xs), min(xs)
    for x in xs:
        anchors.append((x, max_y))
        anchors.append((x, min_y))
    for y in ys:
        anchors.append((max_x, y))
        anchors.append((min_x, y))
|
python
|
def ResolveHostnameToIP(host, port):
  """Resolves a hostname to a single IP address string.

  Args:
    host: Hostname (or address literal) to resolve.
    port: Port number or service name, forwarded to getaddrinfo.

  Returns:
    The address of the first TCP addrinfo result as a text string; may be
    an IPv4 or IPv6 address depending on the resolver (AF_UNSPEC).

  Raises:
    socket.gaierror: If name resolution fails.
  """
  ip_addrs = socket.getaddrinfo(host, port, socket.AF_UNSPEC, 0,
                                socket.IPPROTO_TCP)
  # getaddrinfo returns tuples (family, socktype, proto, canonname, sockaddr).
  # We are interested in sockaddr which is in turn a tuple
  # (address, port) for IPv4 or (address, port, flow info, scope id)
  # for IPv6. In both cases, we want the first element, the address.
  result = ip_addrs[0][4][0]
  # TODO: In Python 2, this value is a byte string instead of UTF-8
  # string. To ensure type correctness until support for Python 2 is dropped,
  # we always decode this value.
  if compatibility.PY2:
    result = result.decode("ascii")
  return result
|
python
|
def process_params(mod_id, params, type_params):
    """
    Takes as input a dictionary of parameters defined on a module and the
    information about the required parameters defined on the corresponding
    module type. Validates that all required parameters were supplied and
    fills any missing parameters with their default values from the module
    type. Returns a nested dictionary of the same format as the `type_params`
    but with an additional key `value` on each inner dictionary that gives the
    value of that parameter for this specific module.

    :param mod_id: module identifier, used only in error messages
    :param params: dict of parameter name -> supplied value
    :param type_params: dict of parameter name -> info dict (may contain
        "default" and "required" keys)
    :raises ValueError: if a required parameter has no value and no default
    """
    res = {}
    for param_name, param_info in type_params.items():
        val = params.get(param_name, param_info.get("default", None))
        # Check against explicit None (param could be explicitly False)
        if val is not None:
            param_res = dict(param_info)
            param_res["value"] = val
            res[param_name] = param_res
        # BUG FIX: "required" lives on the per-parameter info dict, not on
        # the outer type_params mapping -- the original check never fired.
        elif param_info.get("required", False):
            raise ValueError(
                'Required parameter "{}" is not defined for module '
                '"{}"'.format(param_name, mod_id)
            )
    return res
|
python
|
def get_values(self):
    """
    Returns the cpd
    Examples
    --------
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> cpd = TabularCPD('grade', 3, [[0.1, 0.1],
    ...                               [0.1, 0.1],
    ...                               [0.8, 0.8]],
    ...                  evidence='evi1', evidence_card=2)
    >>> cpd.get_values()
    array([[ 0.1,  0.1],
           [ 0.1,  0.1],
           [ 0.8,  0.8]])
    """
    # Shape: one row per state of the main variable when it is part of this
    # factor's scope, otherwise a single row spanning the full cardinality.
    card = self.cardinality
    if self.variable not in self.variables:
        return self.values.reshape(1, np.prod(card))
    return self.values.reshape(card[0], np.prod(card[1:]))
|
python
|
def get(self, interface_id):
        """
        Get the interface from engine json.

        Matches, in order: the interface's own id; a VLAN sub-interface when
        *interface_id* has a dotted form like ``"1.2"``; or an inline
        interface whose nicid contains the requested id (e.g. ``"1-2"``).

        :param str interface_id: interface ID to find
        :return: the matching interface (with ``_engine`` attached) or a
            VLAN sub-interface
        :raises InterfaceNotFound: Cannot find interface
        """
        # From within engine, skips nested iterators for this find
        # Make sure were dealing with a string
        interface_id = str(interface_id)
        for intf in self:
            if intf.interface_id == interface_id:
                intf._engine = self.engine
                return intf
            else: # Check for inline interfaces
                if '.' in interface_id:
                    # It's a VLAN interface
                    vlan = interface_id.split('.')
                    # Check that we're on the right interface
                    if intf.interface_id == vlan[0]:
                        if intf.has_vlan:
                            # Delegate lookup of the trailing VLAN id
                            return intf.vlan_interface.get_vlan(vlan[-1])
                elif intf.has_interfaces:
                    # Inline interfaces carry compound nicids like "1-2";
                    # match either the whole nicid or one of its halves.
                    for interface in intf.interfaces:
                        if isinstance(interface, InlineInterface):
                            split_intf = interface.nicid.split('-')
                            if interface_id == interface.nicid or \
                                str(interface_id) in split_intf:
                                intf._engine = self.engine
                                return intf
        raise InterfaceNotFound(
            'Interface id {} was not found on this engine.'.format(interface_id))
|
java
|
/**
 * Completes code generation for a binary operation whose left operand is
 * already on the stack: specializes comparison opcodes against constant 0
 * or null, emits the right operand, and produces the result item.
 *
 * NOTE(review): this is javac back-end code; opcode arithmetic below relies
 * on the contiguous layout of the bytecode constants (if_icmpeq..if_icmple,
 * if_acmpeq/ne, ishll..lushrl) -- confirm against com.sun.tools.javac.jvm.ByteCodes.
 *
 * @param lhs left operand tree (already generated/loaded by the caller)
 * @param rhs right operand tree (generated here unless folded away)
 * @param operator the resolved operator symbol
 * @return a conditional item for comparisons, otherwise a stack item of the
 *         operator's result type
 */
Item completeBinop(JCTree lhs, JCTree rhs, OperatorSymbol operator) {
    MethodType optype = (MethodType)operator.type;
    int opcode = operator.opcode;
    // Comparison against constant int 0: use the one-operand if<cond> forms
    // and skip generating the right operand entirely.
    if (opcode >= if_icmpeq && opcode <= if_icmple &&
        rhs.type.constValue() instanceof Number &&
        ((Number) rhs.type.constValue()).intValue() == 0) {
        opcode = opcode + (ifeq - if_icmpeq);
    } else if (opcode >= if_acmpeq && opcode <= if_acmpne &&
               TreeInfo.isNull(rhs)) {
        // Reference comparison against the null literal: use ifnull/ifnonnull.
        opcode = opcode + (if_acmp_null - if_acmpeq);
    } else {
        // The expected type of the right operand is
        // the second parameter type of the operator, except for
        // shifts with long shiftcount, where we convert the opcode
        // to a short shift and the expected type to int.
        Type rtype = operator.erasure(types).getParameterTypes().tail.head;
        if (opcode >= ishll && opcode <= lushrl) {
            opcode = opcode + (ishl - ishll);
            rtype = syms.intType;
        }
        // Generate code for right operand and load.
        genExpr(rhs, rtype).load();
        // If there are two consecutive opcode instructions,
        // emit the first now.
        if (opcode >= (1 << preShift)) {
            code.emitop0(opcode >> preShift);
            opcode = opcode & 0xFF;
        }
    }
    if (opcode >= ifeq && opcode <= if_acmpne ||
        opcode == if_acmp_null || opcode == if_acmp_nonnull) {
        // Comparisons yield a conditional item rather than a stack value.
        return items.makeCondItem(opcode);
    } else {
        code.emitop0(opcode);
        return items.makeStackItem(optype.restype);
    }
}
|
java
|
/**
 * Wraps the given stream so writes become asynchronous: callers write into
 * the returned {@link PipedOutputStream} while a background thread drains
 * the connected pipe (10 MiB buffer) into the real output stream.
 *
 * The pump can be stopped via the {@link Closeable} registered in
 * {@code outputStreams}, whose close() gently finishes the thread after the
 * remaining piped data has been flushed.
 *
 * NOTE(review): finish() busy-spins until the pump thread sets hasFinished;
 * acceptable only if finishing is quick -- confirm callers tolerate this.
 *
 * @param outputStream the underlying stream to drain into (closed by the pump)
 * @return the piped stream callers should write to
 * @throws IOException if the pipe cannot be created
 */
protected OutputStream getAsynchronousOutputStream(
        final OutputStream outputStream) throws IOException {
    final int SIZE = 1024 * 1024 * 10;
    final PipedOutputStream pos = new PipedOutputStream();
    final PipedInputStream pis = new PipedInputStream(pos, SIZE);
    final FinishableRunnable run = new FinishableRunnable() {
        volatile boolean finish = false;
        volatile boolean hasFinished = false;
        @Override
        public void finish() {
            this.finish = true;
            while (!this.hasFinished) {
                // loop until thread is really finished
            }
        }
        @Override
        public void run() {
            try {
                byte[] bytes = new byte[SIZE];
                // Note that we finish really gently here, writing all data
                // that is still in the input first (in theory, new data
                // could arrive asynchronously, so that the thread never
                // finishes, but this is not the intended mode of
                // operation).
                for (int len; (!this.finish || pis.available() > 0)
                        && (len = pis.read(bytes)) > 0;) {
                    outputStream.write(bytes, 0, len);
                }
            } catch (IOException e) {
                e.printStackTrace();
            } finally {
                // Always release both ends and signal finish(), even on error.
                close(pis);
                close(outputStream);
                this.hasFinished = true;
            }
        }
    };
    new Thread(run, "async-output-stream").start();
    // Expose a handle that lets the owner flush-and-stop the pump later.
    this.outputStreams.add(new Closeable() {
        @Override
        public void close() throws IOException {
            run.finish();
        }
    });
    return pos;
}
|
java
|
/**
 * Injects the bundled LocalForage JavaScript into the top window,
 * doing nothing if it has already been loaded.
 */
public static void load() {
    if (isLoaded()) {
        return;
    }
    ScriptInjector.fromString(LocalForageResources.INSTANCE.js().getText())
            .setWindow(ScriptInjector.TOP_WINDOW)
            .inject();
}
|
python
|
def _handle_stdout_event(self, fd, events):
        """Eventhandler for stdout.

        Streams the child process' stdout to the HTTP client: chunked
        transfer encoding for HTTP/1.1, a buffered Content-Length response
        for HTTP/1.0. On ERROR/HUP, drains the pipe, closes it, removes the
        handler and attempts a graceful finish.

        NOTE(review): string/bytes handling (`data = ''` + `os.read` payload)
        indicates Python 2-era code -- confirm before running on Python 3.
        """
        assert fd == self.fd_stdout
        if events & self.ioloop.READ:
            # got data ready to read
            data = ''
            # Now basically we have two cases: either the client supports
            # HTTP/1.1 in which case we can stream the answer in chunked mode
            # in HTTP/1.0 we need to send a content-length and thus buffer the complete output
            if self.request.supports_http_1_1():
                if not self.headers_sent:
                    # First readable event: emit status line + headers, then
                    # (optionally) the prelude as the first chunk.
                    self.sent_chunks = True
                    self.headers.update({'Date': get_date_header(), 'Transfer-Encoding': 'chunked'})
                    data = 'HTTP/1.1 200 OK\r\n' + '\r\n'.join([ k + ': ' + v for k, v in self.headers.items()]) + '\r\n\r\n'
                    if self.output_prelude:
                        data += hex(len(self.output_prelude))[2:] + "\r\n"  # cut off 0x
                        data += self.output_prelude + "\r\n"
                    self.headers_sent = True
                payload = os.read(fd, 8192)
                if events & self.ioloop.ERROR:  # there might be data remaining in the buffer if we got HUP, get it all
                    remainder = True
                    while remainder != '':  # until EOF
                        remainder = os.read(fd, 8192)
                        payload += remainder
                # Chunk framing: hex length, CRLF, payload, CRLF.
                data += hex(len(payload))[2:] + "\r\n"  # cut off 0x
                data += payload + "\r\n"
            else:
                if not self.headers_sent:
                    # Use the over-eager blocking read that will get everything until we hit EOF
                    # this might actually be somewhat dangerous as noted in the subprocess documentation
                    # and lead to a deadlock. This is only a legacy mode for HTTP/1.0 clients anyway,
                    # so we might want to remove it entirely anyways
                    payload = self.process.stdout.read()
                    self.headers.update({'Date': get_date_header(), 'Content-Length': str(len(payload))})
                    data = 'HTTP/1.0 200 OK\r\n' + '\r\n'.join([ k + ': ' + v for k, v in self.headers.items()]) + '\r\n\r\n'
                    self.headers_sent = True
                    data += self.output_prelude + payload
                else:
                    # this is actually somewhat illegal as it messes with content-length but
                    # it shouldn't happen anyways, as the read above should have read anything
                    # python docs say this can happen on ttys...
                    logger.error("This should not happen")
                    data = self.process.stdout.read()
            # 8200 = 8192 payload bytes + 8 framing bytes: coalesce log spam
            # for full chunks, log a summary when a partial chunk arrives.
            if len(data) == 8200:
                self.number_of_8k_chunks_sent += 1
            else:
                if self.number_of_8k_chunks_sent > 0:
                    logger.debug('Sent %d * 8192 bytes', self.number_of_8k_chunks_sent)
                    self.number_of_8k_chunks_sent = 0
                logger.debug('Sending stdout to client %d bytes: %r', len(data), data[:20])
            self.request.write(data)
        # now we can also have an error. This is because tornado maps HUP onto error
        # therefore, no elif here!
        if events & self.ioloop.ERROR:
            logger.debug('Error on stdout')
            # ensure file is closed
            if not self.process.stdout.closed:
                self.process.stdout.close()
            # remove handler
            self.ioloop.remove_handler(self.fd_stdout)
            # if all fds are closed, we can finish
            return self._graceful_finish()
|
java
|
/**
 * Logs every HTTP header of the given request at INFO level,
 * one "header name:value" line per header.
 *
 * @param request the servlet request whose headers are logged
 */
public static void printHeaders(HttpServletRequest request) {
    final Enumeration<String> names = request.getHeaderNames();
    while (names.hasMoreElements()) {
        final String headerName = names.nextElement();
        logger.info("header " + headerName + ":" + request.getHeader(headerName));
    }
}
|
python
|
def email_from_name(self):
        """Sender display name for outgoing email: the laboratory's name,
        falling back to the portal-wide ``email_from_name`` setting when
        the laboratory has none.
        """
        lab_name = self.laboratory.getName()
        if lab_name:
            return lab_name
        return self.portal.email_from_name
|
python
|
def entry_to_matrix(prodigy_entry):
    """
    Take in a line from the labeled json and return a tuple
    (feature matrix, label vector) for training, or None when the two
    disagree in length (or either is missing a usable shape).

    Two ways to get 0s:
     - marked as false by user ("reject")
     - generated automatically from other entries when guess is correct
    Rather than iterating through entities, just get the number of the
    correct entity directly. Then get one or two GPEs before and after.
    """
    doc = nlp(prodigy_entry['text'])
    geo_proced = geo.process_text(doc, require_maj=False)
    # find the geoproced entity that matches the Prodigy entry
    ent_text = np.asarray([gp['word'] for gp in geo_proced])  # get mask for correct ent
    # BUG FIX: the original referenced an undefined name `entry` here.
    match = ent_text == prodigy_entry['meta']['word']
    # BUG FIX: the original used `np.abs(match - 1)`, which is rejected for
    # boolean arrays by modern NumPy and, where it ran, produced an *integer*
    # index array rather than a boolean mask. Logical NOT is the intent.
    anti_match = ~match
    match_position = match.argmax()
    geo_proc = geo_proced[match_position]
    iso = geo.cts[prodigy_entry['label']]  # convert country text label to ISO
    feat = geo.features_to_matrix(geo_proc)
    answer_x = feat['matrix']
    label = np.asarray(feat['labels'])
    if prodigy_entry['answer'] == "accept":
        answer_binary = (label == iso).astype('int')
    elif prodigy_entry['answer'] == "reject":
        # all we know is that the label that was presented is wrong.
        # just return the corresponding row in the feature matrix,
        # and force the label to be 0
        answer_binary = label == iso
        answer_x = answer_x[answer_binary, :]  # just take the row corresponding to the answer
        answer_binary = np.asarray([0])  # set the outcome to 0 because reject
    # NEED TO SHARE LABELS ACROSS! THE CORRECT ONE MIGHT NOT EVEN APPEAR FOR ALL ENTITIES
    # TODO: incorporate the non-matching entities' features (`other_x`),
    # ordered by distance, with distance itself as a feature.
    other_x = feat['matrix'][anti_match, :]  # currently unused
    try:
        if answer_x.shape[0] == answer_binary.shape[0]:
            return (answer_x, answer_binary)
    except Exception:
        # answer_binary is undefined for answers other than accept/reject,
        # and shapes may be missing on odd inputs; treat all of these as
        # "no usable training example" and fall through to an implicit None.
        pass
|
python
|
def fromagp(args):
    """
    %prog fromagp agpfile componentfasta objectfasta

    Generate chain file from AGP format. The components represent the old
    genome (target) and the objects represent new genome (query).
    """
    from jcvi.formats.agp import AGP
    from jcvi.formats.sizes import Sizes

    p = OptionParser(fromagp.__doc__)
    p.add_option("--novalidate", default=False, action="store_true",
                 help="Do not validate AGP")
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    agpfile, componentfasta, objectfasta = args
    chainfile = agpfile.rsplit(".", 1)[0] + ".chain"
    fw = open(chainfile, "w")
    agp = AGP(agpfile, validate=(not opts.novalidate))
    # Sequence lengths keyed by name, for both genomes.
    componentsizes = Sizes(componentfasta).mapping
    objectsizes = Sizes(objectfasta).mapping
    chain = "chain"
    score = 1000
    tStrand = "+"
    id = 0
    for a in agp:
        # Gap rows have no component; they produce no chain records.
        if a.is_gap:
            continue

        tName = a.component_id
        tSize = componentsizes[tName]
        tStart = a.component_beg
        tEnd = a.component_end
        # Chain format uses 0-based half-open starts; AGP is 1-based inclusive.
        tStart -= 1

        qName = a.object
        qSize = objectsizes[qName]
        qStrand = "-" if a.orientation == "-" else "+"
        qStart = a.object_beg
        qEnd = a.object_end
        if qStrand == '-':
            # Chain coordinates on the '-' strand are given relative to the
            # reverse-complemented sequence, so flip the interval.
            _qStart = qSize - qEnd + 1
            _qEnd = qSize - qStart + 1
            qStart, qEnd = _qStart, _qEnd
        qStart -= 1

        id += 1
        size = a.object_span
        headerline = "\t".join(str(x) for x in (
             chain, score, tName, tSize, tStrand, tStart,
             tEnd, qName, qSize, qStrand, qStart, qEnd, id
        ))
        # A single ungapped alignment block per AGP line; blank line ends
        # the chain record.
        alignmentline = size
        print(headerline, file=fw)
        print(alignmentline, file=fw)
        print(file=fw)

    fw.close()
    logging.debug("File written to `{0}`.".format(chainfile))
|
java
|
/**
 * Maps a {@link Config} entity (plus app/env display names and optional
 * live ZooKeeper data) to a {@link ConfListVo} view object.
 *
 * @param config        the persisted configuration record
 * @param appNameString display name of the owning application
 * @param envName       display name of the environment
 * @param zkDisconfData live per-machine data from ZooKeeper; may be null,
 *                      in which case the machine fields stay unset
 * @return the populated view object
 */
private ConfListVo convert(Config config, String appNameString, String envName, ZkDisconfData zkDisconfData) {
    ConfListVo confListVo = new ConfListVo();
    confListVo.setConfigId(config.getId());
    confListVo.setAppId(config.getAppId());
    confListVo.setAppName(appNameString);
    confListVo.setEnvName(envName);
    confListVo.setEnvId(config.getEnvId());
    confListVo.setCreateTime(config.getCreateTime());
    // NOTE(review): assumes update time is a sortable string of >= 12 chars -- confirm format.
    confListVo.setModifyTime(config.getUpdateTime().substring(0, 12));
    confListVo.setKey(config.getName());
    // StringEscapeUtils.escapeHtml escape
    confListVo.setValue(CodeUtils.unicodeToUtf8(config.getValue()));
    confListVo.setVersion(config.getVersion());
    confListVo.setType(DisConfigTypeEnum.getByType(config.getType()).getModelName());
    confListVo.setTypeId(config.getType());
    if (zkDisconfData != null) {
        List<ZkDisconfDataItem> datalist = zkDisconfData.getData();
        MachineListVo machineListVo = getZkData(datalist, config);
        confListVo.setErrorNum(machineListVo.getErrorNum());
        confListVo.setMachineList(machineListVo.getDatalist());
        // FIX: machine size is set once, from the computed MachineListVo.
        // The original also pre-set it from zkDisconfData.getData().size(),
        // a value that was always overwritten here.
        confListVo.setMachineSize(machineListVo.getMachineSize());
    }
    return confListVo;
}
|
python
|
def _executor(self, host):
        ''' handler for multiprocessing library

        Runs the internal executor for *host* and converts every failure
        mode into a ReturnData with comm_ok=False, reporting it through
        the on_unreachable callback, so the worker never raises.

        NOTE: Python 2 `except E, e` syntax -- this module targets Python 2.
        '''
        try:
            exec_rc = self._executor_internal(host)
            #if type(exec_rc) != ReturnData and type(exec_rc) != ansible.runner.return_data.ReturnData:
            #    raise Exception("unexpected return type: %s" % type(exec_rc))
            # redundant, right?
            if not exec_rc.comm_ok:
                # Communication-level failure: report host as unreachable.
                self.callbacks.on_unreachable(host, exec_rc.result)
            return exec_rc
        except errors.AnsibleError, ae:
            msg = str(ae)
            self.callbacks.on_unreachable(host, msg)
            return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
        except Exception:
            # Any other exception: capture the traceback as the message so
            # the failure is debuggable from the callback/result alone.
            msg = traceback.format_exc()
            self.callbacks.on_unreachable(host, msg)
            return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
|
java
|
/**
 * REST: POST /horizonView/{serviceName}/domainTrust/{domainTrustId}/addDomainUserOnComposer
 *
 * Adds a domain user on the composer for the given domain trust
 * (generated OVH API client wrapper).
 *
 * @param serviceName the Horizon View service name (path parameter)
 * @param domainTrustId the domain trust id (path parameter)
 * @param domain the user's domain (request body)
 * @param password the user's password (request body)
 * @param username the user's name (request body)
 * @return the asynchronous task created by the API
 * @throws IOException on transport or (de)serialization failure
 */
public OvhTask serviceName_domainTrust_domainTrustId_addDomainUserOnComposer_POST(String serviceName, Long domainTrustId, String domain, String password, String username) throws IOException {
    String qPath = "/horizonView/{serviceName}/domainTrust/{domainTrustId}/addDomainUserOnComposer";
    // sb holds the resolved path; qPath (the template) keys the API call.
    StringBuilder sb = path(qPath, serviceName, domainTrustId);
    HashMap<String, Object>o = new HashMap<String, Object>();
    addBody(o, "domain", domain);
    addBody(o, "password", password);
    addBody(o, "username", username);
    String resp = exec(qPath, "POST", sb.toString(), o);
    return convertTo(resp, OvhTask.class);
}
|
java
|
/**
 * Initializes the shapefile driver: derives the .shp/.shx/.dbf file trio
 * from the given path (extension replaced), opens the writers and emits
 * the shapefile headers.
 *
 * @param shpFile     any file in the shapefile set; its extension is
 *                    stripped to derive the canonical .shp/.shx/.dbf names
 * @param shapeType   the geometry type written to the headers
 * @param dbaseHeader the DBF header forwarded to the dbf driver
 * @throws IOException if any of the files cannot be created
 */
public void initDriver(File shpFile, ShapeType shapeType, DbaseFileHeader dbaseHeader) throws IOException {
    String path = shpFile.getAbsolutePath();
    String nameWithoutExt = path.substring(0, path.lastIndexOf('.'));
    this.shpFile = new File(nameWithoutExt + ".shp");
    this.shxFile = new File(nameWithoutExt + ".shx");
    this.dbfFile = new File(nameWithoutExt + ".dbf");
    // FIX: open the normalized ".shp" file, not the raw parameter --
    // the original wrote to `shpFile` (e.g. "foo.SHP" or "foo.dbf"),
    // inconsistently with the derived shx/dbf paths used below.
    FileOutputStream shpFos = new FileOutputStream(this.shpFile);
    FileOutputStream shxFos = new FileOutputStream(this.shxFile);
    shapefileWriter = new ShapefileWriter(shpFos.getChannel(), shxFos.getChannel());
    this.shapeType = shapeType;
    shapefileWriter.writeHeaders(shapeType);
    dbfDriver.initDriver(dbfFile, dbaseHeader);
}
|
python
|
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ParticipantContext for this ParticipantInstance
:rtype: twilio.rest.api.v2010.account.conference.participant.ParticipantContext
"""
if self._context is None:
self._context = ParticipantContext(
self._version,
account_sid=self._solution['account_sid'],
conference_sid=self._solution['conference_sid'],
call_sid=self._solution['call_sid'],
)
return self._context
|
java
|
/**
 * Post-processes DATE/TIME Timex3 annotations: values starting with "0"
 * (e.g. "0047") are prefixed with "BC" when a nearby preceding timex in
 * the document already carries a BC value of a compatible century,
 * disambiguating historic (before-Christ) dates.
 *
 * Looks back at up to 4 preceding numeric DATE/TIME timexes (offset/counter
 * window) and skips timexes already resolved by a "-BCADhint" rule.
 *
 * @param jcas the CAS whose Timex3 annotations are updated in place
 */
public void disambiguateHistoricDates(JCas jcas){
    // build up a list with all found TIMEX expressions
    List<Timex3> linearDates = new ArrayList<Timex3>();
    FSIterator iterTimex = jcas.getAnnotationIndex(Timex3.type).iterator();

    // Create List of all Timexes of types "date" and "time"
    while (iterTimex.hasNext()) {
        Timex3 timex = (Timex3) iterTimex.next();
        if (timex.getTimexType().equals("DATE") || timex.getTimexType().equals("TIME")) {
            linearDates.add(timex);
        }
    }

    //////////////////////////////////////////////
    // go through list of Date and Time timexes //
    //////////////////////////////////////////////
    // Start at 1: the first timex has no predecessor to disambiguate against.
    for (int i = 1; i < linearDates.size(); i++) {
        Timex3 t_i = (Timex3) linearDates.get(i);
        String value_i = t_i.getTimexValue();
        String newValue = value_i;
        Boolean change = false;
        if (!(t_i.getFoundByRule().contains("-BCADhint"))){
            // Only values with a leading zero are century-ambiguous.
            if (value_i.startsWith("0")){
                Integer offset = 1, counter = 1;
                do {
                    // Consider the predecessor only if no change happened yet
                    // and it is itself a BC value.
                    if ((i == 1 || (i > 1 && !change)) && linearDates.get(i-offset).getTimexValue().startsWith("BC")){
                        if (value_i.length()>1){
                            // Same century, or the immediately following one.
                            if ((linearDates.get(i-offset).getTimexValue().startsWith("BC"+value_i.substring(0,2))) ||
                                    (linearDates.get(i-offset).getTimexValue().startsWith("BC"+String.format("%02d",(Integer.parseInt(value_i.substring(0,2))+1))))){
                                // Centuries 00/01 need a year-level comparison
                                // to avoid prefixing later years.
                                if (((value_i.startsWith("00")) && (linearDates.get(i-offset).getTimexValue().startsWith("BC00"))) ||
                                        ((value_i.startsWith("01")) && (linearDates.get(i-offset).getTimexValue().startsWith("BC01")))){
                                    if ((value_i.length()>2) && (linearDates.get(i-offset).getTimexValue().length()>4)){
                                        if (Integer.parseInt(value_i.substring(0,3)) <= Integer.parseInt(linearDates.get(i-offset).getTimexValue().substring(2,5))){
                                            newValue = "BC" + value_i;
                                            change = true;
                                            Logger.printDetail("DisambiguateHistoricDates: "+value_i+" to "+newValue+". Expression "+t_i.getCoveredText()+" due to "+linearDates.get(i-offset).getCoveredText());
                                        }
                                    }
                                }
                                else{
                                    newValue = "BC" + value_i;
                                    change = true;
                                    Logger.printDetail("DisambiguateHistoricDates: "+value_i+" to "+newValue+". Expression "+t_i.getCoveredText()+" due to "+linearDates.get(i-offset).getCoveredText());
                                }
                            }
                        }
                    }
                    // Only numeric DATE/TIME predecessors count toward the
                    // 4-candidate look-back window.
                    if ((linearDates.get(i-offset).getTimexType().equals("TIME") || linearDates.get(i-offset).getTimexType().equals("DATE")) &&
                            (linearDates.get(i-offset).getTimexValue().matches("^\\d.*"))) {
                        counter++;
                    }
                } while (counter < 5 && ++offset < i);
            }
        }
        if (!(newValue.equals(value_i))){
            // Re-index the annotation so the updated value is visible.
            t_i.removeFromIndexes();
            Logger.printDetail("DisambiguateHistoricDates: value changed to BC");
            t_i.setTimexValue(newValue);
            t_i.addToIndexes();
            linearDates.set(i, t_i);
        }
    }
}
|
python
|
def get_installed_version_of(name, location=None):
    '''Gets the installed version of the given dap or None if not installed.

    Searches in all data dirs by default, otherwise in the given one.

    :param name: name of the dap to look up
    :param location: optional single directory to restrict the search to
    :return: version string, or None when the dap is not installed
    '''
    locations = [location] if location else _data_dirs()
    for loc in locations:
        if name not in get_installed_daps(loc):
            continue
        meta = '{d}/meta/{dap}.yaml'.format(d=loc, dap=name)
        # FIX: close the metadata file deterministically (the original
        # leaked the handle returned by open()).
        with open(meta) as f:
            data = yaml.load(f, Loader=Loader)
        return str(data['version'])
    return None
|
python
|
def gen_date_by_year(year):
        """
        获取当前年的随机时间字符串 (random date string within the given year).

        :param:
            * year: (string) 长度为 4 位的年份字符串 (4-char year string; a
              4-digit int is also accepted and converted)
        :return:
            * date_str: (string) 传入年份的随机合法的日期 (a random valid
              date in the given year, formatted %Y%m%d)
        :raises ValueError: if an int or str year is not exactly 4 digits/chars.
            NOTE(review): other types (e.g. float) pass both checks unchanged
            and fail later in gen_date_by_range -- confirm whether that is
            intended.

        举例如下::
            print('--- GetRandomTime.gen_date_by_year demo ---')
            print(GetRandomTime.gen_date_by_year("2010"))
            print('---')

        执行结果::
            --- GetRandomTime.gen_date_by_year demo ---
            20100505
            ---
        """
        if isinstance(year, int) and len(str(year)) != 4:
            raise ValueError("year should be int year like 2018, but we got {}, {}".
                             format(year, type(year)))
        if isinstance(year, str) and len(year) != 4:
            raise ValueError("year should be string year like '2018', but we got {}, {}".
                             format(year, type(year)))
        # Normalize a valid int year to its string form for concatenation.
        if isinstance(year, int):
            year = str(year)
        date_str = GetRandomTime.gen_date_by_range(year + "-01-01", year + "-12-31", "%Y%m%d")
        return date_str
|
python
|
def _extract_attr_typed_value(txn_data):
    """
    ATTR and GET_ATTR can have one of 'raw', 'enc' and 'hash' fields.
    This method checks which of them is present and returns its name
    together with the value stored under it.

    :raises ValueError: if none or more than one of the fields is present
    """
    present = [field for field in ALL_ATR_KEYS if field in txn_data]
    if not present:
        raise ValueError("ATTR should have one of the following fields: {}"
                         .format(ALL_ATR_KEYS))
    if len(present) > 1:
        raise ValueError("ATTR should have only one of the following fields: {}"
                         .format(ALL_ATR_KEYS))
    key = present[0]
    return key, txn_data[key]
|
java
|
/**
 * Convenience overload of include() that passes {@code false} for the
 * boolean flag of the four-argument variant.
 * NOTE(review): the flag's meaning is defined by the delegate's signature
 * (not visible here) -- confirm before documenting it as e.g. "flush".
 *
 * @param target the include target
 * @param element the element name
 * @param parameterMap parameters forwarded to the include
 * @throws JspException propagated from the delegate
 */
public void include(String target, String element, Map<String, ?> parameterMap) throws JspException {
    include(target, element, false, parameterMap);
}
|
java
|
/**
 * Recursively deletes a file or a directory tree.
 *
 * @param file the file or directory to delete
 * @return the result of deleting {@code file} itself, or false when it
 *         did not exist in the first place
 */
public static boolean delete(File file)
{
    if (!file.exists())
        return false;
    if (file.isDirectory())
    {
        // listFiles() may return null (e.g. on an I/O error); guard it.
        File[] children = file.listFiles();
        if (children != null)
        {
            for (File child : children)
                delete(child);
        }
    }
    return file.delete();
}
/* ------------------------------------------------------------ */
/** Run a copy Job for the copy thread.
 * Copies job.in to job.out (stream pair) or job.read to job.write
 * (reader/writer pair) until EOF.  On IOException the output side is
 * closed so the peer observes the failure; errors while closing are
 * logged and ignored.
 * @param o the Job to run (cast from Object)
 */
public void handle(Object o)
{
    Job job=(Job)o;
    try {
        if (job.in!=null)
            copy(job.in,job.out,-1);
        else
            copy(job.read,job.write,-1);
    }
    catch(IOException e)
    {
        LogSupport.ignore(log,e);
        // Best effort: close whichever output side exists so the
        // counterpart of the copy is not left dangling.
        try{
            if (job.out!=null)
                job.out.close();
            if (job.write!=null)
                job.write.close();
        }
        catch(IOException e2)
        {
            LogSupport.ignore(log,e2);
        }
    }
}
/* ------------------------------------------------------------ */
/**
 * @return a shared OutputStream that discards everything written to it
 */
public static OutputStream getNullStream()
{
    return __nullStream;
}
/**
 * Closes an input stream, logging (and otherwise ignoring) any
 * IOException raised while closing.
 *
 * @param is the input stream to close; may be null, in which case
 *           nothing happens
 */
public static void close(InputStream is)
{
    if (is == null)
        return;
    try
    {
        is.close();
    }
    catch (IOException e)
    {
        LogSupport.ignore(log,e);
    }
}
/**
 * Closes an output stream, logging (and otherwise ignoring) any
 * IOException raised while closing.
 *
 * @param os the output stream to close; may be null, in which case
 *           nothing happens
 */
public static void close(OutputStream os)
{
    if (os == null)
        return;
    try
    {
        os.close();
    }
    catch (IOException e)
    {
        LogSupport.ignore(log,e);
    }
}
/* ------------------------------------------------------------ */
/* ------------------------------------------------------------ */
/** OutputStream that silently discards all writes; backs getNullStream(). */
private static class NullOS extends OutputStream
{
    public void close(){}
    public void flush(){}
    public void write(byte[]b){}
    public void write(byte[]b,int i,int l){}
    public void write(int b){}
}
// Shared singleton -- NullOS is stateless, so one instance is safe.
private static NullOS __nullStream = new NullOS();
/* ------------------------------------------------------------ */
/**
 * @return a shared Writer that discards everything written to it
 */
public static Writer getNullWriter()
{
    return __nullWriter;
}
/* ------------------------------------------------------------ */
/* ------------------------------------------------------------ */
/** Writer that silently discards all writes; backs getNullWriter(). */
private static class NullWrite extends Writer
{
    public void close(){}
    public void flush(){}
    public void write(char[]b){}
    public void write(char[]b,int o,int l){}
    public void write(int b){}
    public void write(String s){}
    public void write(String s,int o,int l){}
}
// Shared singleton -- NullWrite is stateless, so one instance is safe.
private static NullWrite __nullWriter = new NullWrite();
}
|
python
|
def get_trending_daily_not_starred(self):
    """Gets trending repositories NOT starred by user

    :return: List of daily-trending repositories which are not starred
    """
    trending = self.get_trending_daily()
    starred = self.get_starred_repos()
    # Keep only the trending entries the user has not already starred.
    return [repo for repo in trending if repo not in starred]
|
java
|
/**
 * Decodes this message's mandatory parameters from {@code b} starting
 * at {@code index}: first the super-class header fields, then the
 * 2-octet Information Request Indicators parameter.
 *
 * @param parameterFactory factory used to create the parameter object
 * @param b raw message bytes
 * @param index offset at which decoding starts
 * @return number of octets consumed from {@code b}
 * @throws ParameterException if the indicator octets cannot be parsed
 */
protected int decodeMandatoryParameters(ISUPParameterFactory parameterFactory, byte[] b, int index)
    throws ParameterException {
    int localIndex = index;
    index += super.decodeMandatoryParameters(parameterFactory, b, index);
    if (b.length - index > 1) {
        try {
            byte[] informationInd = new byte[2];
            informationInd[0] = b[index++];
            informationInd[1] = b[index++];
            InformationRequestIndicators bci = parameterFactory.createInformationRequestIndicators();
            ((AbstractISUPParameter) bci).decode(informationInd);
            this.setInformationRequestIndicators(bci);
        } catch (Exception e) {
            // AIOOBE or IllegalArg
            // NOTE(review): the message says "BackwardCallIndicators" but this
            // block parses InformationRequestIndicators -- looks like a
            // copy/paste remnant; confirm before changing the message.
            throw new ParameterException("Failed to parse BackwardCallIndicators due to: ", e);
        }
        // return 3;
        return index - localIndex;
    } else {
        // NOTE(review): unchecked exception despite the declared
        // ParameterException -- callers may not expect it.
        throw new IllegalArgumentException("byte[] must have atleast 2 octets");
    }
}
|
java
|
/** Replies an unmodifiable view of the values collected so far.
 * @return the current values, or an empty list if none were recorded
 */
@Pure
public List<Object> getCurrentValues() {
    return this.allValues == null
            ? Collections.emptyList()
            : Collections.unmodifiableList(this.allValues);
}
|
java
|
/**
 * Reads the bundle resource, registers it as the (locale-independent,
 * hence {@code null}-keyed) bundle file, unmarshals it into the XML
 * bundle content and initialises the key set.
 *
 * @throws CmsException if reading or unmarshalling the resource fails
 */
private void initXmlBundle() throws CmsException {
    CmsFile file = m_cms.readFile(m_resource);
    m_bundleFiles.put(null, m_resource);
    m_xmlBundle = CmsXmlContentFactory.unmarshal(m_cms, file);
    initKeySetForXmlBundle();
}
|
python
|
def is_length(property_name, *, min_length=1, max_length=None, present_optional=False):
    """Returns a Validation that checks the length of a string.

    Args:
        property_name: name of the validated property (used in the message).
        min_length: smallest acceptable length (default 1).
        max_length: largest acceptable length, or None for no upper bound.
        present_optional: result to return for an empty/missing value.
    """
    def check(val):
        """Checks that a value matches a scope-enclosed set of length parameters."""
        if not val:
            return present_optional
        if len(val) < min_length:
            return False
        # No upper bound means any sufficiently long value passes.
        return max_length is None or len(val) <= max_length

    # Bug fix: the two messages were swapped -- "at least" belongs to the
    # unbounded case and "between" to the bounded one (the original would
    # format None into the "between" message).
    if max_length is None:
        message = "must be at least {0} characters long".format(min_length)
    else:
        message = "must be between {0} and {1} characters long".format(min_length, max_length)
    return Validation(check, property_name, message)
|
python
|
def which(program):
    """Locate `program` in PATH

    Arguments:
        program (str): Name of program, e.g. "python"
    """
    def _is_executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    # On Windows PATHEXT lists executable suffixes; elsewhere it is empty,
    # which yields a single "" extension so the bare name is tried.
    extensions = os.getenv("PATHEXT", "").split(os.pathsep)
    for directory in os.environ["PATH"].split(os.pathsep):
        for ext in extensions:
            candidate = os.path.join(directory.strip('"'), program + ext.lower())
            if _is_executable(candidate):
                return candidate
    return None
|
java
|
/**
 * Sets the given header to the supplied integer values, replacing any
 * existing values for that name.
 *
 * @param message the message whose headers are modified
 * @param name the header name
 * @param values the integer values to set
 * @deprecated use the message's {@code headers()} API directly
 */
@Deprecated
public static void setIntHeader(HttpMessage message, String name, Iterable<Integer> values) {
    message.headers().set(name, values);
}
|
java
|
/**
 * Verifies this signature, caching the result so repeated calls are cheap.
 *
 * With signed attributes present the digest-attribute must match the
 * computed message digest AND the signature over the attributes must
 * verify; otherwise the signature is checked directly (feeding the
 * message digest first when RSA data is present).
 *
 * @return true if the signature verifies
 * @throws SignatureException if the underlying signature engine fails
 */
public boolean verify() throws SignatureException {
    // Return the memoized result if we already verified once.
    if (verified)
        return verifyResult;
    if (sigAttr != null) {
        sig.update(sigAttr);
        if (RSAdata != null) {
            byte msd[] = messageDigest.digest();
            messageDigest.update(msd);
        }
        verifyResult = (Arrays.equals(messageDigest.digest(), digestAttr) && sig.verify(digest));
    }
    else {
        if (RSAdata != null)
            sig.update(messageDigest.digest());
        verifyResult = sig.verify(digest);
    }
    verified = true;
    return verifyResult;
}
|
python
|
def _watch_progress(handler):
    """Context manager for creating a unix-domain socket and listen for
    ffmpeg progress events.

    The socket filename is yielded from the context manager and the
    socket is closed when the context manager is exited.

    NOTE(review): this generator is presumably decorated with
    ``@contextlib.contextmanager`` where it is defined -- confirm, as the
    decorator is not visible here.

    Args:
        handler: a function to be called when progress events are
            received; receives a ``key`` argument and ``value``
            argument. (The example ``show_progress`` below uses tqdm)

    Yields:
        socket_filename: the name of the socket file.
    """
    with _tmpdir_scope() as tmpdir:
        socket_filename = os.path.join(tmpdir, 'sock')
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # closing() guarantees the socket is closed on exit from the block.
        with contextlib.closing(sock):
            sock.bind(socket_filename)
            sock.listen(1)
            # Watch for progress events on a background greenlet so the
            # caller can run ffmpeg concurrently.
            child = gevent.spawn(_do_watch_progress, socket_filename, sock, handler)
            try:
                yield socket_filename
            except:
                # Propagate the caller's error, but stop the watcher first.
                gevent.kill(child)
                raise
|
java
|
/**
 * Compares this numeric value against another Value, returning a
 * ValueComparator describing the relation.
 *
 * Number-to-number comparisons yield GREATER_THAN / LESS_THAN /
 * EQUAL_TO; comparisons against inequality values may be UNKNOWN when
 * the relation cannot be decided; value lists only compare for
 * (in)equality; anything else (including null) is NOT_EQUAL_TO.
 *
 * @param o the value to compare against; may be null
 * @return the comparison outcome
 */
@Override
public ValueComparator compare(Value o) {
    if (o == null) {
        return ValueComparator.NOT_EQUAL_TO;
    }
    switch (o.getType()) {
        case NUMBERVALUE:
            NumberValue other = (NumberValue) o;
            int comp = compareTo(other);
            return comp > 0 ? ValueComparator.GREATER_THAN
                    : (comp < 0 ? ValueComparator.LESS_THAN
                    : ValueComparator.EQUAL_TO);
        case INEQUALITYNUMBERVALUE:
            InequalityNumberValue other2 = (InequalityNumberValue) o;
            // Compare against the bound of the inequality, then interpret
            // the result relative to the inequality's own comparator.
            int comp2 = num.compareTo((BigDecimal) other2.getNumber());
            switch (other2.getComparator()) {
                case EQUAL_TO:
                    return comp2 > 0 ? ValueComparator.GREATER_THAN
                            : (comp2 < 0 ? ValueComparator.LESS_THAN
                            : ValueComparator.EQUAL_TO);
                case GREATER_THAN:
                    // other2 is "> bound": we are only certainly smaller
                    // when at or below the bound.
                    return comp2 <= 0 ? ValueComparator.LESS_THAN
                            : ValueComparator.UNKNOWN;
                default:
                    return comp2 >= 0 ? ValueComparator.GREATER_THAN
                            : ValueComparator.UNKNOWN;
            }
        case VALUELIST:
            ValueList<?> vl = (ValueList<?>) o;
            return equals(vl) ? ValueComparator.EQUAL_TO
                    : ValueComparator.NOT_EQUAL_TO;
        default:
            return ValueComparator.NOT_EQUAL_TO;
    }
}
|
python
|
def replace(self, key, value, time, compress_level=-1):
    """
    Replace a key/value on the server only if the key already exists.

    :param key: Key's name
    :type key: six.string_types
    :param value: A value to be stored on server.
    :type value: object
    :param time: Time in seconds that your key will expire.
    :type time: int
    :param compress_level: How much to compress.
        0 = no compression, 1 = fastest, 9 = slowest but best,
        -1 = default compression level.
    :type compress_level: int
    :return: True if the key was replaced, False if the key does not exist
    :rtype: bool
    """
    # Delegates to the shared set/add/replace implementation with the
    # 'replace' command selected.
    return self._set_add_replace('replace', key, value, time, compress_level=compress_level)
|
java
|
/**
 * Closes a pooled connection: queues it for deferred closing when a
 * close queue is configured, otherwise closes it immediately.
 *
 * @param conn the pooled connection to close
 */
final void close(final ConnectionPoolConnection conn) {
    if(closeQueue != null) {
        closeQueue.add(conn);
    } else {
        closer.close(conn);
    }
}
|
java
|
/**
 * Marks the given path as closed or open, rewiring the vertex links
 * accordingly and updating the path's closed flag.
 *
 * @param path the path index
 * @param b_yes_no true to close the path, false to open it
 */
void setClosedPath(int path, boolean b_yes_no) {
    // No-op when the path is already in the requested state.
    if (isClosedPath(path) == b_yes_no)
        return;
    if (getPathSize(path) > 0) {
        int first = getFirstVertex(path);
        int last = getLastVertex(path);
        if (b_yes_no) {
            // make a circular list
            setNextVertex_(last, first);
            setPrevVertex_(first, last);
            // set segment to NULL (just in case)
            int vindex = getVertexIndex(last);
            setSegmentToIndex_(vindex, null);
        } else {
            // Break the cycle: -1 marks the ends of an open path.
            setNextVertex_(last, -1);
            setPrevVertex_(first, -1);
            int vindex = getVertexIndex(last);
            setSegmentToIndex_(vindex, null);
        }
    }
    int oldflags = getPathFlags_(path);
    int flags = (oldflags | (int) PathFlags_.closedPath)
            - (int) PathFlags_.closedPath;// clear the bit;
    setPathFlags_(path, flags
            | (b_yes_no ? (int) PathFlags_.closedPath : 0));
}
|
java
|
/**
 * Sets the undo-manager policy, (de)registering the "editable"/"enabled"
 * property listeners that the DEFAULT policy requires, then re-applies
 * the policy to the text component.
 *
 * @param policy the new policy (must not be null)
 * @throws NullPointerException if {@code policy} is null
 */
public final void setUndoManagerPolicy(UndoManagerPolicy policy) throws NullPointerException {
    if (policy == null) {
        throw new NullPointerException("The policy must not be null.");
    }
    // Nothing to do when the policy does not actually change.
    if (this.policy == policy) {
        return;
    }
    final UndoManagerPolicy oldPolicy = this.policy;
    this.policy = policy;
    // Only the DEFAULT policy tracks component state, so listeners are
    // removed when leaving it and added when entering it.
    if (oldPolicy == UndoManagerPolicy.DEFAULT) {
        this.textComponent.removePropertyChangeListener("editable", this);
        this.textComponent.removePropertyChangeListener("enabled", this);
    }
    if (this.policy == UndoManagerPolicy.DEFAULT) {
        this.textComponent.addPropertyChangeListener("editable", this);
        this.textComponent.addPropertyChangeListener("enabled", this);
    }
    handleUndoManagerPolicy();
}
|
python
|
def extract_variables(href):
    """Return a list of variable names used in a URI template."""
    variables = []
    # Each expression is the body of a {...} template (operator stripped by
    # the leading character class); '*' and ':N' modifiers are removed.
    for expression in re.findall(r'{[\+#\./;\?&]?([^}]+)*}', href):
        for name in re.sub(r'\*|:\d+', '', expression).split(","):
            if name not in variables:
                variables.append(name)
    return variables
|
python
|
def _decode_addr_key(self, obj_dict):
"""
Callback function to handle the decoding of the 'Addr' field.
Serf msgpack 'Addr' as an IPv6 address, and the data needs to be unpack
using socket.inet_ntop().
See: https://github.com/KushalP/serfclient-py/issues/20
:param obj_dict: A dictionary containing the msgpack map.
:return: A dictionary with the correct 'Addr' format.
"""
key = b'Addr'
if key in obj_dict:
try:
# Try to convert a packed IPv6 address.
# Note: Call raises ValueError if address is actually IPv4.
ip_addr = socket.inet_ntop(socket.AF_INET6, obj_dict[key])
# Check if the address is an IPv4 mapped IPv6 address:
# ie. ::ffff:xxx.xxx.xxx.xxx
if ip_addr.startswith('::ffff:'):
ip_addr = ip_addr.lstrip('::ffff:')
obj_dict[key] = ip_addr.encode('utf-8')
except ValueError:
# Try to convert a packed IPv4 address.
ip_addr = socket.inet_ntop(socket.AF_INET, obj_dict[key])
obj_dict[key] = ip_addr.encode('utf-8')
return obj_dict
|
java
|
/**
 * Resolves this name to a Class, caching the result.
 *
 * The literal name "var" means untyped and resolves to null.  A plain
 * class lookup in the namespace is tried first; failing that, the name
 * is evaluated as an object to work through inner classes.
 *
 * @return the resolved class, or null for "var"
 * @throws ClassNotFoundException if the name cannot be resolved
 * @throws UtilEvalError on evaluation errors
 */
synchronized public Class toClass()
    throws ClassNotFoundException, UtilEvalError
{
    // Cached from a previous resolution?
    if ( asClass != null )
        return asClass;
    reset();
    // "var" means untyped, return null class
    if ( evalName.equals("var") )
        return asClass = null;
    /* Try straightforward class name first */
    Class clas = namespace.getClass( evalName );
    if ( clas == null )
    {
        /*
            Try toObject() which knows how to work through inner classes
            and see what we end up with
        */
        Object obj = null;
        try {
            // Null interpreter and callstack references.
            // class only resolution should not require them.
            obj = toObject( null, null, true );
        } catch ( UtilEvalError e ) { } // couldn't resolve it
        if ( obj instanceof ClassIdentifier )
            clas = ((ClassIdentifier)obj).getTargetClass();
    }
    if ( clas == null )
        // NOTE(review): the message interpolates 'value' rather than
        // 'evalName' -- confirm which field holds the full name here.
        throw new ClassNotFoundException(
            "Class: " + value+ " not found in namespace");
    asClass = clas;
    return asClass;
}
|
python
|
def imresize(img, size, interpolate="bilinear", channel_first=False, **kwargs):
    """
    Resize ``img`` to ``size``.

    As default, the shape of input image has to be (height, width, channel).

    Args:
        img (numpy.ndarray): Input image.
        size (tuple of int): Output shape. The order is (width, height).
        interpolate (str): Interpolation method.
            This argument depends on the backend.
            If you want to specify this argument, you should pay much attention to which backend you use now.
            What you can select is below:
            - pil backend: ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"].
            - cv2 backend: ["nearest", "bilinear", "bicubic", "lanczos"].
            Default is "bilinear" for both backends.
        channel_first (bool):
            If True, the shape of the output array is (channel, height, width) for RGB image. Default is False.
        **kwargs: Extra options forwarded verbatim to the backend resize.

    Returns:
        numpy.ndarray
    """
    # Pure delegation: the currently-selected backend module does the work.
    return backend_manager.module.imresize(img, size, interpolate=interpolate, channel_first=channel_first, **kwargs)
|
python
|
def where(i):
    """
    Print/return where the repository identified by ``data_uoa`` lives:
    its local path, or its URL when there is no local path.

    Input:  {
              data_uoa - repo UOA to locate
              out      - if 'con', print the location to the console
            }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error
              (error)      - error text if return > 0
              path/url/... - whatever ck.find_path_to_repo returned
            }
    """
    o=i.get('out','')
    duoa=i.get('data_uoa','')

    r=ck.find_path_to_repo({'repo_uoa':duoa})
    if r['return']>0: return r

    d=r.get('dict',{})
    url=d.get('url','')

    p=r['path']
    # Remote repos have no local path; fall back to their URL.
    if p=='':
        p=url

    if o=='con':
        ck.out(p)

    return r
|
java
|
/**
 * Returns a new list with the given types ordered by
 * {@code graphQLTypeComparator()}; the input collection is left untouched.
 *
 * @param types the types to sort
 * @return a freshly-allocated, sorted list
 */
public static <T extends GraphQLType> List<T> sortGraphQLTypes(Collection<T> types) {
    List<T> result = new ArrayList<>(types);
    result.sort(graphQLTypeComparator());
    return result;
}
|
python
|
def _get_col_epsg(mapped_class, geom_attr):
    """Get the EPSG code associated with a geometry attribute.

    Arguments:

    mapped_class
        the SQLAlchemy-mapped class whose mapper holds the geometry
        property.

    geom_attr
        the key of the geometry property as defined in the SQLAlchemy
        mapper. If you use ``declarative_base`` this is the name of
        the geometry attribute as defined in the mapped class.
    """
    # The geometry column's type carries the SRID (EPSG code).
    col = class_mapper(mapped_class).get_property(geom_attr).columns[0]
    return col.type.srid
|
python
|
def __extract_directory(self, path, files, destination):
    """Extracts a single directory to the specified directory on disk.

    Args:
        path (str):
            Relative (to the root of the archive) path of the directory
            to extract.
        files (dict):
            A dictionary of files from a *.asar file header.
        destination (str):
            The path to extract the files to.
    """
    # Make sure the destination directory exists before extracting into it.
    target_dir = os.path.join(destination, path)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    for name, entry in files.items():
        entry_path = os.path.join(path, name)
        if 'files' in entry:
            # Entries with a 'files' member are sub-directories: recurse.
            self.__extract_directory(entry_path, entry['files'], destination)
        else:
            self.__extract_file(entry_path, entry, destination)
|
java
|
/**
 * Sets {@code data} at {@code key} in {@code tree}, merging with any
 * existing value: absent/null slots are overwritten, an existing List is
 * appended to, and any other existing value is wrapped together with the
 * new data into a two-element list.
 *
 * @param traversalStep accessor used to read/write the tree node
 * @param tree the container being written into
 * @param key the key within the container
 * @param data the value to set
 * @return always an Optional of the data that was set
 */
public Optional<DataType> handleFinalSet( TraversalStep traversalStep, Object tree, String key, DataType data ) {
    Optional<DataType> optSub = traversalStep.get( tree, key );
    if ( !optSub.isPresent() || optSub.get() == null ) {
        // nothing is here so just set the data
        traversalStep.overwriteSet( tree, key, data );
    }
    else if ( optSub.get() instanceof List ) {
        // there is a list here, so we just add to it
        ((List<Object>) optSub.get()).add( data );
    }
    else {
        // take whatever is there and make it the first element in an Array
        List<Object> temp = new ArrayList<>();
        temp.add( optSub.get() );
        temp.add( data );
        traversalStep.overwriteSet( tree, key, temp );
    }
    return Optional.of( data );
}
|
java
|
/**
 * Validates this signature against the given key by rebuilding the
 * COSE Sig_structure (context string, protected header, external data,
 * payload) and checking the stored signature over its encoding.
 *
 * @param cnKey the key to validate with
 * @return true if the signature is valid
 * @throws CoseException on COSE processing errors
 */
public boolean validate(OneKey cnKey) throws CoseException {
    CBORObject obj = CBORObject.NewArray();
    obj.Add(contextString);
    // An empty protected header is encoded as a zero-length byte string.
    if (objProtected.size() > 0) obj.Add(rgbProtected);
    else obj.Add(CBORObject.FromObject(new byte[0]));
    obj.Add(externalData);
    obj.Add(rgbContent);
    return validateSignature(obj.EncodeToBytes(), rgbSignature, cnKey);
}
|
java
|
/**
 * Lazily computes and caches the field positions of the negative prefix,
 * expanding the affix pattern when one exists and falling back to the
 * shared empty array otherwise.
 *
 * @return the cached field positions for the negative prefix
 */
private FieldPosition[] getNegativePrefixFieldPositions() {
    if (negativePrefixFieldPositions == null) {
        negativePrefixFieldPositions = (negPrefixPattern != null)
                ? expandAffix(negPrefixPattern)
                : EmptyFieldPositionArray;
    }
    return negativePrefixFieldPositions;
}
|
python
|
def next(self):
    """
    Handles the iteration by pulling the next line out of the stream,
    attempting to convert the response to JSON if necessary.

    :returns: Data representing what was seen in the feed
    :raises StopIteration: when the feed has been stopped
    """
    while True:
        # (Re)start the underlying request if it is not running yet.
        if not self._resp:
            self._start()
        if self._stop:
            raise StopIteration
        # Skippable lines (e.g. heartbeats) keep the loop going.
        skip, data = self._process_data(next_(self._lines))
        if not skip:
            break
    return data
|
java
|
/**
 * Logs a SOAP message: serialises its envelope with an indenting
 * transformer, pretty-prints the XML and hands it to the message logger.
 *
 * @param logMessage the log prefix/label
 * @param soapMessage the message whose envelope is logged
 * @param incoming whether the message was received (vs. sent)
 * @throws TransformerException if envelope serialisation fails
 */
protected void logSoapMessage(String logMessage, SoapMessage soapMessage, boolean incoming) throws TransformerException {
    Transformer transformer = createIndentingTransformer();
    StringWriter writer = new StringWriter();
    transformer.transform(soapMessage.getEnvelope().getSource(), new StreamResult(writer));
    logMessage(logMessage, XMLUtils.prettyPrint(writer.toString()), incoming);
}
|
python
|
def fix_vcf_line(parts, ref_base):
    """Orient VCF allele calls with respect to reference base.

    Handles cases with ref and variant swaps. strand complements.

    Returns the fixed varinfo columns plus genotypes, or None (implicitly)
    when the line is non-reference/non-informative.
    NOTE: Python 2 module (print statement below).
    """
    # Genotype swap table used when ref/alt are exchanged.
    swap = {"1/1": "0/0", "0/1": "0/1", "0/0": "1/1", "./.": "./."}
    complements = {"G": "C", "A": "T", "C": "G", "T": "A", "N": "N"}
    varinfo, genotypes = fix_line_problems(parts)
    ref, var = varinfo[3:5]
    # non-reference regions or non-informative, can't do anything
    if ref_base in [None, "N"] or set(genotypes) == set(["./."]):
        varinfo = None
    # matching reference, all good
    elif ref_base == ref:
        assert ref_base == ref, (ref_base, parts)
    # swapped reference and alternate regions
    elif ref_base == var or ref in ["N", "0"]:
        varinfo[3] = var
        varinfo[4] = ref
        genotypes = [swap[x] for x in genotypes]
    # reference is on alternate strand
    elif ref_base != ref and complements.get(ref) == ref_base:
        varinfo[3] = complements[ref]
        varinfo[4] = ",".join([complements[v] for v in var.split(",")])
    # unspecified alternative base
    elif ref_base != ref and var in ["N", "0"]:
        varinfo[3] = ref_base
        varinfo[4] = ref
        genotypes = [swap[x] for x in genotypes]
    # swapped and on alternate strand
    elif ref_base != ref and complements.get(var) == ref_base:
        varinfo[3] = complements[var]
        varinfo[4] = ",".join([complements[v] for v in ref.split(",")])
        genotypes = [swap[x] for x in genotypes]
    else:
        # Unresolvable: report and fall through (varinfo left unchanged).
        print "Did not associate ref {0} with line: {1}".format(
            ref_base, varinfo)
    if varinfo is not None:
        return varinfo + genotypes
|
java
|
/**
 * Counts how many elements of the char array equal the given value, by
 * delegating to the iterator-based overload.
 *
 * @param self the array to scan
 * @param value the value to count
 * @return the number of occurrences
 */
public static Number count(char[] self, Object value) {
    return count(InvokerHelper.asIterator(self), value);
}
|
python
|
def depends_on(self, dependency):
    """
    List of packages that depend on dependency

    :param dependency: package name, e.g. 'vext' or 'Pillow'
    """
    # NOTE: 'in' performs a containment test on each package's
    # "requires" value, so this matches substrings/members, not exact names.
    return [
        pkg
        for pkg in self.package_info()
        if dependency in pkg.get("requires", "")
    ]
|
python
|
def script(klass, args, interval):
    """
    Run the script *args* every *interval* (e.g. "10s") to perform a
    health check.
    """
    # Deprecated single-string form: wrap it in a shell invocation.
    if isinstance(args, (six.string_types, six.binary_type)):
        warnings.warn(
            "Check.script should take a list of args", DeprecationWarning)
        args = ["sh", "-c", args]

    return {'args': args, 'interval': interval}
|
java
|
/**
 * Converts a String to a Long, accepting the decimal, hexadecimal and
 * octal prefixes understood by {@link Long#decode(String)}.
 *
 * @param str the string to convert; may be null
 * @return the decoded Long, or null when the input is null
 * @throws NumberFormatException if the value cannot be decoded
 */
public static Long createLong(final String str) {
    return str == null ? null : Long.decode(str);
}
|
java
|
/**
 * Loads a BufferedImage from the supplied stream, wrapping plain
 * streams in a memory-cache image input stream (sandbox friendly), and
 * closes the input stream before returning.
 *
 * @param iis the stream to read the image from
 * @return the decoded image
 * @throws IOException if reading fails
 */
protected static BufferedImage loadImage (InputStream iis)
    throws IOException
{
    BufferedImage image;
    if (iis instanceof ImageInputStream) {
        image = ImageIO.read(iis);
    } else {
        // if we don't already have an image input stream, create a memory cache image input
        // stream to avoid causing freakout if we're used in a sandbox because ImageIO
        // otherwise use FileCacheImageInputStream which tries to create a temp file
        MemoryCacheImageInputStream mciis = new MemoryCacheImageInputStream(iis);
        image = ImageIO.read(mciis);
        try {
            // this doesn't close the underlying stream
            mciis.close();
        } catch (IOException ioe) {
            // ImageInputStreamImpl.close() throws an IOException if it's already closed;
            // there's no way to find out if it's already closed or not, so we have to check
            // the exception message to determine if this is actually warning worthy
            if (!"closed".equals(ioe.getMessage())) {
                log.warning("Failure closing image input '" + iis + "'.", ioe);
            }
        }
    }
    // finally close our input stream
    StreamUtil.close(iis);
    return image;
}
|
java
|
/**
 * Handles a multimap "remove" request from the event bus.
 *
 * Expects a JSON body with "name" and "key" (both required) and an
 * optional "value".  With a value, removes that single key/value pair
 * and replies with a boolean result; without one, removes all values
 * for the key and replies with the removed collection as a JSON array.
 * Validation failures and execution failures are reported via an
 * error-status reply.
 *
 * @param message the request message; a reply is always sent
 */
private void doMultiMapRemove(final Message<JsonObject> message) {
    final String name = message.body().getString("name");
    if (name == null) {
        message.reply(new JsonObject().putString("status", "error").putString("message", "No name specified."));
        return;
    }
    final Object key = message.body().getValue("key");
    if (key == null) {
        message.reply(new JsonObject().putString("status", "error").putString("message", "No key specified."));
        return;
    }
    final Object value = message.body().getValue("value");
    if (value != null) {
        // Remove one key/value pair off the event loop thread.
        context.execute(new Action<Boolean>() {
            @Override
            public Boolean perform() {
                return data.getMultiMap(formatKey(name)).remove(key, value);
            }
        }, new Handler<AsyncResult<Boolean>>() {
            @Override
            public void handle(AsyncResult<Boolean> result) {
                if (result.failed()) {
                    message.reply(new JsonObject().putString("status", "error").putString("message", result.cause().getMessage()));
                } else {
                    message.reply(new JsonObject().putString("status", "ok").putBoolean("result", result.result()));
                }
            }
        });
    } else {
        // No value given: remove every value stored under the key.
        context.execute(new Action<Collection<Object>>() {
            @Override
            public Collection<Object> perform() {
                return data.getMultiMap(formatKey(name)).remove(key);
            }
        }, new Handler<AsyncResult<Collection<Object>>>() {
            @Override
            public void handle(AsyncResult<Collection<Object>> result) {
                if (result.failed()) {
                    message.reply(new JsonObject().putString("status", "error").putString("message", result.cause().getMessage()));
                } else {
                    message.reply(new JsonObject().putString("status", "ok").putArray("result", new JsonArray(result.result().toArray(new Object[result.result().size()]))));
                }
            }
        });
    }
}
|
python
|
def notify_change(self, change):
    """Called when a property has changed.

    Syncs the changed property to the frontend (when a live comm exists
    and the change did not originate there) before invoking the
    user-registered callbacks via the superclass.
    """
    # Send the state to the frontend before the user-registered callbacks
    # are called.
    name = change['name']
    if self.comm is not None and self.comm.kernel is not None:
        # Make sure this isn't information that the front-end just sent us.
        if name in self.keys and self._should_send_property(name, getattr(self, name)):
            # Send new state to front-end
            self.send_state(key=name)
    super(Widget, self).notify_change(change)
|
python
|
def parse(self, vd, extent_loc):
    # type: (bytes, int) -> None
    '''
    Parse a Volume Descriptor out of a string.

    Unpacks all PVD/SVD fields, validates the Ecma-119 invariants (with
    deliberate relaxations for ISOs seen in the wild), cross-checks the
    little-/big-endian duplicated fields, and parses the embedded
    identifier, date and root directory record sub-structures.

    Parameters:
     vd - The string containing the Volume Descriptor.
     extent_loc - The location on the ISO of this Volume Descriptor.
    Returns:
     Nothing.
    '''
    ################ PVD VERSION ######################
    (descriptor_type, identifier, self.version, self.flags,
     self.system_identifier, self.volume_identifier, unused1,
     space_size_le, space_size_be, self.escape_sequences, set_size_le,
     set_size_be, seqnum_le, seqnum_be, logical_block_size_le,
     logical_block_size_be, path_table_size_le, path_table_size_be,
     self.path_table_location_le, self.optional_path_table_location_le,
     self.path_table_location_be, self.optional_path_table_location_be,
     root_dir_record, self.volume_set_identifier, pub_ident_str,
     prepare_ident_str, app_ident_str, self.copyright_file_identifier,
     self.abstract_file_identifier, self.bibliographic_file_identifier,
     vol_create_date_str, vol_mod_date_str, vol_expire_date_str,
     vol_effective_date_str, self.file_structure_version, unused2,
     self.application_use, zero_unused) = struct.unpack_from(self.FMT, vd, 0)
    # According to Ecma-119, 8.4.1, the primary volume descriptor type
    # should be 1.
    if descriptor_type != self._vd_type:
        raise pycdlibexception.PyCdlibInvalidISO('Invalid volume descriptor')
    # According to Ecma-119, 8.4.2, the identifier should be 'CD001'.
    if identifier != b'CD001':
        raise pycdlibexception.PyCdlibInvalidISO('invalid CD isoIdentification')
    # According to Ecma-119, 8.4.3, the version should be 1 (or 2 for
    # ISO9660:1999)
    expected_versions = [1]
    if self._vd_type == VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY:
        expected_versions.append(2)
    if self.version not in expected_versions:
        raise pycdlibexception.PyCdlibInvalidISO('Invalid volume descriptor version %d' % (self.version))
    # According to Ecma-119, 8.4.4, the first flags field should be 0 for a Primary.
    if self._vd_type == VOLUME_DESCRIPTOR_TYPE_PRIMARY and self.flags != 0:
        raise pycdlibexception.PyCdlibInvalidISO('PVD flags field is not zero')
    # According to Ecma-119, 8.4.5, the first unused field (after the
    # system identifier and volume identifier) should be 0.
    if unused1 != 0:
        raise pycdlibexception.PyCdlibInvalidISO('data in 2nd unused field not zero')
    # According to Ecma-119, 8.4.9, the escape sequences for a PVD should
    # be 32 zero-bytes.  However, we have seen ISOs in the wild (Fantastic
    # Night Dreams - Cotton Original (Japan).cue from the psx redump
    # collection) that don't have this set to 0, so allow anything here.
    # According to Ecma-119, 8.4.30, the file structure version should be 1.
    # However, we have seen ISOs in the wild that that don't have this
    # properly set to one. In those cases, forcibly set it to one and let
    # it pass.
    if self._vd_type == VOLUME_DESCRIPTOR_TYPE_PRIMARY:
        if self.file_structure_version != 1:
            self.file_structure_version = 1
    elif self._vd_type == VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY:
        if self.file_structure_version not in (1, 2):
            # NOTE(review): message is stale -- this branch accepts 1 or 2.
            raise pycdlibexception.PyCdlibInvalidISO('File structure version expected to be 1')
    # According to Ecma-119, 8.4.31, the second unused field should be 0.
    if unused2 != 0:
        raise pycdlibexception.PyCdlibInvalidISO('data in 2nd unused field not zero')
    # According to Ecma-119, the last 653 bytes of the VD should be all 0.
    # However, we have seen ISOs in the wild that do not follow this, so
    # relax the check.
    # Check to make sure that the little-endian and big-endian versions
    # of the parsed data agree with each other.
    if space_size_le != utils.swab_32bit(space_size_be):
        raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian space size disagree')
    self.space_size = space_size_le
    if set_size_le != utils.swab_16bit(set_size_be):
        raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian set size disagree')
    self.set_size = set_size_le
    if seqnum_le != utils.swab_16bit(seqnum_be):
        raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian seqnum disagree')
    self.seqnum = seqnum_le
    if logical_block_size_le != utils.swab_16bit(logical_block_size_be):
        raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian logical block size disagree')
    self.log_block_size = logical_block_size_le
    if path_table_size_le != utils.swab_32bit(path_table_size_be):
        raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian path table size disagree')
    self.path_tbl_size = path_table_size_le
    # Number of extents for both copies (LE and BE) of the path table.
    self.path_table_num_extents = utils.ceiling_div(self.path_tbl_size, 4096) * 2
    self.path_table_location_be = utils.swab_32bit(self.path_table_location_be)
    # Parse the embedded identifier, date and root-dir sub-structures.
    self.publisher_identifier = FileOrTextIdentifier()
    self.publisher_identifier.parse(pub_ident_str)
    self.preparer_identifier = FileOrTextIdentifier()
    self.preparer_identifier.parse(prepare_ident_str)
    self.application_identifier = FileOrTextIdentifier()
    self.application_identifier.parse(app_ident_str)
    self.volume_creation_date = dates.VolumeDescriptorDate()
    self.volume_creation_date.parse(vol_create_date_str)
    self.volume_modification_date = dates.VolumeDescriptorDate()
    self.volume_modification_date.parse(vol_mod_date_str)
    self.volume_expiration_date = dates.VolumeDescriptorDate()
    self.volume_expiration_date.parse(vol_expire_date_str)
    self.volume_effective_date = dates.VolumeDescriptorDate()
    self.volume_effective_date.parse(vol_effective_date_str)
    self.root_dir_record.parse(self, root_dir_record, None)
    self.orig_extent_loc = extent_loc
    self._initialized = True
|
python
|
def build_response(
    self, data: AwaitableOrValue[Optional[Dict[str, Any]]]
) -> AwaitableOrValue[ExecutionResult]:
    """Build response.

    Given a completed execution context and data, build the (data, errors)
    response defined by the "Response" section of the GraphQL spec.
    """
    if isawaitable(data):

        async def await_then_build():
            # Resolve the awaitable and re-enter with the concrete value.
            return self.build_response(await data)

        return await_then_build()
    data = cast(Optional[Dict[str, Any]], data)
    errors = self.errors
    if errors:
        # Sort the error list in order to make it deterministic, since we
        # might have been using parallel execution.
        errors.sort(key=lambda error: (error.locations, error.path, error.message))
        return ExecutionResult(data, errors)
    return ExecutionResult(data, None)
|
java
|
/**
 * Obtains the given lock mode on the entity via the underlying
 * EntityManager.
 *
 * @param entity the managed entity to lock
 * @param lockMode the JPA lock mode to acquire
 */
@Override
public void lock(T entity, LockModeType lockMode) {
    getEntityManager().lock(entity, lockMode);
}
|
java
|
/**
 * Reads the whole file into a byte array.
 *
 * @return the file's contents
 * @throws IORuntimeException if the file exceeds the maximum array size,
 *         cannot be fully read, or any I/O error occurs
 */
public byte[] readBytes() throws IORuntimeException {
    long len = file.length();
    // A Java array cannot hold more than Integer.MAX_VALUE elements.
    if (len >= Integer.MAX_VALUE) {
        throw new IORuntimeException("File is larger then max array size");
    }

    byte[] bytes = new byte[(int) len];
    FileInputStream in = null;
    int readLength;
    try {
        in = new FileInputStream(file);
        readLength = in.read(bytes);
        // A short read means the file changed or the read failed silently.
        if(readLength < len){
            throw new IOException(StrUtil.format("File length is [{}] but read [{}]!", len, readLength));
        }
    } catch (Exception e) {
        throw new IORuntimeException(e);
    } finally {
        IoUtil.close(in);
    }

    return bytes;
}
|
python
|
def update_from_stripe_data(self, stripe_coupon, exclude_fields=None, commit=True):
    """
    Update StripeCoupon object with data from stripe.Coupon without calling stripe.Coupon.retrieve.

    To only update the object, set the commit param to False.
    Returns the number of rows altered or None if commit is False.

    :param stripe_coupon: mapping of coupon data as returned by Stripe
    :param exclude_fields: iterable of field names to skip updating
    :param commit: when True, persist via a queryset update()
    """
    fields_to_update = self.STRIPE_FIELDS - set(exclude_fields or [])
    update_data = {key: stripe_coupon[key] for key in fields_to_update}

    # Stripe sends these as unix timestamps; convert to aware datetimes.
    for field in ["created", "redeem_by"]:
        if update_data.get(field):
            update_data[field] = timestamp_to_timezone_aware_date(update_data[field])

    # Stripe amounts are in the smallest currency unit (e.g. cents).
    if update_data.get("amount_off"):
        update_data["amount_off"] = Decimal(update_data["amount_off"]) / 100

    # also make sure the object is up to date (without the need to call database)
    for key, value in six.iteritems(update_data):
        setattr(self, key, value)

    if commit:
        return StripeCoupon.objects.filter(pk=self.pk).update(**update_data)
|
java
|
/**
 * Expands the DWR engine script template: gathers per-container settings
 * (streaming support, forgery/script-tag protection, session cookie
 * name) from the published containers, then substitutes each
 * ${...} placeholder in the template.
 *
 * @param engineScript the raw engine script template
 * @param servletContext context used to look up the DWR containers
 * @return the script with all placeholders replaced
 */
@SuppressWarnings("unchecked")
private StringBuffer buildEngineScript(StringBuffer engineScript,ServletContext servletContext) {
    List<Container> containers = ContainerUtil.getAllPublishedContainers(servletContext);
    // Defaults used when the container does not override the setting.
    String allowGetForSafariButMakeForgeryEasier = "";
    String scriptTagProtection = DwrConstants.SCRIPT_TAG_PROTECTION;
    String pollWithXhr = "";
    String sessionCookieName = "JSESSIONID";
    for(Iterator<Container> it = containers.iterator();it.hasNext();) {
        Container container = it.next();
        ServerLoadMonitor monitor = (ServerLoadMonitor) container.getBean(ServerLoadMonitor.class.getName());
        pollWithXhr = monitor.supportsStreaming() ? "false" : "true";
        if(null != container.getBean("allowGetForSafariButMakeForgeryEasier")){
            allowGetForSafariButMakeForgeryEasier = (String)container.getBean("allowGetForSafariButMakeForgeryEasier");
        }
        if(null != container.getBean("scriptTagProtection")){
            scriptTagProtection = (String)container.getBean("scriptTagProtection");
        }
        if(null != container.getBean("sessionCookieName")){
            sessionCookieName = (String)container.getBean("sessionCookieName");
        }
    }
    StringBuffer sb = new StringBuffer();
    // Walk the template and replace each recognised ${...} placeholder.
    Matcher matcher = PARAMS_PATTERN.matcher(engineScript);
    while(matcher.find()) {
        String match = matcher.group();
        if("${allowGetForSafariButMakeForgeryEasier}".equals(match)){
            matcher.appendReplacement(sb, allowGetForSafariButMakeForgeryEasier);
        }
        else if("${pollWithXhr}".equals(match)){
            matcher.appendReplacement(sb, pollWithXhr);
        }
        else if("${sessionCookieName}".equals(match)){
            matcher.appendReplacement(sb, sessionCookieName);
        }
        else if("${scriptTagProtection}".equals(match)){
            matcher.appendReplacement(sb, scriptTagProtection);
        }
        else if("${scriptSessionId}".equals(match)){
            // These two expand to client-side JS expressions evaluated at
            // page load, hence the escaped quote concatenation.
            matcher.appendReplacement(sb, "\"+JAWR.dwr_scriptSessionId+\"");
        }
        else if("${defaultPath}".equals(match)){
            matcher.appendReplacement(sb, "\"+JAWR.jawr_dwr_path+\"");
        }
    }
    DWRParamWriter.setUseDynamicSessionId(true);
    matcher.appendTail(sb);
    return sb;
}
|
java
|
/**
 * Transforms the given XML file by delegating to
 * {@code transform(InputStream, boolean)}.
 *
 * @param xmlFile the XML file to read and transform
 * @param verbose whether the delegate should run verbosely
 * @return the transformation result produced by the stream overload
 * @throws SAXException propagated from the stream overload
 * @throws IOException if the file cannot be opened or read
 */
public static String transform(File xmlFile, boolean verbose)
    throws SAXException, IOException
{
    if (logger.isLoggable(Level.FINER))
    {
        // BUG FIX: the original called logger.exiting() on method ENTRY,
        // producing unpaired trace records; also label the correct overload.
        logger.entering(className, "transform(File, boolean)");
    }
    FileInputStream fis = new FileInputStream(xmlFile);
    String result;
    try
    {
        result = transform(fis, verbose);
    }
    finally
    {
        // BUG FIX: close the stream even when the delegate throws,
        // instead of leaking the file descriptor.
        fis.close();
    }
    if (logger.isLoggable(Level.FINER))
    {
        logger.exiting(className, "transform(File, boolean)");
    }
    return result;
}
|
java
|
/**
 * Evicts the given CPDefinition from the entity cache and invalidates the
 * list finder caches so stale query results are not served.
 *
 * @param cpDefinition the definition whose cached state must be dropped
 */
@Override
public void clearCache(CPDefinition cpDefinition) {
	entityCache.removeResult(CPDefinitionModelImpl.ENTITY_CACHE_ENABLED,
		CPDefinitionImpl.class, cpDefinition.getPrimaryKey());
	// Any cached paginated or unpaginated list query may contain this entity.
	finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITH_PAGINATION);
	finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITHOUT_PAGINATION);
	clearUniqueFindersCache((CPDefinitionModelImpl)cpDefinition, true);
}
|
java
|
/**
 * Returns the seconds value as a decimal: the whole seconds plus the
 * fractional part when one is present.
 *
 * @return whole seconds, with {@code _fraction} added when it is non-null
 */
public BigDecimal getDecimalSecond()
{
    BigDecimal seconds = BigDecimal.valueOf(_second);
    return (_fraction == null) ? seconds : seconds.add(_fraction);
}
|
python
|
def _get_arg_parser(func, types, args_and_defaults, delimiter_chars):
    """Return an ArgumentParser for the given function.

    Arguments are defined from the function arguments and their associated
    defaults: arguments without a default become positional parameters,
    arguments with a default become ``--`` options.

    Args:
        func: function for which we want an ArgumentParser
        types: types to which the command line arguments should be converted to
        args_and_defaults: list of 2-tuples (arg_name, arg_default)
        delimiter_chars: characters used to separate the parameters from their
            help message in the docstring

    Returns:
        argparse.ArgumentParser: parser covering every function argument.

    Raises:
        ParseThisError: if an argument defaults to None and no explicit
            type was given for it (the type cannot be inferred from None).
    """
    _LOG.debug("Creating ArgumentParser for '%s'", func.__name__)
    (description, arg_help) = _prepare_doc(
        func, [x for (x, _) in args_and_defaults], delimiter_chars)
    parser = argparse.ArgumentParser(description=description)
    # `types` may be shorter than `args_and_defaults`; zip_longest pads the
    # missing entries with None so every argument is processed.
    for ((arg, default), arg_type) in zip_longest(args_and_defaults, types):
        help_msg = arg_help[arg]
        if default is NoDefault:
            # Required argument: no declared type means "pass through as-is".
            arg_type = arg_type or identity_type
            if arg_type == bool:
                # Required booleans become a flag that defaults to True and
                # flips to False when given on the command line.
                _LOG.debug("Adding optional flag %s.%s", func.__name__, arg)
                parser.add_argument("--%s" % arg, default=True, required=False,
                                    action="store_false",
                                    help="%s. Defaults to True if not specified"
                                    % help_msg)
            else:
                _LOG.debug("Adding positional argument %s.%s", func.__name__,
                           arg)
                parser.add_argument(arg, help=help_msg, type=arg_type)
        else:
            if default is None and arg_type is None:
                raise ParseThisError("To use default value of 'None' you need "
                                     "to specify the type of the argument '{}' "
                                     "for the method '{}'"
                                     .format(arg, func.__name__))
            # Infer the conversion type from the default when not declared.
            arg_type = arg_type or type(default)
            if arg_type == bool:
                # A boolean option toggles its default value.
                action = "store_false" if default else "store_true"
                _LOG.debug("Adding optional flag %s.%s", func.__name__, arg)
                parser.add_argument("--%s" % arg, help=help_msg,
                                    default=default, action=action)
            else:
                _LOG.debug(
                    "Adding optional argument %s.%s", func.__name__, arg)
                parser.add_argument("--%s" % arg, help=help_msg,
                                    default=default, type=arg_type)
    return parser
|
java
|
/**
 * Returns a view over the JVM system properties.
 *
 * <p>NOTE: the system properties are installed as the <em>defaults</em> of
 * the returned {@link Properties}, so values are visible through
 * {@code getProperty(...)} but not through {@code size()} or the key/entry
 * views, and mutating the returned object does not touch the real system
 * properties.</p>
 *
 * @return properties backed by the system properties, or an empty
 *         {@code Properties} when a security manager denies access
 */
public static Properties getSystemProperties() {
    try {
        return new Properties(System.getProperties());
    } catch (final SecurityException ex) {
        LowLevelLogUtil.logException("Unable to access system properties.", ex);
        // Sandboxed - can't read System Properties
        return new Properties();
    }
}
|
java
|
/**
 * Servlet entry point; delegates every request, regardless of HTTP method,
 * to the generated {@code _jspService} implementation.
 *
 * @param request  the current HTTP request
 * @param response the response to write to
 * @throws ServletException propagated from {@code _jspService}
 * @throws IOException propagated from {@code _jspService}
 */
public final void service(HttpServletRequest request, HttpServletResponse response)
    throws ServletException, IOException {
    _jspService(request, response);
}
|
java
|
/**
 * Wraps the drawable in a {@link ScaleTypeDrawable} when a scale type is
 * supplied; returns the drawable unchanged when either argument is null.
 * The work is bracketed with systrace sections for profiling.
 *
 * @param drawable the drawable to wrap; may be null
 * @param scaleType the scale type to apply; may be null
 * @param focusPoint optional focus point forwarded to the wrapper
 * @return the wrapping ScaleTypeDrawable, or the original drawable when
 *         no wrapping applies
 */
@Nullable
static Drawable maybeWrapWithScaleType(
    @Nullable Drawable drawable,
    @Nullable ScalingUtils.ScaleType scaleType,
    @Nullable PointF focusPoint) {
  if (FrescoSystrace.isTracing()) {
    FrescoSystrace.beginSection("WrappingUtils#maybeWrapWithScaleType");
  }
  // Nothing to wrap: either no drawable or no scale type requested.
  if (drawable == null || scaleType == null) {
    if (FrescoSystrace.isTracing()) {
      FrescoSystrace.endSection();
    }
    return drawable;
  }
  ScaleTypeDrawable scaleTypeDrawable = new ScaleTypeDrawable(drawable, scaleType);
  if (focusPoint != null) {
    scaleTypeDrawable.setFocusPoint(focusPoint);
  }
  if (FrescoSystrace.isTracing()) {
    FrescoSystrace.endSection();
  }
  return scaleTypeDrawable;
}
|
java
|
/**
 * Computes the output type of this Keras batch-normalization layer by
 * delegating to the underlying DL4J BatchNormalization layer.
 *
 * @param inputType the single input type of the layer
 * @return the output type reported by the underlying layer
 * @throws InvalidKerasConfigurationException if more than one input type
 *         is supplied
 */
public InputType getOutputType(InputType... inputType) throws InvalidKerasConfigurationException {
    if (inputType.length > 1)
        throw new InvalidKerasConfigurationException(
                        "Keras BatchNorm layer accepts only one input (received " + inputType.length + ")");
    // -1: layer index placeholder, per the DL4J getOutputType contract used here.
    return this.getBatchNormalizationLayer().getOutputType(-1, inputType[0]);
}
|
python
|
def _parse_name(self, config):
    """Extract the vlan name from a configuration block.

    The config block is expected to always contain the vlan name; the
    returned dict is intended to be merged into the response dict.

    Args:
        config (str): The vlan configuration block from the node's
            running configuration.

    Returns:
        dict: resource dict with the ``name`` attribute.
    """
    match = NAME_RE.search(config)
    return {'name': match.group('value')}
|
java
|
/**
 * Initializes one ServletStats instance per HTTP method (plus
 * lastModified), builds the cached stat list, and registers this servlet
 * as a moskito producer.
 *
 * @param config the servlet configuration passed by the container
 * @throws ServletException propagated from the superclass init
 */
@Override public void init(ServletConfig config) throws ServletException{
    super.init(config);
    getStats = new ServletStats("get", getMonitoringIntervals());
    postStats = new ServletStats("post", getMonitoringIntervals());
    putStats = new ServletStats("put", getMonitoringIntervals());
    headStats = new ServletStats("head", getMonitoringIntervals());
    optionsStats = new ServletStats("options", getMonitoringIntervals());
    traceStats = new ServletStats("trace", getMonitoringIntervals());
    deleteStats = new ServletStats("delete", getMonitoringIntervals());
    lastModifiedStats = new ServletStats("lastModified", getMonitoringIntervals());
    // The short list exposes only GET/POST stats; the full list adds the rest.
    cachedStatList = new ArrayList<IStats>(useShortStatList()? 2 : 8);
    cachedStatList.add(getStats);
    cachedStatList.add(postStats);
    if (!useShortStatList()){
        cachedStatList.add(deleteStats);
        cachedStatList.add(headStats);
        cachedStatList.add(optionsStats);
        cachedStatList.add(putStats);
        cachedStatList.add(traceStats);
        cachedStatList.add(lastModifiedStats);
    }
    ProducerRegistryFactory.getProducerRegistryInstance().registerProducer(this);
}
|
python
|
def save_training_samples(self, domain='', filename=''):
    """Persist data previously added via ``add_training_sample()``.

    Data is saved in the folder specified by ``Train.get_corpus_path()``.

    :param domain: Name for the domain folder. If not set, the current
        timestamp will be used. Check the README file for more
        information about domains.
    :param filename: Name for the file to save data in. If not set,
        ``file.txt`` will be used.
    """
    # Delegate straight to the trainer, which owns the corpus layout.
    self.trainer.save(domain=domain, filename=filename)
|
python
|
def queryjoin(argdict=None, **kwargs):
    """Turn a dictionary into a querystring for a URL.

    Entries whose value is None are omitted.  Keyword arguments are merged
    over ``argdict`` without mutating the caller's dictionary.

    :param argdict: mapping of parameter names to values (default: empty).
    :param kwargs: extra parameters, merged over ``argdict``.
    :returns: the querystring (no leading '?'), or None when ``argdict``
        is not a dict.

    >>> args = dict(a=1, b=2, c="foo")
    >>> queryjoin(args)
    'a=1&b=2&c=foo'
    """
    # BUG FIX: the original used a shared mutable default (argdict=dict())
    # and updated it in place with kwargs, so parameters leaked into every
    # later call — and a caller-supplied dict was mutated.  Copy instead.
    if argdict is None:
        argdict = {}
    if kwargs:
        argdict = dict(argdict)
        argdict.update(kwargs)
    if issubclass(type(argdict), dict):
        # `is not None` (identity) rather than `!= None`: only skip missing
        # values, without invoking custom __eq__ implementations.
        args = ["{}={}".format(k, v) for k, v in argdict.items()
                if v is not None]
        return "&".join(args)
|
python
|
def wait(name, url='http://localhost:8080/manager', timeout=180):
    '''
    Wait for the Tomcat Manager to load.

    Notice that if tomcat is not running we won't wait for it to start and
    the state will fail. This state can be required in the
    tomcat.war_deployed state to make sure tomcat is running and that the
    manager is running as well and ready for deployment.

    url : http://localhost:8080/manager
        The URL of the server with the Tomcat Manager webapp.
    timeout : 180
        Timeout for the HTTP request to the Tomcat Manager.

    Example:

    .. code-block:: yaml

        tomcat-service:
          service.running:
            - name: tomcat
            - enable: True

        wait-for-tomcatmanager:
          tomcat.wait:
            - timeout: 300
            - require:
              - service: tomcat-service

        jenkins:
          tomcat.war_deployed:
            - name: /ran
            - war: salt://jenkins-1.2.4.war
            - require:
              - tomcat: wait-for-tomcatmanager
    '''
    # The status check doubles as the state result: True means the manager
    # answered within the timeout.
    is_ready = __salt__['tomcat.status'](url, timeout)
    if is_ready:
        comment = 'tomcat manager is ready'
    else:
        comment = 'tomcat manager is not ready'
    return {
        'name': name,
        'result': is_ready,
        'changes': {},
        'comment': comment,
    }
|
java
|
/**
 * Invokes the DescribeProvisioningParameters operation, running the
 * standard before-execution request handlers first.
 *
 * @param request the describe request; may be replaced by the handlers
 * @return the service response
 */
@Override
public DescribeProvisioningParametersResult describeProvisioningParameters(DescribeProvisioningParametersRequest request) {
    request = beforeClientExecution(request);
    return executeDescribeProvisioningParameters(request);
}
|
java
|
/**
 * Checks whether {@code value} matches the TypeName of any of the given
 * candidate types.
 *
 * @param value the type name to look for
 * @param types the candidate types to compare against
 * @return true as soon as one candidate's TypeName equals {@code value}
 */
public static boolean isTypeIncludedIn(TypeName value, Type... types) {
    for (int i = 0; i < types.length; i++) {
        if (value.equals(typeName(types[i]))) {
            return true;
        }
    }
    return false;
}
|
python
|
def save(self):
    """
    Saves changes made to the locally cached SecurityDocument object's data
    structures to the remote database.

    Raises an HTTP error (via ``raise_for_status``) if the remote endpoint
    rejects the update.
    """
    # PUT the full JSON body of the security document back to the server.
    resp = self.r_session.put(
        self.document_url,
        data=self.json(),
        headers={'Content-Type': 'application/json'}
    )
    # Surface HTTP failures instead of silently ignoring them.
    resp.raise_for_status()
|
python
|
def show_current_number(parser, token):
    """Show the current page number, or insert it in the context.
    This tag can for example be useful to change the page title according to
    the current page number.
    To just show current page number:
    .. code-block:: html+django
    {% show_current_number %}
    If you use multiple paginations in the same page, you can get the page
    number for a specific pagination using the querystring key, e.g.:
    .. code-block:: html+django
    {% show_current_number using mykey %}
    The default page when no querystring is specified is 1. If you changed it
    in the `paginate`_ template tag, you have to call ``show_current_number``
    according to your choice, e.g.:
    .. code-block:: html+django
    {% show_current_number starting from page 3 %}
    This can be also achieved using a template variable you passed to the
    context, e.g.:
    .. code-block:: html+django
    {% show_current_number starting from page page_number %}
    You can of course mix it all (the order of arguments is important):
    .. code-block:: html+django
    {% show_current_number starting from page 3 using mykey %}
    If you want to insert the current page number in the context, without
    actually displaying it in the template, use the *as* argument, i.e.:
    .. code-block:: html+django
    {% show_current_number as page_number %}
    {% show_current_number
        starting from page 3 using mykey as page_number %}
    """
    # Validate args.
    try:
        tag_name, args = token.contents.split(None, 1)
    except ValueError:
        # No arguments were given: the whole contents is the tag name.
        # BUG FIX: the original assigned token.contents[0], which is only
        # the first *character* of the tag name, not the name itself.
        key = None
        number = None
        tag_name = token.contents
        var_name = None
    else:
        # Use a regexp to catch args.
        match = SHOW_CURRENT_NUMBER_EXPRESSION.match(args)
        if match is None:
            msg = 'Invalid arguments for %r tag' % tag_name
            raise template.TemplateSyntaxError(msg)
        # Retrieve objects.
        groupdict = match.groupdict()
        key = groupdict['key']
        number = groupdict['number']
        var_name = groupdict['var_name']
    # Call the node.
    return ShowCurrentNumberNode(number, key, var_name)
|
python
|
def allocate_mid(mids):
    """Allocate a MID that has not been used yet.

    Counts upward from "0" until a decimal string not present in *mids*
    is found; the new MID is recorded in *mids* before being returned.
    """
    candidate = 0
    while str(candidate) in mids:
        candidate += 1
    mid = str(candidate)
    mids.add(mid)
    return mid
|
java
|
/**
 * Looks up the value stored under {@code key} in the default group.
 *
 * @param key the setting key; converted to a String before the lookup
 * @return the stored value, or null when the key is absent
 */
@Override
public String get(Object key) {
    return this.groupedMap.get(DEFAULT_GROUP, Convert.toStr(key));
}
|
java
|
/**
 * Returns the index of the option {@code -flag} in the given options
 * array, or -1 when it is absent, when the array is null, or when the
 * {@code --} end-of-options marker is reached first.  Tokens that parse
 * as (negative) numbers are not treated as options.
 *
 * @param flag the option name without the leading dash
 * @param options the command-line tokens to scan; may be null
 * @return the index of the option, or -1
 */
public static int getOptionPos(String flag, String[] options) {
    if (options == null)
        return -1;
    for (int i = 0; i < options.length; i++) {
        if ((options[i].length() > 0) && (options[i].charAt(0) == '-')) {
            // Check if it is a negative number
            try {
                Double.valueOf(options[i]);
            }
            catch (NumberFormatException e) {
                // found?
                if (options[i].equals("-" + flag))
                    return i;
                // did we reach "--"?
                // BUG FIX: guard the length so a lone "-" token no longer
                // throws StringIndexOutOfBoundsException on charAt(1).
                if (options[i].length() > 1 && options[i].charAt(1) == '-')
                    return -1;
            }
        }
    }
    return -1;
}
|
java
|
/**
 * Collects every named type referenced by the methods of this service:
 * return types, parameter types and declared exception types.
 *
 * @return the collected named types, sorted by type name
 */
public Set<TypeSignature> findNamedTypes() {
    final Set<TypeSignature> collectedNamedTypes = new HashSet<>();
    methods().forEach(m -> {
        // The findNamedTypes(Set, TypeSignature) overload recurses into each
        // signature and adds any named types it finds to the accumulator.
        findNamedTypes(collectedNamedTypes, m.returnTypeSignature());
        m.parameters().forEach(p -> findNamedTypes(collectedNamedTypes, p.typeSignature()));
        m.exceptionTypeSignatures().forEach(s -> findNamedTypes(collectedNamedTypes, s));
    });
    return ImmutableSortedSet.copyOf(comparing(TypeSignature::name), collectedNamedTypes);
}
|
java
|
/**
 * Lazily creates (or returns the already created) server-side payment
 * channel state for this stored channel's protocol version.
 *
 * @param wallet the wallet that owns (or will own) the channel state
 * @param broadcaster used to announce channel transactions
 * @return the channel state for this channel
 * @throws VerificationException propagated from state construction
 * @throws IllegalStateException if the stored major version is unknown
 */
public synchronized PaymentChannelServerState getOrCreateState(Wallet wallet, TransactionBroadcaster broadcaster) throws VerificationException {
    if (state == null) {
        switch (majorVersion) {
            case 1:
                state = new PaymentChannelV1ServerState(this, wallet, broadcaster);
                break;
            case 2:
                state = new PaymentChannelV2ServerState(this, wallet, broadcaster);
                break;
            default:
                throw new IllegalStateException("Invalid version number found");
        }
    }
    // Guard against callers passing a different wallet than the state owns.
    checkArgument(wallet == state.wallet);
    return state;
}
|
python
|
def _initialize_stretching_matrix(self):
    """Assemble the vertical stretching matrix ``S``.

    For the special two-layer case (``nz == 2`` with ``rd`` and ``delta``
    set) the couplings are expressed through the deformation radius and
    the layer-thickness ratio, and the auxiliary attributes ``del1``,
    ``del2``, ``Us``, ``F1`` and ``F2`` are stored as well.  Otherwise a
    tridiagonal matrix is built from f2 / (Hi * gpi) coupling terms.
    """
    nz = self.nz
    self.S = np.zeros((nz, nz))

    if nz == 2 and self.rd and self.delta:
        # Two-layer shortcut: derive the couplings from rd and delta.
        self.del1 = self.delta / (self.delta + 1.)
        self.del2 = (self.delta + 1.)**-1
        self.Us = self.Ubg[0] - self.Ubg[1]
        self.F1 = self.rd**-2 / (1. + self.delta)
        self.F2 = self.delta * self.F1
        self.S[0, 0], self.S[0, 1] = -self.F1, self.F1
        self.S[1, 0], self.S[1, 1] = self.F2, -self.F2
        return

    # General tridiagonal assembly: each layer couples to its neighbours
    # and the diagonal balances the off-diagonal terms.
    for k in range(nz):
        if k == 0:
            above = self.f2 / self.Hi[k] / self.gpi[k]
            self.S[k, k] = -above
            self.S[k, k + 1] = above
        elif k == nz - 1:
            below = self.f2 / self.Hi[k] / self.gpi[k - 1]
            self.S[k, k] = -below
            self.S[k, k - 1] = below
        else:
            below = self.f2 / self.Hi[k] / self.gpi[k - 1]
            above = self.f2 / self.Hi[k] / self.gpi[k]
            self.S[k, k - 1] = below
            self.S[k, k] = -(above + below)
            self.S[k, k + 1] = above
|
python
|
def update(self, activity_sid=values.unset, attributes=values.unset,
           friendly_name=values.unset,
           reject_pending_reservations=values.unset):
    """
    Update the WorkerInstance.

    Arguments left at their ``values.unset`` sentinel are treated as
    "not provided" by the underlying proxy.

    :param unicode activity_sid: New value for the worker's activity_sid
    :param unicode attributes: New value for the worker's attributes
    :param unicode friendly_name: New value for the worker's friendly_name
    :param bool reject_pending_reservations: Whether to reject the
        worker's pending reservations
    :returns: Updated WorkerInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.worker.WorkerInstance
    """
    # Delegate to the context proxy, which performs the actual API call.
    return self._proxy.update(
        activity_sid=activity_sid,
        attributes=attributes,
        friendly_name=friendly_name,
        reject_pending_reservations=reject_pending_reservations,
    )
|
python
|
def escape_html(value, allowed_tags=[], allowed_attributes=[],
                allowed_styles=[]):
    """
    Template tag that sanitizes string values with bleach, escaping any
    markup that is not explicitly allowed.  It accepts lists of allowed
    tags, attributes or styles in comma separated string or list format.

    For example::

        {% load sanitizer %}
        {% escape_html "<a href=''>bar</a> <script>alert('baz')</script>" 'a,img' 'href,src' %}

    will output::

        <a href=''>bar</a> &lt;script&gt;alert('baz')&lt;/script&gt;

    On Django 1.4 you can also use keyword arguments::

        {% escape_html "<a href=''>bar</a>" allowed_tags='a,img' allowed_attributes='href,src' %}

    Non-string values are returned unchanged.
    """
    # BUG FIX: `basestring` only exists on Python 2; fall back to `str` so
    # this filter also works on Python 3 (where it used to raise NameError).
    try:
        string_types = basestring  # noqa: F821 -- Python 2 only
    except NameError:
        string_types = str
    # NOTE: the allowed_* defaults are shared lists; they are never mutated
    # here, so this is safe — do not modify them in place.
    if isinstance(value, string_types):
        value = bleach.clean(value, tags=allowed_tags,
                             attributes=allowed_attributes,
                             styles=allowed_styles, strip=False)
    return value
|
python
|
def rectify(self, slitlet2d, resampling, inverse=False):
    """Rectify slitlet using computed transformation.

    Parameters
    ----------
    slitlet2d : numpy array
        Image containing the 2d slitlet image.
    resampling : int
        1: nearest neighbour, 2: flux preserving interpolation.
    inverse : bool
        If true, the inverse rectification transformation is
        employed.

    Returns
    -------
    slitlet2d_rect : numpy array
        Rectified slitlet image.

    Raises
    ------
    ValueError
        If `resampling` is not 1 or 2, or if the input image shape does
        not match the bounding box stored on the instance.
    """
    if resampling not in [1, 2]:
        raise ValueError("Unexpected resampling value=" + str(resampling))
    # check image dimension
    # NOTE(review): the messages below mention "slitlet2d_rect" although it
    # is the *input* image being validated; consider fixing the wording.
    naxis2, naxis1 = slitlet2d.shape
    if naxis1 != self.bb_nc2_orig - self.bb_nc1_orig + 1:
        raise ValueError("Unexpected slitlet2d_rect naxis1")
    if naxis2 != self.bb_ns2_orig - self.bb_ns1_orig + 1:
        raise ValueError("Unexpected slitlet2d_rect naxis2")
    # select direct (ttd) or inverse (tti) transformation coefficients
    if inverse:
        aij = self.tti_aij
        bij = self.tti_bij
    else:
        aij = self.ttd_aij
        bij = self.ttd_bij
    # rectify image
    slitlet2d_rect = rectify2d(
        image2d=slitlet2d,
        aij=aij,
        bij=bij,
        resampling=resampling
    )
    # optionally display the result when debugging plots are enabled
    if abs(self.debugplot % 10) != 0:
        if inverse:
            self.ximshow_unrectified(slitlet2d_rect)
        else:
            self.ximshow_rectified(slitlet2d_rect)
    return slitlet2d_rect
|
python
|
def get_requirements(path="requirements/project.txt"):
    """Read a pip requirements file, skipping blank lines and comments.

    Args:
        path: requirements file to read; defaults to the project file, so
            existing callers are unaffected.

    Returns:
        list of str: one stripped requirement per non-comment line.
    """
    requirements = []
    with open(path) as f:
        # Iterate the file lazily instead of materializing readlines().
        for line in f:
            line = line.strip()
            if line and not line.startswith('#'):
                requirements.append(line)
    return requirements
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.