Diff stringlengths 5 2k | FaultInducingLabel int64 0 1 |
|---|---|
import org.apache.hc.core5.util.Args; | 1 |
PCollection<Integer> output = outputs.apply(Flatten.pCollections()); | 0 |
/*
* $Source: /home/jerenkrantz/tmp/commons/commons-convert/cvs/home/cvs/jakarta-commons//dbcp/src/java/org/apache/commons/dbcp/datasources/PoolKey.java,v $
* $Revision: 1.2 $
* $Date: 2003/08/22 16:08:32 $
*
*
* Copyright (c) 1999-2003 The Apache Software Foundation. All rights
* 3. The end-user documentation included with the redistribution, if
* any, must include the following acknowledgement:
* Apache Software Foundation - http://www.apache.org/"
* Alternately, this acknowlegement may appear in the software itself,
* if and wherever such third-party acknowlegements normally appear.
* 4. The names "The Jakarta Project", "Commons", and "Apache Software
* Foundation" must not be used to endorse or promote products derived
* from this software without prior written permission. For written
* permission, please contact apache@apache.org.
* 5. Products derived from this software may not be called "Apache"
* nor may "Apache" appear in their names without prior written
* permission of the Apache Group.
* http://www.apache.org/
*
| 0 |
entry = new InternEntry<>(t, association); | 0 |
private boolean asymmetric = false;
return asymmetric;
if (key instanceof PrivateKey) {
this.asymmetric = true;
}
this.asymmetric = true; | 0 |
* or more contributor license agreements. See the NOTICE file
* regarding copyright ownership. The ASF licenses this file
* with the License. You may obtain a copy of the License at
* KIND, either express or implied. See the License for the | 0 |
storage,
// Safe to pass false here to default to the non-experimental task store
// during restore from backup procedure.
false /** useDbSnapshotForTaskStore */); | 0 |
// !!! for convenience, bring over jdk compatibility information from the stack
protected static final String VERSION_DEF_MIN_JDK = "VersionDefinition/min_jdk";
protected static final String VERSION_DEF_MAX_JDK = "VersionDefinition/max_jdk";
VERSION_DEF_MIN_JDK,
VERSION_DEF_MAX_JDK,
// !!! also set the version string to the name for uniqueness reasons. this is
// getting replaced during install packages anyway. this is just for the entity, the
// VDF should stay the same.
xmlHolder.entity.setVersion(properties.get(VERSION_DEF_DISPLAY_NAME).toString());
VERSION_DEF_MIN_JDK,
VERSION_DEF_MAX_JDK,
// !!! for dry run, make sure the version is the entity display
setResourceProperty(res, VERSION_DEF_FULL_VERSION, xmlHolder.entity.getVersion(), ids);
setResourceProperty(resource, VERSION_DEF_MIN_JDK, stack.getMinJdk(), requestedIds);
setResourceProperty(resource, VERSION_DEF_MAX_JDK, stack.getMaxJdk(), requestedIds);
setResourceProperty(resource, VERSION_DEF_MIN_JDK, stack.getMinJdk(), requestedIds);
setResourceProperty(resource, VERSION_DEF_MAX_JDK, stack.getMaxJdk(), requestedIds); | 0 |
import static com.google.cloud.dataflow.sdk.transforms.display.DisplayDataMatchers.hasNamespace;
public void testHasNamespace() {
Matcher<DisplayData> matcher = hasDisplayItem(hasNamespace(SampleTransform.class));
assertFalse(matcher.matches(DisplayData.from(
new PTransform<PCollection<String>, PCollection<String>>(){})));
assertThat(createDisplayDataWithItem("foo", "bar"), matcher);
}
@Test
return DisplayData.from(new SampleTransform(key, value));
}
static class SampleTransform extends PTransform<PCollection<String>, PCollection<String>> {
private final String key;
private final String value;
SampleTransform(String key, String value) {
this.key = key;
this.value = value;
}
@Override
public void populateDisplayData(Builder builder) {
builder.add(key, value);
} | 0 |
import java.util.HashMap;
import java.util.Map; | 0 |
* @param method the {@link AUTH_METHOD} to use
* @param username the user name
* @param password the password
public boolean auth(AUTH_METHOD method,
if (sendCommand(authCommand, method.getAuthName())
switch(method) {
case PLAIN:
// the server sends an empty response ("+ "), so we don't have to read it.
return sendCommand(
new String(
Base64.encodeBase64(("\000" + username + "\000" + password).getBytes())
)
) == POP3Reply.OK;
case CRAM_MD5:
// get the CRAM challenge
byte[] serverChallenge = Base64.decodeBase64(getReplyString().substring(2).trim());
// get the Mac instance
Mac hmac_md5 = Mac.getInstance("HmacMD5");
hmac_md5.init(new SecretKeySpec(password.getBytes(), "HmacMD5"));
// compute the result:
byte[] hmacResult = _convertToHexString(hmac_md5.doFinal(serverChallenge)).getBytes();
// join the byte arrays to form the reply
byte[] usernameBytes = username.getBytes();
byte[] toEncode = new byte[usernameBytes.length + 1 /* the space */ + hmacResult.length];
System.arraycopy(usernameBytes, 0, toEncode, 0, usernameBytes.length);
toEncode[usernameBytes.length] = ' ';
System.arraycopy(hmacResult, 0, toEncode, usernameBytes.length + 1, hmacResult.length);
// send the reply and read the server code:
return sendCommand(new String(Base64.encodeBase64(toEncode))) == POP3Reply.OK;
default:
return false;
CRAM_MD5("CRAM-MD5");
public final String getAuthName()
return this.methodName; | 0 |
* @version $Id$
public class UniqueFilterIteratorTest<E> extends AbstractIteratorTest<E> {
public UniqueFilterIteratorTest(String testName) { | 0 |
* @author <a href="mailto:dev@felix.apache.org">Felix Project Team</a> | 0 |
class BeamSqlExample { | 0 |
private final String __subject;
private final String __from;
private final String __to;
* Must not be null.
* May be null
* May be null
if (from == null) {
throw new IllegalArgumentException("From cannot be null");
}
header.append("From: ").append(__from).append("\n");
if (__to != null) {
header.append("To: ").append(__to).append("\n");
}
header.append("Cc: ").append(__cc.toString()).append("\n");
header.append("Subject: ").append(__subject).append("\n");
header.append('\n'); // end of headers; body follows | 0 |
* {@link #setNamespaceContextPointer(NodePointer) setNamespaceContextPointer}. | 0 |
ReadWriteIT.ingest(connector, getConnectionInfo(), ROWS, COLS, 50, 0, tableName);
ReadWriteIT.verify(connector, getConnectionInfo(), ROWS, COLS, 50, 0, tableName); | 0 |
labRat = (SequencedHashMap) makeEmptyMap();
public Map makeEmptyMap() {
} | 0 |
@Override
@Override | 0 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.runners.direct;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.runners.AppliedPTransform;
import org.apache.beam.sdk.values.PValue;
/** Test utilities for the {@link DirectRunner}. */
final class DirectGraphs {
public static void performDirectOverrides(Pipeline p) {
p.replaceAll(
DirectRunner.fromOptions(PipelineOptionsFactory.create().as(DirectOptions.class))
.defaultTransformOverrides());
}
public static DirectGraph getGraph(Pipeline p) {
DirectGraphVisitor visitor = new DirectGraphVisitor();
p.traverseTopologically(visitor);
return visitor.getGraph();
}
public static AppliedPTransform<?, ?, ?> getProducer(PValue value) {
return getGraph(value.getPipeline()).getProducer(value);
}
} | 0 |
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
public class IteratorAdapter implements Iterator<Entry<Key,Value>> {
SortedKeyValueIterator<Key,Value> inner;
public IteratorAdapter(SortedKeyValueIterator<Key,Value> inner) {
public Entry<Key,Value> next() {
Entry<Key,Value> result = new KeyValue(new Key(inner.getTopKey()), new Value(inner.getTopValue()).get());
| 0 |
* {@link BasicConnFactory#BasicConnFactory(SSLSocketFactory, int, SocketConfig, ConnectionConfig)}.
* {@link BasicConnFactory#BasicConnFactory(int, SocketConfig, ConnectionConfig)}. | 0 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License. | 0 |
import org.apache.ambari.logsearch.config.api.LogSearchConfigServer;
import org.apache.ambari.logsearch.configurer.LogSearchConfigConfigurer;
private LogSearchConfigState logSearchConfigState;
@Inject
private LogSearchConfigConfigurer logSearchConfigConfigurer;
public LogSearchConfigServer getLogSearchConfig() {
return logSearchConfigConfigurer.getConfig();
}
| 0 |
private void tryClose(ServerSocketChannel s) {
try {
s.close();
} catch (IOException sse) {
LOG.error("Error while closing server socket.", sse);
}
}
public void reconfigure(InetSocketAddress addr) {
this.ss = ServerSocketChannel.open();
ss.socket().setReuseAddress(true);
LOG.info("binding to port " + addr);
ss.socket().bind(addr);
ss.configureBlocking(false);
acceptThread.setReconfiguring();
tryClose(oldSS);
acceptThread.wakeupSelector();
try {
acceptThread.join();
} catch (InterruptedException e) {
LOG.error("Error joining old acceptThread when reconfiguring client port {}",
e.getMessage());
Thread.currentThread().interrupt();
}
acceptThread = new AcceptThread(ss, addr, selectorThreads);
acceptThread.start();
LOG.error("Error reconfiguring client port to {} {}", addr, e.getMessage());
tryClose(oldSS);
| 0 |
@Override | 0 |
return entry.getValue() != null;
key2 = keyType == ReferenceStrength.HARD ? key2 : ((Reference<K>) key2).get();
return key1 == key2 || key1.equals(key2);
return (K) (parent.keyType == ReferenceStrength.HARD ? key : ((Reference<K>) key).get());
return (V) (parent.valueType == ReferenceStrength.HARD ? value : ((Reference<V>) value).get());
if (entryKey == null || entryValue == null) {
boolean r = parent.keyType != ReferenceStrength.HARD && key == ref;
r = r || parent.valueType != ReferenceStrength.HARD && value == ref;
index = parent.size() != 0 ? parent.data.length : 0;
while (e == null && i > 0) {
return nextKey == null || nextValue == null; | 0 |
/*
* Copyright 1999-2005 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cocoon.forms.formmodel;
import org.apache.avalon.framework.CascadingException;
/**
* @version $Id$
*/
public class IncompletenessException extends CascadingException {
private WidgetDefinition causingDefinition = null;
public IncompletenessException(String message, WidgetDefinition incomplete) {
super(message);
causingDefinition = incomplete;
}
public IncompletenessException(String message, WidgetDefinition incomplete , Exception e) {
super(message, e);
causingDefinition = incomplete;
}
public String toString() {
String msg = super.toString();
if(causingDefinition!=null)
msg += " (Caused by widget '"+causingDefinition.getId()+"', last modified at "+causingDefinition.getLocation()+")";
return msg;
}
} | 0 |
import org.apache.beam.runners.core.PerKeyCombineFnRunner; | 0 |
Map<String, String> checkProperties = new HashMap<>();
Map<String, Service> services = new HashMap<>();
Map<String, String> checkProperties = new HashMap<>();
Map<String, Service> services = new HashMap<>();
Map<String, String> checkProperties = new HashMap<>();
Map<String, Service> services = new HashMap<>();
Map<String, String> checkProperties = new HashMap<>();
Map<String, Service> services = new HashMap<>();
Map<String, String> checkProperties = new HashMap<>();
Map<String, Service> services = new HashMap<>();
Map<String, String> checkProperties = new HashMap<>(); | 0 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.test.functional;
import org.apache.accumulo.minicluster.MiniAccumuloCluster;
import org.apache.accumulo.minicluster.MiniAccumuloConfig;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Before;
import org.junit.rules.TemporaryFolder;
public class ConfigurableMacIT extends AbstractMacIT {
public static final Logger log = Logger.getLogger(ConfigurableMacIT.class);
public TemporaryFolder folder = new TemporaryFolder();
public MiniAccumuloCluster cluster;
@Before
public void setUp() throws Exception {
folder.create();
MiniAccumuloConfig cfg = new MiniAccumuloConfig(folder.newFolder(this.getClass().getSimpleName()), ROOT_PASSWORD);
configure(cfg);
cluster = new MiniAccumuloCluster(cfg);
cluster.start();
}
public void configure(MiniAccumuloConfig cfg) {
}
@After
public void tearDown() throws Exception {
cleanUp(cluster, folder);
}
@Override
public MiniAccumuloCluster getCluster() {
return cluster;
}
} | 0 |
private Charset charset;
/**
* @since 4.4
*/
public URIBuilder setCharset(final Charset charset) {
this.charset = charset;
return this;
}
/**
* @since 4.4
*/
public Charset getCharset() {
return charset;
}
this.queryParams = parseQuery(uri.getRawQuery(), this.charset != null ? this.charset : Consts.UTF_8);
return URLEncodedUtils.encUserInfo(userInfo, this.charset != null ? this.charset : Consts.UTF_8);
return URLEncodedUtils.encPath(path, this.charset != null ? this.charset : Consts.UTF_8);
return URLEncodedUtils.format(params, this.charset != null ? this.charset : Consts.UTF_8);
return URLEncodedUtils.encUric(fragment, this.charset != null ? this.charset : Consts.UTF_8);
this.queryParams = parseQuery(query, this.charset != null ? this.charset : Consts.UTF_8); | 0 |
/*
* Copyright 1999-2004 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cocoon.forms.event;
/**
* Interface to be implemented by Widgets on which ValueChangedListeners
* can be registered.
*/
public interface ValueChangedListenerEnabled {
public void addValueChangedListener(ValueChangedListener listener);
public void removeValueChangedListener(ValueChangedListener listener);
public boolean hasValueChangedListeners();
} | 0 |
FileSystem ns = fs.getFileSystemByPath(root); | 0 |
* Cache PCollection if SparkPipelineOptions.isCacheDisabled is false or transform isn't
* GroupByKey transformation and PCollection is used more then once in Pipeline.
public boolean shouldCache(PTransform<?, ? extends PValue> transform, PValue pvalue) {
if (serializableOptions.get().as(SparkPipelineOptions.class).isCacheDisabled()
|| transform instanceof GroupByKey) {
/**
* Add single output of transform to context map and possibly cache if it conforms {@link
* #shouldCache(PTransform, PValue)}.
*
* @param transform from which Dataset was created
* @param dataset created Dataset from transform
*/
/**
* Add output of transform to context map and possibly cache if it conforms {@link
* #shouldCache(PTransform, PValue)}. Used when PTransform has multiple outputs.
*
* @param pvalue one of multiple outputs of transform
* @param dataset created Dataset from transform
*/
/**
* Add output of transform to context map and possibly cache if it conforms {@link
* #shouldCache(PTransform, PValue)}.
*
* @param transform from which Dataset was created
* @param pvalue output of transform
* @param dataset created Dataset from transform
*/
if (shouldCache(transform, pvalue)) { | 0 |
import org.apache.hc.core5.http.nio.command.RequestExecutionCommand;
ioSession.enqueue(new RequestExecutionCommand(exchangeHandler, context), Command.Priority.NORMAL); | 0 |
import org.apache.cocoon.woody.util.I18nMessage;
validationError = new ValidationError(new I18nMessage("multivaluefield.conversionfailed", Constants.I18N_CATALOGUE)); | 0 |
import org.apache.atlas.AtlasErrorCode;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
throw new AtlasBaseException(AtlasErrorCode.INCOMPATIBLE_SUPERTYPE, superTypeName,
entityDef.getName());
throw new AtlasBaseException(AtlasErrorCode.CIRCULAR_REFERENCE, entityDef.getName(), subTypes.toString()); | 0 |
package org.apache.ambari.server.stack.upgrade;
import org.apache.ambari.server.stack.upgrade.UpgradePack.OrderService;
import org.apache.ambari.server.stack.upgrade.UpgradePack.ProcessingComponent;
import org.apache.ambari.server.stack.upgrade.orchestrate.StageWrapper;
import org.apache.ambari.server.stack.upgrade.orchestrate.StageWrapperBuilder;
import org.apache.ambari.server.stack.upgrade.orchestrate.TaskWrapper;
import org.apache.ambari.server.stack.upgrade.orchestrate.TaskWrapperBuilder;
import org.apache.ambari.server.stack.upgrade.orchestrate.UpgradeContext; | 0 |
package org.apache.accumulo.test.mapred;
import org.apache.accumulo.core.client.mapred.AccumuloInputFormat;
import org.apache.accumulo.core.client.mapred.AccumuloRowInputFormat;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.harness.AccumuloClusterHarness;
import org.junit.BeforeClass;
public class AccumuloRowInputFormatIT extends AccumuloClusterHarness {
private static List<Entry<Key,Value>> row1;
private static List<Entry<Key,Value>> row2;
private static List<Entry<Key,Value>> row3;
@BeforeClass
public static void prepareRows() {
private static void checkLists(final List<Entry<Key,Value>> first, final Iterator<Entry<Key,Value>> second) {
private static void insertList(final BatchWriter writer, final List<Entry<Key,Value>> list) throws MutationsRejectedException {
if (args.length != 1) {
throw new IllegalArgumentException("Usage : " + MRTester.class.getName() + " <table>");
String user = getAdminPrincipal();
AuthenticationToken pass = getAdminToken();
String table = args[0];
AccumuloInputFormat.setConnectorInfo(job, user, pass);
AccumuloRowInputFormat.setZooKeeperInstance(job, getCluster().getClientConfig());
final Connector conn = getConnector();
String tableName = getUniqueNames(1)[0];
conn.tableOperations().create(tableName);
writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
MRTester.main(new String[] {tableName}); | 0 |
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at | 1 |
/*
* ====================================================================
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
*/
package org.apache.hc.client5.http.client;
import java.io.IOException;
import org.apache.hc.core5.annotation.Immutable;
/**
* Signals an error in the HTTP protocol.
*
* @since 4.0
*/
@Immutable
public class ClientProtocolException extends IOException {
private static final long serialVersionUID = -5596590843227115865L;
public ClientProtocolException() {
super();
}
public ClientProtocolException(final String s) {
super(s);
}
public ClientProtocolException(final Throwable cause) {
initCause(cause);
}
public ClientProtocolException(final String message, final Throwable cause) {
super(message);
initCause(cause);
}
} | 0 |
* @param <E> the type of elements in this collection | 0 |
* $Header: /home/jerenkrantz/tmp/commons/commons-convert/cvs/home/cvs/jakarta-commons//jxpath/src/java/org/apache/commons/jxpath/ri/axes/RootContext.java,v 1.14 2004/01/18 01:43:29 dmitri Exp $
* $Revision: 1.14 $
* $Date: 2004/01/18 01:43:29 $
* @version $Revision: 1.14 $ $Date: 2004/01/18 01:43:29 $
pointer = NodePointer.newNodePointer(
new QName(null, ""),
constant,
null,
jxpathContext.getNamespaceManager()); | 0 |
public void execute(Storage.MutableStoreProvider storeProvider) {
public void execute(Storage.MutableStoreProvider storeProvider) {
public void execute(Storage.MutableStoreProvider storeProvider) {
public void execute(Storage.MutableStoreProvider storeProvider) {
public void execute(Storage.MutableStoreProvider storeProvider) { | 0 |
private void configTest(final XMLConfiguration config)
final FileHandler handler = new FileHandler(config);
final FileHandler handler = new FileHandler(config);
final FileHandler handler = new FileHandler(config);
final FileHandler handler = new FileHandler(config);
final FileHandler handler = new FileHandler(config);
final Iterator<String> iter = config.getKeys();
final FileHandler handler = new FileHandler(config);
final File saveFile = folder.newFile(TEST_SAVENAME);
final FileHandler handler2 = new FileHandler(config);
final File saveFile = folder.newFile(TEST_SAVENAME);
final File saveFile = folder.newFile(TEST_SAVENAME);
final FileHandler handler = new FileHandler(config); | 0 |
import junit.framework.Test;
import org.apache.commons.collections4.BulkTest;
public static Test suite() {
return BulkTest.makeSuite(TreeBagTest.class);
}
//-----------------------------------------------------------------------
// Bag<T> bag = makeObject();
// bag = makeFullCollection(); | 0 |
public final class TimeInterval extends Window implements Comparable<TimeInterval> {
private final long endMillis;
public TimeInterval(long startMillis, long endMillis) {
this.endMillis = endMillis;
return endMillis;
}
/**
* Returns {@code true} if this window intersects the given window.
*/
boolean intersects(TimeInterval that) {
return this.startMillis < that.endMillis
&& this.endMillis > that.startMillis;
/**
* Returns the minimal window covers both this window and the given window.
*/
TimeInterval cover(TimeInterval that) {
return new TimeInterval(
Math.min(this.startMillis, that.startMillis),
Math.max(this.endMillis, that.endMillis));
if (this == o) return true;
if (!(o instanceof TimeInterval)) return false;
TimeInterval that = (TimeInterval) o;
return startMillis == that.startMillis && endMillis == that.endMillis;
int result = (int) (startMillis ^ (startMillis >>> 32));
result = 31 * result + (int) (endMillis ^ (endMillis >>> 32));
return result;
", endMillis=" + endMillis +
cmp = endMillis - o.endMillis; | 0 |
* http://www.apache.org/licenses/LICENSE-2.0 | 0 |
import org.apache.accumulo.core.data.ArrayByteSequence;
import com.google.common.base.Preconditions;
private static final ArrayByteSequence COLF_BYTE_SEQ = new ArrayByteSequence(COLF.toString());
/**
* Extract the table ID from the colfam (inefficiently if called repeatedly)
* @param k Key to extract from
* @return The table ID
* @see #getTableId(Key,Text)
*/
public static String getTableId(Key k) {
Text buff = new Text();
getTableId(k, buff);
return buff.toString();
}
/**
* Extract the table ID from the colfam into the given {@link Text}
* @param k Key to extract from
* @param buff Text to place table ID into
*/
public static void getTableId(Key k, Text buff) {
Preconditions.checkNotNull(k);
Preconditions.checkNotNull(buff);
k.getColumnQualifier(buff);
}
/**
* Extract the file name from the row suffix into the given {@link Text}
* @param k Key to extract from
* @param buff Text to place file name into
*/
public static void getFile(Key k, Text buff) {
Preconditions.checkNotNull(k);
Preconditions.checkNotNull(buff);
Preconditions.checkArgument(COLF_BYTE_SEQ.equals(k.getColumnFamilyData()), "Given metadata replication status key with incorrect colfam");
k.getRow(buff);
} | 0 |
import org.apache.http.protocol.HttpExpectationVerifier;
private HttpExpectationVerifier expectationVerifier;
public void setExpectationVerifier(final HttpExpectationVerifier expectationVerifier) {
this.expectationVerifier = expectationVerifier;
}
if (!ver.lessEquals(HttpVersion.HTTP_1_1)) {
// Downgrade protocol version if greater than HTTP/1.1
ver = HttpVersion.HTTP_1_1;
}
HttpResponse ack = this.responseFactory.newHttpResponse(
ver, HttpStatus.SC_CONTINUE, context);
ack.getParams().setDefaults(this.params);
if (this.expectationVerifier != null) {
this.expectationVerifier.verify(request, ack, context);
} | 0 |
import org.apache.ambari.server.state.CheckHelper;
import org.apache.ambari.spi.ClusterInformation;
import org.apache.ambari.spi.RepositoryType;
import org.apache.ambari.spi.RepositoryVersion;
import org.apache.ambari.spi.upgrade.UpgradeCheckRequest;
import org.apache.ambari.spi.upgrade.UpgradeCheckResult;
import org.apache.ambari.spi.upgrade.UpgradeCheckStatus;
import org.apache.ambari.spi.upgrade.UpgradeType;
private RepositoryVersion m_repositoryVersion;
@Mock
private RepositoryVersionEntity m_repositoryVersionEntity;
private MockCheckHelper m_checkHelper = new MockCheckHelper();
Mockito.when(m_repositoryVersion.getId()).thenReturn(1L);
Mockito.when(m_repositoryVersion.getRepositoryType()).thenReturn(RepositoryType.STANDARD);
Mockito.when(m_repositoryVersionEntity.getRepositoryXml()).thenReturn(m_vdfXml);
m_checkHelper.m_clusters = clusters;
Mockito.when(m_checkHelper.m_repositoryVersionDAO.findByPK(Mockito.anyLong())).thenReturn(m_repositoryVersionEntity);
m_requiredServicesCheck.checkHelperProvider = new Provider<CheckHelper>() {
@Override
public CheckHelper get() {
return m_checkHelper;
}
};
ClusterInformation clusterInformation = new ClusterInformation(CLUSTER_NAME, false, null, null);
UpgradeCheckRequest request = new UpgradeCheckRequest(clusterInformation, UpgradeType.ROLLING,
m_repositoryVersion, null);
UpgradeCheckResult check = m_requiredServicesCheck.perform(request);
Assert.assertEquals(UpgradeCheckStatus.PASS, check.getStatus());
ClusterInformation clusterInformation = new ClusterInformation(CLUSTER_NAME, false, null, null);
UpgradeCheckRequest request = new UpgradeCheckRequest(clusterInformation, UpgradeType.ROLLING,
m_repositoryVersion, null);
UpgradeCheckResult check = m_requiredServicesCheck.perform(request);
Assert.assertEquals(UpgradeCheckStatus.FAIL, check.getStatus()); | 0 |
package org.apache.batik.gvt.filter; | 0 |
public boolean isRegistered(String sessionId) {
return registeredHosts.containsKey(sessionId);
}
| 1 |
* Copyright 2016 Seznam.cz, a.s. | 0 |
@Test
public void clearSupported() {
final WeakList<Object> list = new WeakList<Object>();
list.add("hello");
assertEquals(1, list.size());
list.clear();
assertEquals(0, list.size());
}
| 0 |
"This service is removed from the new release and must be removed before the upgrade can continue.").build()); | 0 |
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at | 0 |
CodedTupleTag<T> codedTriggerIdTag = codedTriggerIdTag(tag, window);
keyedState.lookup(codedTriggerIdTag);
keyedState.store(codedTriggerIdTag, value);
CodedTupleTag<T> codedTriggerIdTag = codedTriggerIdTag(tag, window);
keyedState.lookup(codedTriggerIdTag);
keyedState.remove(codedTriggerIdTag); | 0 |
final String normalizedHost = host.toLowerCase(Locale.ROOT);
final String normalizedCn = cn.toLowerCase(Locale.ROOT);
if (!matchIdentityStrict(normalizedHost, normalizedCn, publicSuffixMatcher)) { | 0 |
import java.util.LinkedHashMap;
layoutData = new LinkedHashMap(); | 0 |
import org.apache.commons.vfs.util.UserAuthenticatorUtils;
public static HttpClient createConnection(String hostname, int port, char[] username, char[] password, FileSystemOptions fileSystemOptions) throws FileSystemException
final HttpURL url = new HttpURL(
UserAuthenticatorUtils.toString(username),
UserAuthenticatorUtils.toString(password), | 0 |
passwords = ClientSession.passwordIteratorOf(session); | 0 |
import org.apache.aurora.scheduler.async.preemptor.PreemptorModule;
install(new PreemptorModule()); | 0 |
/**
* Copyright 2016 Seznam a.s.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ | 0 |
import com.google.cloud.dataflow.sdk.transforms.Combine;
* Wrapper that wraps a {@link com.google.cloud.dataflow.sdk.transforms.Combine.CombineFn}
public class SerializableFnAggregatorWrapper<AI, AO> implements Aggregator<AI, AO>, Accumulator<AI, Serializable> {
private Combine.CombineFn<AI, ?, AO> combiner;
public SerializableFnAggregatorWrapper(Combine.CombineFn<AI, ?, AO> combiner) {
this.combiner = combiner;
this.aa = combiner.apply(ImmutableList.of((AI) aa, value));
this.aa = combiner.apply(ImmutableList.<AI>of());
this.aa = combiner.apply(ImmutableList.of((AI) aa, (AI) other.getLocalValue()));
public String getName() {
return "Aggregator :" + combiner.toString();
}
@Override
public Combine.CombineFn<AI, ?, AO> getCombineFn() {
return combiner;
}
@Override
AO resultCopy = combiner.apply(Lists.newArrayList((AI) aa));
SerializableFnAggregatorWrapper<>(combiner); | 0 |
this.uniquePoolEntry = new PoolEntry();
uniquePoolEntry = new PoolEntry();
protected PoolEntry() {
super(SingleClientConnManager.this.connOperator, null); | 0 |
@Override
@Override
@Override
@Override | 0 |
* @param pool {@link KeyedObjectPool} of {@link PreparedStatement}s | 0 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ambari.view.huetoambarimigration.datasource.queryset.ambariqueryset.hive.historyqueryset;

/**
 * PostgreSQL flavor of the Hive history query set. The body is intentionally
 * empty: it currently inherits every query unchanged from {@code QuerySetAmbariDB}.
 */
public class PostgressQuerySetAmbariDB extends QuerySetAmbariDB {
}
@org.junit.AfterClass
public static void cleanup() throws Exception {
Security.removeProvider("org.bouncycastle.jce.provider.BouncyCastleProvider");
}
| 0 |
* This is called by the ResultPostProcessor during post processing of a result.
* This is called by the ResultPostProcessor during post processing of a result. | 0 |
@Override
@Override | 0 |
/** Gives user-defined functions access to the methods of the surrounding {@link Environment}. */
public interface Context extends Environment {}
/*
* Copyright (C) 2015 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.cloud.dataflow.sdk.runners.worker.status;
import java.io.IOException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Respond to /healthz with "ok".
*/
public class HealthzServlet extends BaseStatusServlet {
public HealthzServlet() {
super("healthz");
}
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws IOException {
response.setContentType("text/html;charset=utf-8");
response.setStatus(HttpServletResponse.SC_OK);
response.getWriter().println("ok");
}
} | 0 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.swssf.xmlsec.ext;
import org.swssf.xmlsec.ext.stax.XMLSecEvent;
import javax.xml.stream.XMLStreamException;
import java.io.OutputStream;
import java.util.List;
/**
 * A stage in a transform chain: implementations receive individual
 * {@link XMLSecEvent}s via {@link #transform} and can be wired to an
 * {@link OutputStream} or to a downstream {@link Transformer}.
 *
 * @author $Author$
 * @version $Revision$ $Date$
 */
public interface Transformer {
    /** Sets the stream this transformer writes its output to. */
    void setOutputStream(OutputStream outputStream) throws XMLSecurityException;
    // NOTE(review): raw List — the expected element type is not evident from this
    // interface; confirm contents with implementations before parameterizing.
    void setList(List list) throws XMLSecurityException;
    /** Sets the downstream transformer this stage forwards events to. */
    void setTransformer(Transformer transformer) throws XMLSecurityException;
    /** Processes a single streamed XML security event. */
    void transform(XMLSecEvent xmlSecEvent) throws XMLStreamException;
}
public CliCommand parse(String[] cmdArgs) throws CliParseException {
CommandLine cl;
try {
cl = parser.parse(options, cmdArgs);
} catch (ParseException ex) {
throw new CliParseException(ex);
}
throw new CliParseException(getUsageStr());
public boolean exec() throws CliException {
try {
ZKUtil.deleteRecursive(zk, path);
} catch (KeeperException|InterruptedException ex) {
throw new CliWrapperException(ex);
} | 0 |
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
final String[] parts = address.split(":", 2);
if (parts.length == 2) {
if (parts[1].isEmpty()) return new InetSocketAddress(parts[0], defaultPort);
return new InetSocketAddress(parts[0], Integer.parseInt(parts[1]));
}
return new InetSocketAddress(address, defaultPort);
}
return parseAddress(address.toString(), defaultPort);
}
InetSocketAddress addr = parseAddress(address, defaultPort);
return new TSocket(addr.getHostName(), addr.getPort());
}
return addr.getAddress().getHostAddress() + ":" + addr.getPort();
}
| 0 |
private boolean includeClassificationAttributes;
* @return True if classification attributes are included in search result.
*/
public boolean getIncludeClassificationAttributes() {
    // Plain accessor for the includeClassificationAttributes flag.
    final boolean value = this.includeClassificationAttributes;
    return value;
}
/**
 * Include classification attributes in search result.
 * @param includeClassificationAttributes boolean flag
 */
public void setIncludeClassificationAttributes(boolean includeClassificationAttributes) {
    this.includeClassificationAttributes = includeClassificationAttributes;
}
/**
includeClassificationAttributes == that.includeClassificationAttributes &&
return Objects.hash(query, typeName, classification, excludeDeletedEntities, includeClassificationAttributes,
limit, offset, entityFilters, tagFilters, attributes);
sb.append(", includeClassificationAttributes=").append(includeClassificationAttributes); | 0 |
private static final transient Logger log = LoggerFactory.getLogger(LogInputProcessor.class);
log.trace(stringWriter.toString()); | 0 |
String server = req.getParameter("server"); | 0 |
import org.apache.accumulo.server.AccumuloServerContext;
void removeFromMetadataTable(AccumuloServerContext context) throws Exception {
MetadataTableUtil.getMetadataTable(context).update(m);
void saveToMetadataTable(AccumuloServerContext context) throws Exception {
MetadataTableUtil.getMetadataTable(context).update(m);
| 1 |
DataflowPipelineWorkerPoolOptions, BigQueryOptions, | 0 |
.setInstructionId("1L")
.setInstructionId("1L")
.setInstructionId("2L")
.setInstructionId("2L")
.setInstructionId("3L")
.setInstructionId("3L") | 0 |
"Registering consumer for instruction {} and transform {}",
inputLocation.getPTransformId());
"Creating output consumer for instruction {} and transform {}",
outputLocation.getPTransformId()); | 0 |
import java.util.function.Predicate;
if (filter.test(entry.getKey())) { | 0 |
@Parameter(names = "--vis")
| 0 |
* http://www.apache.org/licenses/LICENSE-2.0 | 0 |
/*****************************************************************************
* Copyright (C) The Apache Software Foundation. All rights reserved. *
* ------------------------------------------------------------------------- *
* This software is published under the terms of the Apache Software License *
* version 1.1, a copy of which has been included with this distribution in *
* the LICENSE file. *
*****************************************************************************/
package org.apache.batik.css.event;
import org.w3c.dom.css.CSSValue;
/**
 * This interface must be implemented by objects that must be
 * notified of CSS value changes.
 *
 * @author <a href="mailto:stephane@hillion.org">Stephane Hillion</a>
 * @version $Id$
 */
public interface CSSValueChangeListener {
    /**
     * Called when a CSS value is changed.
     * @param property The name of the CSS property the value represents.
     * @param before The value before it changes.
     * @param after The value after it changes.
     */
    void cssValueChange(String property, CSSValue before, CSSValue after);
}
*
*
*
* @version CVS $Id$ | 0 |
* $Header: /home/jerenkrantz/tmp/commons/commons-convert/cvs/home/cvs/jakarta-commons//collections/src/java/org/apache/commons/collections/primitives/adapters/Attic/LongIteratorIterator.java,v 1.3 2003/11/07 20:08:15 rwaldhoff Exp $
*
* @deprecated This code has been moved to Jakarta Commons Primitives (http://jakarta.apache.org/commons/primitives/)
* @version $Revision: 1.3 $ $Date: 2003/11/07 20:08:15 $ | 0 |
import org.junit.Test;
public class MethodPropertyTest {
@Test | 0 |
flinkStreamEnv.getCheckpointConfig().setCheckpointTimeout(
options.getCheckpointTimeoutMillis()); | 0 |
import org.apache.sshd.common.util.NumberUtils;
default void update(byte[] input) throws Exception {
update(input, 0, NumberUtils.length(input));
} | 0 |
* @param paramTypeNames The Java classes names that represent the parameter types of the method arguments | 0 |
* Copyright 2001-2004 The Apache Software Foundation.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. | 0 |
* TODO(wfarner): Move this and other mass-delete methods to an interface that is only
* accessible by SnapshotStoreImpl. | 0 |
import java.io.InterruptedIOException;
import org.apache.http.nio.reactor.IOEventDispatch;
private IOReactorExceptionHandler exceptionHandler = null;
private IOEventDispatch eventDispatch = null;
public void execute(
final IOEventDispatch eventDispatch) throws InterruptedIOException, IOReactorException {
if (eventDispatch == null) {
throw new IllegalArgumentException("Event dispatcher may not be null");
}
this.eventDispatch = eventDispatch;
execute();
}
protected void handleRuntimeException(final RuntimeException ex) {
if (this.exceptionHandler == null || !this.exceptionHandler.handle(ex)) {
throw ex;
try {
this.eventDispatch.inputReady(session);
} catch (RuntimeException ex) {
handleRuntimeException(ex);
}
try {
this.eventDispatch.outputReady(session);
} catch (RuntimeException ex) {
handleRuntimeException(ex);
}
try {
this.eventDispatch.inputReady(session);
} catch (RuntimeException ex) {
handleRuntimeException(ex);
}
try {
this.eventDispatch.timeout(session);
} catch (RuntimeException ex) {
handleRuntimeException(ex);
}
try {
this.eventDispatch.connected(session);
} catch (RuntimeException ex) {
handleRuntimeException(ex);
}
protected void sessionClosed(final IOSession session) {
try {
this.eventDispatch.disconnected(session);
} catch (RuntimeException ex) {
handleRuntimeException(ex);
}
} | 0 |
import java.net.IDN;
return IDN.toASCII(input); | 0 |
private void runTest(List<Range> ranges, TabletLocatorImpl tab1TabletCache,
runTest(ranges, tab1TabletCache, expected, failures);
private void runTest(List<Range> ranges, TabletLocatorImpl tab1TabletCache,
runTest(ranges, metaCache, expected);
runTest(ranges, metaCache, expected);
runTest(ranges, metaCache, expected);
runTest(ranges, metaCache, expected);
runTest(ranges, metaCache, expected);
runTest(ranges, metaCache, expected);
runTest(ranges, metaCache, expected);
runTest(ranges, metaCache, expected);
runTest(ranges, metaCache, expected);
runTest(ranges, metaCache, expected);
runTest(ranges, metaCache, expected);
runTest(ranges, metaCache, expected);
runTest(ranges, metaCache, expected4, nrl());
runTest(ranges, metaCache, expected5, nrl());
runTest(ranges, metaCache, expected6, nrl());
runTest(ranges, metaCache, expected7, nrl());
runTest(ranges, metaCache, expected8, nrl());
runTest(ranges, metaCache, expected9, nrl());
runTest(ranges, metaCache, expected1);
runTest(ranges, metaCache, expected2, ranges);
runTest(ranges, metaCache, expected1, nrl(new Range(new Text("2"))));
runTest(ranges, metaCache, expected3, nrl(nr("0", "2")));
runTest(ranges, metaCache, expected4,
runTest(ranges, metaCache, expected1);
runTest(ranges, metaCache, expected2,
runTest(ranges, metaCache, expected3);
runTest(ranges, metaCache, expected);
runTest(ranges, metaCache, expected, nrl(nr("b", "o"), nr("r", "z")));
runTest(ranges, metaCache, expected, nrl(new Range("a"), nr("b", "o"), nr("r", "z")));
runTest(ranges, metaCache, expected, nrl(new Range("a"), nr("b", "o"), nr("r", "z")));
runTest(ranges, metaCache, expected, nrl(nr("b", "o"), nr("r", "z")));
runTest(ranges, metaCache, expected); | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.