index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/feature/CacheableFeatureProvider.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.feature;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.lang.StringUtils;
/**
* Cacheable Feature Provider.
*/
public abstract class CacheableFeatureProvider<T extends Feature> implements FeatureProvider {

    // Fully-qualified scope prefix of this provider; blank means root scope.
    protected final String scope;
    // Child providers, keyed by their (unscoped) sub-scope name.
    protected final ConcurrentMap<String, FeatureProvider> scopes =
            new ConcurrentHashMap<String, FeatureProvider>();
    // Cached feature instances, keyed by their (unscoped) feature name.
    protected final ConcurrentMap<String, T> features =
            new ConcurrentHashMap<String, T>();

    protected CacheableFeatureProvider(String scope) {
        this.scope = scope;
    }

    /**
     * Prefix <i>name</i> with this provider's scope ("scope.name"),
     * or return it unchanged when the scope is blank.
     *
     * @param name unscoped feature or scope name
     * @return fully-scoped name
     */
    protected String makeName(String name) {
        if (StringUtils.isBlank(scope)) {
            return name;
        } else {
            return scope + "." + name;
        }
    }

    @Override
    public T getFeature(String name) {
        // computeIfAbsent invokes makeFeature at most once per name; the previous
        // get/putIfAbsent pattern could construct a throw-away instance when two
        // threads raced on the same missing key.
        return features.computeIfAbsent(name, key -> makeFeature(makeName(key)));
    }

    /**
     * Create the feature instance for the given fully-scoped feature name.
     *
     * @param featureName fully-scoped feature name
     * @return new feature instance
     */
    protected abstract T makeFeature(String featureName);

    @Override
    public FeatureProvider scope(String name) {
        // Same at-most-once construction guarantee as getFeature.
        return scopes.computeIfAbsent(name, key -> makeProvider(makeName(key)));
    }

    /**
     * Create the child provider for the given fully-scoped scope name.
     *
     * @param fullScopeName fully-scoped scope name
     * @return new child feature provider
     */
    protected abstract FeatureProvider makeProvider(String fullScopeName);
}
| 300 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/feature/SettableFeature.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.feature;
/**
* A feature implementation that allow changing availability programmatically.
*/
public class SettableFeature extends FixedValueFeature {

    /**
     * Create a feature with an initial availability value.
     *
     * @param name feature name
     * @param initialAvailability initial availability, an integer between 0 and 100
     */
    public SettableFeature(String name, int initialAvailability) {
        super(name, initialAvailability);
    }

    /**
     * Create a feature that is initially fully available or fully unavailable.
     *
     * @param name feature name
     * @param isAvailable whether the feature starts out available
     */
    public SettableFeature(String name, boolean isAvailable) {
        super(name, isAvailable);
    }

    /**
     * Set the availability of this feature.
     *
     * <p>NOTE(review): {@code availability} is a plain (non-volatile) field inherited
     * from {@link FixedValueFeature}; visibility of this update to other threads is
     * not guaranteed — confirm callers do not rely on immediate cross-thread visibility.
     *
     * @param availability new availability value
     */
    public void set(int availability) {
        this.availability = availability;
    }

    /**
     * Make this feature fully available ({@code FEATURE_AVAILABILITY_MAX_VALUE})
     * or fully unavailable ({@code 0}).
     *
     * @param isAvailable whether the feature should be available
     */
    public void set(boolean isAvailable) {
        this.availability = isAvailable ? FEATURE_AVAILABILITY_MAX_VALUE : 0;
    }
}
| 301 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/feature/FeatureProvider.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.feature;
/**
* Provider to provide features.
*/
public interface FeatureProvider {
/**
 * Return the feature with given name.
 *
 * <p>Implementations may cache the returned instance, so repeated calls with
 * the same name can return the same object — TODO confirm per implementation.
 *
 * @param name feature name
 * @return feature instance
 */
Feature getFeature(String name);
/**
 * Provide the feature provider under scope <i>name</i>.
 *
 * <p>Scopes form a dot-separated hierarchy; features obtained from the
 * returned provider are resolved relative to that scope.
 *
 * @param name
 *          scope name.
 * @return feature provider under scope <i>name</i>
 */
FeatureProvider scope(String name);
}
| 302 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/feature/Feature.java | package org.apache.bookkeeper.feature;
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
/**
* This interface represents a feature.
*/
public interface Feature {
// Upper bound of the availability range; 0 means fully unavailable.
int FEATURE_AVAILABILITY_MAX_VALUE = 100;
/**
 * Returns a textual representation of the feature.
 *
 * @return name of the feature.
 */
String name();
/**
 * Returns the availability of this feature, an integer between 0 and 100.
 *
 * @return the availability of this feature.
 */
int availability();
/**
 * Whether this feature is available or not.
 *
 * <p>Presumably equivalent to {@code availability() > 0}, but the exact
 * threshold is implementation-defined — verify against implementations.
 *
 * @return true if this feature is available, otherwise false.
 */
boolean isAvailable();
}
| 303 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/feature/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* A <a href="https://en.wikipedia.org/wiki/Feature_toggle">feature-flag</a> system
* that is used to proportionally control what features are enabled for the system.
*
* <p>In other words, it is a way of altering the control in a system without restarting it.
 * It can be used during all stages of development; its most visible use case is in production.
* For instance, during a production release, you can enable or disable individual features,
* control the data flow through the system, thereby minimizing risk of system failures
* in real time.
*
* <p>The <i>feature provider</i> interface is pluggable and easy to integrate with
* any configuration management system.
*/
package org.apache.bookkeeper.feature; | 304 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/versioning/LongVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.versioning;
import lombok.EqualsAndHashCode;
/**
* A version object holds integer version.
*/
@EqualsAndHashCode
public class LongVersion implements Version {

    // The underlying numeric version; mutable via setLongVersion.
    protected long version;

    public LongVersion(long v) {
        this.version = v;
    }

    /**
     * Order this version against {@code v}: any concrete version occurs after
     * {@link Version#NEW}, concurrently with {@link Version#ANY}, and otherwise
     * by numeric comparison of the long values.
     */
    @Override
    public Occurred compare(Version v) {
        if (null == v) {
            throw new NullPointerException("Version is not allowed to be null.");
        }
        if (v == Version.NEW) {
            return Occurred.AFTER;
        }
        if (v == Version.ANY) {
            return Occurred.CONCURRENTLY;
        }
        if (!(v instanceof LongVersion)) {
            throw new IllegalArgumentException("Invalid version type");
        }
        long other = ((LongVersion) v).version;
        if (version == other) {
            return Occurred.CONCURRENTLY;
        }
        return version < other ? Occurred.BEFORE : Occurred.AFTER;
    }

    public long getLongVersion() {
        return version;
    }

    public LongVersion setLongVersion(long v) {
        this.version = v;
        return this;
    }

    @Override
    public String toString() {
        // Decimal rendering of the version number.
        return String.valueOf(version);
    }
}
| 305 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/versioning/Versioned.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.versioning;
import lombok.AllArgsConstructor;
import lombok.Data;
/**
* A <code>Versioned</code> value represents a value associated with a version.
*
* @param <T> value type.
*/
@Data
@AllArgsConstructor
public class Versioned<T> {
// The wrapped value (accessors generated by Lombok's @Data).
T value;
// The version associated with the value.
Version version;
}
| 306 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/versioning/Version.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.versioning;
/**
* An interface that allows us to determine if a given version happened before or after another version.
*/
public interface Version {

    /**
     * Initial version: occurs before every other version, and concurrently
     * only with itself.
     */
    Version NEW = new Version() {
        @Override
        public Occurred compare(Version v) {
            if (v == null) {
                throw new NullPointerException("Version is not allowed to be null.");
            }
            // NEW precedes everything except itself.
            return this == v ? Occurred.CONCURRENTLY : Occurred.BEFORE;
        }
    };

    /**
     * Wildcard version: compares as concurrent with any non-null version.
     */
    Version ANY = v -> {
        if (v == null) {
            throw new NullPointerException("Version is not allowed to be null.");
        }
        return Occurred.CONCURRENTLY;
    };

    /**
     * Relative ordering of two versions.
     */
    enum Occurred {
        BEFORE, AFTER, CONCURRENTLY
    }

    /**
     * Compare this version with {@code v}.
     *
     * @param v version to compare against; must not be null
     * @return whether this version occurred before, after, or concurrently with {@code v}
     */
    Occurred compare(Version v);
}
| 307 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/versioning/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* a versioning library provides an abstraction over versioned data.
*/
package org.apache.bookkeeper.versioning; | 308 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ClientConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.conf;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.bookkeeper.util.BookKeeperConstants.FEATURE_DISABLE_ENSEMBLE_CHANGE;
import io.netty.buffer.ByteBuf;
import java.util.NoSuchElementException;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.client.BookKeeper.DigestType;
import org.apache.bookkeeper.client.EnsemblePlacementPolicy;
import org.apache.bookkeeper.client.LedgerHandle;
import org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy;
import org.apache.bookkeeper.client.api.BookKeeperBuilder;
import org.apache.bookkeeper.common.util.ReflectionUtils;
import org.apache.bookkeeper.discover.RegistrationClient;
import org.apache.bookkeeper.discover.ZKRegistrationClient;
import org.apache.bookkeeper.replication.Auditor;
import org.apache.commons.configuration.ConfigurationException;
/**
* Configuration settings for client side.
*/
public class ClientConfiguration extends AbstractConfiguration<ClientConfiguration> {
// Throttle value
protected static final String THROTTLE = "throttle";
// Digest Type
protected static final String DIGEST_TYPE = "digestType";
protected static final String ENABLE_DIGEST_TYPE_AUTODETECTION = "enableDigestTypeAutodetection";
// Passwd
protected static final String PASSWD = "passwd";
// Client TLS (@deprecated since 4.7.0)
/**
* @deprecated Use {@link #TLS_KEYSTORE_TYPE}
*/
@Deprecated
protected static final String CLIENT_TLS_KEYSTORE_TYPE = "clientKeyStoreType";
/**
* @deprecated Use {@link #TLS_KEYSTORE}
*/
@Deprecated
protected static final String CLIENT_TLS_KEYSTORE = "clientKeyStore";
/**
* @deprecated Use {@link #TLS_KEYSTORE_PASSWORD_PATH}
*/
@Deprecated
protected static final String CLIENT_TLS_KEYSTORE_PASSWORD_PATH = "clientKeyStorePasswordPath";
/**
* @deprecated Use {@link #TLS_TRUSTSTORE_TYPE}
*/
@Deprecated
protected static final String CLIENT_TLS_TRUSTSTORE_TYPE = "clientTrustStoreType";
/**
* @deprecated Use {@link #TLS_TRUSTSTORE}
*/
@Deprecated
protected static final String CLIENT_TLS_TRUSTSTORE = "clientTrustStore";
/**
* @deprecated Use {@link #TLS_TRUSTSTORE_PASSWORD_PATH}
*/
@Deprecated
protected static final String CLIENT_TLS_TRUSTSTORE_PASSWORD_PATH = "clientTrustStorePasswordPath";
// NIO Parameters
protected static final String CLIENT_TCP_NODELAY = "clientTcpNoDelay";
protected static final String CLIENT_SOCK_KEEPALIVE = "clientSockKeepalive";
protected static final String CLIENT_SENDBUFFER_SIZE = "clientSendBufferSize";
protected static final String CLIENT_RECEIVEBUFFER_SIZE = "clientReceiveBufferSize";
protected static final String CLIENT_WRITEBUFFER_LOW_WATER_MARK = "clientWriteBufferLowWaterMark";
protected static final String CLIENT_WRITEBUFFER_HIGH_WATER_MARK = "clientWriteBufferHighWaterMark";
protected static final String CLIENT_CONNECT_TIMEOUT_MILLIS = "clientConnectTimeoutMillis";
protected static final String CLIENT_TCP_USER_TIMEOUT_MILLIS = "clientTcpUserTimeoutMillis";
protected static final String NUM_CHANNELS_PER_BOOKIE = "numChannelsPerBookie";
protected static final String USE_V2_WIRE_PROTOCOL = "useV2WireProtocol";
protected static final String NETTY_USE_POOLED_BUFFERS = "nettyUsePooledBuffers";
// Read Parameters
protected static final String READ_TIMEOUT = "readTimeout";
protected static final String SPECULATIVE_READ_TIMEOUT = "speculativeReadTimeout";
protected static final String FIRST_SPECULATIVE_READ_TIMEOUT = "firstSpeculativeReadTimeout";
protected static final String MAX_SPECULATIVE_READ_TIMEOUT = "maxSpeculativeReadTimeout";
protected static final String SPECULATIVE_READ_TIMEOUT_BACKOFF_MULTIPLIER =
"speculativeReadTimeoutBackoffMultiplier";
protected static final String FIRST_SPECULATIVE_READ_LAC_TIMEOUT = "firstSpeculativeReadLACTimeout";
protected static final String MAX_SPECULATIVE_READ_LAC_TIMEOUT = "maxSpeculativeReadLACTimeout";
protected static final String SPECULATIVE_READ_LAC_TIMEOUT_BACKOFF_MULTIPLIER =
"speculativeReadLACTimeoutBackoffMultiplier";
protected static final String ENABLE_PARALLEL_RECOVERY_READ = "enableParallelRecoveryRead";
protected static final String RECOVERY_READ_BATCH_SIZE = "recoveryReadBatchSize";
protected static final String REORDER_READ_SEQUENCE_ENABLED = "reorderReadSequenceEnabled";
protected static final String STICKY_READS_ENABLED = "stickyReadSEnabled";
// Add Parameters
protected static final String OPPORTUNISTIC_STRIPING = "opportunisticStriping";
protected static final String DELAY_ENSEMBLE_CHANGE = "delayEnsembleChange";
protected static final String MAX_ALLOWED_ENSEMBLE_CHANGES = "maxNumEnsembleChanges";
// Timeout Setting
protected static final String ADD_ENTRY_TIMEOUT_SEC = "addEntryTimeoutSec";
protected static final String ADD_ENTRY_QUORUM_TIMEOUT_SEC = "addEntryQuorumTimeoutSec";
protected static final String READ_ENTRY_TIMEOUT_SEC = "readEntryTimeoutSec";
protected static final String TIMEOUT_MONITOR_INTERVAL_SEC = "timeoutMonitorIntervalSec";
protected static final String TIMEOUT_TASK_INTERVAL_MILLIS = "timeoutTaskIntervalMillis";
protected static final String EXPLICIT_LAC_INTERVAL = "explicitLacInterval";
protected static final String PCBC_TIMEOUT_TIMER_TICK_DURATION_MS = "pcbcTimeoutTimerTickDurationMs";
protected static final String PCBC_TIMEOUT_TIMER_NUM_TICKS = "pcbcTimeoutTimerNumTicks";
protected static final String TIMEOUT_TIMER_TICK_DURATION_MS = "timeoutTimerTickDurationMs";
protected static final String TIMEOUT_TIMER_NUM_TICKS = "timeoutTimerNumTicks";
// backpressure configuration
protected static final String WAIT_TIMEOUT_ON_BACKPRESSURE = "waitTimeoutOnBackpressureMs";
// Bookie health check settings
protected static final String BOOKIE_HEALTH_CHECK_ENABLED = "bookieHealthCheckEnabled";
protected static final String BOOKIE_HEALTH_CHECK_INTERVAL_SECONDS = "bookieHealthCheckIntervalSeconds";
protected static final String BOOKIE_ERROR_THRESHOLD_PER_INTERVAL = "bookieErrorThresholdPerInterval";
protected static final String BOOKIE_QUARANTINE_TIME_SECONDS = "bookieQuarantineTimeSeconds";
protected static final String BOOKIE_QUARANTINE_RATIO = "bookieQuarantineRatio";
// Bookie info poll interval
protected static final String DISK_WEIGHT_BASED_PLACEMENT_ENABLED = "diskWeightBasedPlacementEnabled";
protected static final String GET_BOOKIE_INFO_INTERVAL_SECONDS = "getBookieInfoIntervalSeconds";
protected static final String GET_BOOKIE_INFO_RETRY_INTERVAL_SECONDS = "getBookieInfoRetryIntervalSeconds";
protected static final String BOOKIE_MAX_MULTIPLE_FOR_WEIGHTED_PLACEMENT =
"bookieMaxMultipleForWeightBasedPlacement";
protected static final String GET_BOOKIE_INFO_TIMEOUT_SECS = "getBookieInfoTimeoutSecs";
protected static final String START_TLS_TIMEOUT_SECS = "startTLSTimeoutSecs";
protected static final String TLS_HOSTNAME_VERIFICATION_ENABLED = "tlsHostnameVerificationEnabled";
// Number of Threads
protected static final String NUM_WORKER_THREADS = "numWorkerThreads";
protected static final String NUM_IO_THREADS = "numIOThreads";
// Ensemble Placement Policy
public static final String ENSEMBLE_PLACEMENT_POLICY = "ensemblePlacementPolicy";
protected static final String NETWORK_TOPOLOGY_STABILIZE_PERIOD_SECONDS = "networkTopologyStabilizePeriodSeconds";
protected static final String READ_REORDER_THRESHOLD_PENDING_REQUESTS = "readReorderThresholdPendingRequests";
protected static final String ENSEMBLE_PLACEMENT_POLICY_ORDER_SLOW_BOOKIES =
"ensemblePlacementPolicyOrderSlowBookies";
protected static final String BOOKIE_ADDRESS_RESOLVER_ENABLED = "bookieAddressResolverEnabled";
// Use hostname to resolve local placement info
public static final String USE_HOSTNAME_RESOLVE_LOCAL_NODE_PLACEMENT_POLICY =
"useHostnameResolveLocalNodePlacementPolicy";
// Stats
protected static final String ENABLE_TASK_EXECUTION_STATS = "enableTaskExecutionStats";
protected static final String TASK_EXECUTION_WARN_TIME_MICROS = "taskExecutionWarnTimeMicros";
// Failure History Settings
protected static final String ENABLE_BOOKIE_FAILURE_TRACKING = "enableBookieFailureTracking";
protected static final String BOOKIE_FAILURE_HISTORY_EXPIRATION_MS = "bookieFailureHistoryExpirationMSec";
// Discovery
protected static final String FOLLOW_BOOKIE_ADDRESS_TRACKING = "enableBookieAddressTracking";
// Names of dynamic features
protected static final String DISABLE_ENSEMBLE_CHANGE_FEATURE_NAME = "disableEnsembleChangeFeatureName";
// Role of the client
protected static final String CLIENT_ROLE = "clientRole";
/**
* This client will act as a standard client.
*/
public static final String CLIENT_ROLE_STANDARD = "standard";
/**
* This client will act as a system client, like the {@link Auditor}.
*/
public static final String CLIENT_ROLE_SYSTEM = "system";
// Client auth provider factory class name. It must be configured on Bookies to for the Auditor
protected static final String CLIENT_AUTH_PROVIDER_FACTORY_CLASS = "clientAuthProviderFactoryClass";
// Registration Client
protected static final String REGISTRATION_CLIENT_CLASS = "registrationClientClass";
// Logs
protected static final String CLIENT_CONNECT_BOOKIE_UNAVAILABLE_LOG_THROTTLING =
"clientConnectBookieUnavailableLogThrottling";
/**
 * Construct a default client-side configuration.
 */
public ClientConfiguration() {
super();
}
/**
 * Construct a client-side configuration using a base configuration.
 *
 * <p>Settings from {@code conf} are copied into this configuration via
 * {@code loadConf}.
 *
 * @param conf
 *          Base configuration
 */
public ClientConfiguration(AbstractConfiguration conf) {
super();
loadConf(conf);
}
/**
* Get throttle value.
*
* @return throttle value
* @see #setThrottleValue
*/
public int getThrottleValue() {
return this.getInt(THROTTLE, 5000);
}
/**
* Set throttle value.
*
* <p>Since BookKeeper process requests in asynchronous way, it will holds
* those pending request in queue. You may easily run it out of memory
* if producing too many requests than the capability of bookie servers can handle.
* To prevent that from happening, you can set a throttle value here.
*
* <p>Setting the throttle value to 0, will disable any throttling.
*
* @param throttle
* Throttle Value
* @return client configuration
*/
public ClientConfiguration setThrottleValue(int throttle) {
this.setProperty(THROTTLE, Integer.toString(throttle));
return this;
}
/**
 * Get autodetection of digest type.
 *
 * <p>Ignores provided digestType, if enabled and uses one from ledger metadata instead.
 * Incompatible with ledgers created by bookie versions &lt; 4.2.
 *
 * <p>It is turned on by default since 4.7.
 *
 * @return flag to enable/disable autodetection of digest type.
 */
public boolean getEnableDigestTypeAutodetection() {
return getBoolean(ENABLE_DIGEST_TYPE_AUTODETECTION, true);
}
/**
* Enable autodetection of digest type.
* Ignores provided digestType, if enabled and uses one from ledger metadata instead.
* Incompatible with ledger created by bookie versions < 4.2
*
* @return client configuration.
*/
public ClientConfiguration setEnableDigestTypeAutodetection(boolean enable) {
this.setProperty(ENABLE_DIGEST_TYPE_AUTODETECTION, enable);
return this;
}
/**
* Get digest type used in bookkeeper admin.
*
* @return digest type
* @see #setBookieRecoveryDigestType
*/
public DigestType getBookieRecoveryDigestType() {
return DigestType.valueOf(this.getString(DIGEST_TYPE, DigestType.CRC32.toString()));
}
/**
* Set digest type used in bookkeeper admin.
*
* <p>Digest Type and Passwd used to open ledgers for admin tool
* For now, assume that all ledgers were created with the same DigestType
* and password. In the future, this admin tool will need to know for each
* ledger, what was the DigestType and password used to create it before it
* can open it. These values will come from System properties, though fixed
* defaults are defined here.
*
* @param digestType
* Digest Type
* @return client configuration
*/
public ClientConfiguration setBookieRecoveryDigestType(DigestType digestType) {
this.setProperty(DIGEST_TYPE, digestType.toString());
return this;
}
/**
 * Get passwd used in bookkeeper admin.
 *
 * <p>Defaults to the empty string when unset; returned as UTF-8 bytes.
 *
 * @return password
 * @see #setBookieRecoveryPasswd
 */
public byte[] getBookieRecoveryPasswd() {
return this.getString(PASSWD, "").getBytes(UTF_8);
}
/**
 * Set passwd used in bookkeeper admin.
 *
 * <p>Digest Type and Passwd used to open ledgers for admin tool
 * For now, assume that all ledgers were created with the same DigestType
 * and password. In the future, this admin tool will need to know for each
 * ledger, what was the DigestType and password used to create it before it
 * can open it. These values will come from System properties, though fixed
 * defaults are defined here.
 *
 * <p>NOTE(review): the password is stored in the configuration as a plain
 * String and may appear in dumped/serialized configuration — confirm this
 * is acceptable for your deployment.
 *
 * @param passwd
 *          Password
 * @return client configuration
 */
public ClientConfiguration setBookieRecoveryPasswd(byte[] passwd) {
setProperty(PASSWD, new String(passwd, UTF_8));
return this;
}
/**
* Is tcp connection no delay.
*
* @return tcp socket nodelay setting
* @see #setClientTcpNoDelay
*/
public boolean getClientTcpNoDelay() {
return getBoolean(CLIENT_TCP_NODELAY, true);
}
/**
* Set socket nodelay setting.
*
* <p>This settings is used to enabled/disabled Nagle's algorithm, which is a means of
* improving the efficiency of TCP/IP networks by reducing the number of packets
* that need to be sent over the network. If you are sending many small messages,
* such that more than one can fit in a single IP packet, setting client.tcpnodelay
* to false to enable Nagle algorithm can provide better performance.
* <br>
* Default value is true.
*
* @param noDelay
* NoDelay setting
* @return client configuration
*/
public ClientConfiguration setClientTcpNoDelay(boolean noDelay) {
setProperty(CLIENT_TCP_NODELAY, Boolean.toString(noDelay));
return this;
}
/**
* get socket keepalive.
*
* @return socket keepalive setting
*/
public boolean getClientSockKeepalive() {
return getBoolean(CLIENT_SOCK_KEEPALIVE, true);
}
/**
* Set socket keepalive setting.
*
* <p>This setting is used to send keep-alive messages on connection-oriented sockets.
*
* @param keepalive
* KeepAlive setting
* @return client configuration
*/
public ClientConfiguration setClientSockKeepalive(boolean keepalive) {
setProperty(CLIENT_SOCK_KEEPALIVE, Boolean.toString(keepalive));
return this;
}
/**
* Get client netty channel send buffer size.
*
* @return client netty channel send buffer size
*/
public int getClientSendBufferSize() {
return getInt(CLIENT_SENDBUFFER_SIZE, 1 * 1024 * 1024);
}
/**
* Set client netty channel send buffer size.
*
* @param bufferSize
* client netty channel send buffer size.
* @return client configuration.
*/
public ClientConfiguration setClientSendBufferSize(int bufferSize) {
setProperty(CLIENT_SENDBUFFER_SIZE, bufferSize);
return this;
}
/**
* Get client netty channel receive buffer size.
*
* @return client netty channel receive buffer size.
*/
public int getClientReceiveBufferSize() {
return getInt(CLIENT_RECEIVEBUFFER_SIZE, 1 * 1024 * 1024);
}
/**
* Set client netty channel receive buffer size.
*
* @param bufferSize
* netty channel receive buffer size.
* @return client configuration.
*/
public ClientConfiguration setClientReceiveBufferSize(int bufferSize) {
setProperty(CLIENT_RECEIVEBUFFER_SIZE, bufferSize);
return this;
}
/**
* Get client netty channel write buffer low water mark.
*
* @return netty channel write buffer low water mark.
*/
public int getClientWriteBufferLowWaterMark() {
return getInt(CLIENT_WRITEBUFFER_LOW_WATER_MARK, 384 * 1024);
}
/**
* Set client netty channel write buffer low water mark.
*
* @param waterMark
* netty channel write buffer low water mark.
* @return client configuration.
*/
public ClientConfiguration setClientWriteBufferLowWaterMark(int waterMark) {
setProperty(CLIENT_WRITEBUFFER_LOW_WATER_MARK, waterMark);
return this;
}
/**
 * Returns the netty channel write-buffer high water mark in bytes, default 512 KiB.
 *
 * @return netty channel write buffer high water mark in bytes
 */
public int getClientWriteBufferHighWaterMark() {
    return this.getInt(CLIENT_WRITEBUFFER_HIGH_WATER_MARK, 512 * 1024);
}

/**
 * Sets the netty channel write-buffer high water mark in bytes.
 *
 * @param waterMark write buffer high water mark in bytes
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setClientWriteBufferHighWaterMark(int waterMark) {
    this.setProperty(CLIENT_WRITEBUFFER_HIGH_WATER_MARK, waterMark);
    return this;
}
/**
 * Returns the tick duration, in milliseconds, of the timer used for request timeouts.
 * Defaults to 100 ms.
 *
 * @return tick duration in milliseconds
 */
public long getTimeoutTimerTickDurationMs() {
    return this.getLong(TIMEOUT_TIMER_TICK_DURATION_MS, 100);
}

/**
 * Sets the tick duration, in milliseconds, of the timer used for request timeouts.
 *
 * @param tickDuration tick duration in milliseconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setTimeoutTimerTickDurationMs(long tickDuration) {
    this.setProperty(TIMEOUT_TIMER_TICK_DURATION_MS, tickDuration);
    return this;
}
/**
 * Returns the number of ticks of the timer used for request timeouts. Defaults to 1024.
 *
 * @return number of ticks used by the timeout timer
 */
public int getTimeoutTimerNumTicks() {
    return this.getInt(TIMEOUT_TIMER_NUM_TICKS, 1024);
}

/**
 * Sets the number of ticks of the timer used for request timeouts.
 *
 * @param numTicks number of ticks for the timeout timer
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setTimeoutTimerNumTicks(int numTicks) {
    this.setProperty(TIMEOUT_TIMER_NUM_TICKS, numTicks);
    return this;
}
/**
 * Returns the netty connect timeout in milliseconds.
 * Defaults to 10000 ms, matching netty's own default.
 *
 * @return client netty connect timeout in milliseconds
 */
public int getClientConnectTimeoutMillis() {
    return this.getInt(CLIENT_CONNECT_TIMEOUT_MILLIS, 10000);
}

/**
 * Sets the netty connect timeout in milliseconds.
 *
 * @param connectTimeoutMillis connect timeout in milliseconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setClientConnectTimeoutMillis(int connectTimeoutMillis) {
    this.setProperty(CLIENT_CONNECT_TIMEOUT_MILLIS, connectTimeoutMillis);
    return this;
}
/**
 * Returns the TCP user timeout (TCP_USER_TIMEOUT) in milliseconds; applied to Epoll
 * channels only. There is no default: the property must have been set.
 *
 * @return TCP user timeout in milliseconds
 * @throws java.util.NoSuchElementException if the property is not set
 */
public int getTcpUserTimeoutMillis() {
    return this.getInt(CLIENT_TCP_USER_TIMEOUT_MILLIS);
}

/**
 * Sets the TCP user timeout (TCP_USER_TIMEOUT) in milliseconds; honored by Epoll
 * channels only.
 *
 * @param tcpUserTimeoutMillis TCP user timeout in milliseconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setTcpUserTimeoutMillis(int tcpUserTimeoutMillis) {
    this.setProperty(CLIENT_TCP_USER_TIMEOUT_MILLIS, tcpUserTimeoutMillis);
    return this;
}
/**
 * Returns the number of netty channels opened per bookie. Defaults to 1.
 *
 * @return number of channels per bookie
 */
public int getNumChannelsPerBookie() {
    return this.getInt(NUM_CHANNELS_PER_BOOKIE, 1);
}

/**
 * Sets the number of netty channels opened per bookie.
 *
 * @param numChannelsPerBookie number of channels per bookie
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setNumChannelsPerBookie(int numChannelsPerBookie) {
    this.setProperty(NUM_CHANNELS_PER_BOOKIE, numChannelsPerBookie);
    return this;
}
/**
 * Returns whether the client uses the older (v2, non-protobuf) BookKeeper wire
 * protocol. Disabled by default.
 *
 * @return true if the v2 wire protocol is enabled
 */
public boolean getUseV2WireProtocol() {
    return this.getBoolean(USE_V2_WIRE_PROTOCOL, false);
}

/**
 * Enables or disables the older (v2, non-protobuf) BookKeeper wire protocol.
 *
 * @param useV2WireProtocol true to use the v2 wire protocol
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setUseV2WireProtocol(boolean useV2WireProtocol) {
    this.setProperty(USE_V2_WIRE_PROTOCOL, useV2WireProtocol);
    return this;
}
/**
 * Returns the legacy socket read timeout: the number of seconds to wait without any
 * response from a bookie before considering it failed. Defaults to 5 seconds.
 *
 * @return the current read timeout in seconds
 * @deprecated use {@link #getReadEntryTimeout()} or {@link #getAddEntryTimeout()} instead
 */
@Deprecated
public int getReadTimeout() {
    return this.getInt(READ_TIMEOUT, 5);
}

/**
 * Sets the legacy socket read timeout.
 *
 * @see #getReadTimeout()
 * @param timeout the new read timeout in seconds
 * @return this client configuration, for call chaining
 * @deprecated use {@link #setReadEntryTimeout(int)} or {@link #setAddEntryTimeout(int)} instead
 */
@Deprecated
public ClientConfiguration setReadTimeout(int timeout) {
    // Stored as a string to match the historical representation of this property.
    this.setProperty(READ_TIMEOUT, Integer.toString(timeout));
    return this;
}
/**
 * Returns the per-bookie add-entry timeout: the number of seconds to wait for an add
 * response before treating the request as failed. For backwards compatibility the
 * default falls back to the deprecated {@link #getReadTimeout()} (5 seconds).
 *
 * @return add entry timeout in seconds
 */
@SuppressWarnings("deprecation")
public int getAddEntryTimeout() {
    return this.getInt(ADD_ENTRY_TIMEOUT_SEC, getReadTimeout());
}

/**
 * Sets the per-bookie add-entry timeout.
 *
 * @see #getAddEntryTimeout()
 * @param timeout the new add entry timeout in seconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setAddEntryTimeout(int timeout) {
    this.setProperty(ADD_ENTRY_TIMEOUT_SEC, timeout);
    return this;
}
/**
 * Returns the timeout, in seconds, for a top-level add request — i.e. how long to
 * wait for the ack quorum. Defaults to -1 (no quorum timeout).
 *
 * @return add entry ack quorum timeout in seconds
 */
public int getAddEntryQuorumTimeout() {
    return this.getInt(ADD_ENTRY_QUORUM_TIMEOUT_SEC, -1);
}

/**
 * Sets the timeout for a top-level add request.
 *
 * @see #getAddEntryQuorumTimeout()
 * @param timeout the new ack quorum timeout in seconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setAddEntryQuorumTimeout(int timeout) {
    this.setProperty(ADD_ENTRY_QUORUM_TIMEOUT_SEC, timeout);
    return this;
}
/**
 * Returns the per-bookie read-entry timeout: the number of seconds to wait for a read
 * response before treating the request as failed. By default it falls back to the
 * deprecated socket timeout from {@link #getReadTimeout()}.
 *
 * @return read entry timeout in seconds
 */
@SuppressWarnings("deprecation")
public int getReadEntryTimeout() {
    return this.getInt(READ_ENTRY_TIMEOUT_SEC, getReadTimeout());
}

/**
 * Sets the per-bookie read-entry timeout.
 *
 * @see #getReadEntryTimeout()
 * @param timeout the new read entry timeout in seconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setReadEntryTimeout(int timeout) {
    this.setProperty(READ_ENTRY_TIMEOUT_SEC, timeout);
    return this;
}
/**
 * Get the interval between successive executions of the operation timeout monitor. This value is in seconds.
 *
 * <p>When the property is unset, the default is half the smallest of the add-quorum,
 * add-entry and read-entry timeouts, floored at 1 second.
 *
 * <p>NOTE(review): {@link #getAddEntryQuorumTimeout()} defaults to -1 (disabled), which makes
 * {@code minTimeout} negative; {@code -1 / 2 == 0} under integer division, so the computed
 * default collapses to 1 second rather than being derived from the other two timeouts —
 * confirm this is intended.
 *
 * @see #setTimeoutMonitorIntervalSec(long)
 * @return the interval at which request timeouts will be checked
 */
public long getTimeoutMonitorIntervalSec() {
    // Smallest of the three operation timeouts (may be negative when quorum timeout is disabled).
    int minTimeout = Math.min(Math.min(getAddEntryQuorumTimeout(),
            getAddEntryTimeout()), getReadEntryTimeout());
    return getLong(TIMEOUT_MONITOR_INTERVAL_SEC, Math.max(minTimeout / 2, 1));
}
/**
 * Sets the interval, in seconds, between successive runs of the operation timeout
 * monitor. Every interval, all outstanding add and read operations are inspected and
 * any that have exceeded their configured timeout are errored out.
 *
 * <p>Choose a value that is a fraction of {@link #getAddEntryQuorumTimeout},
 * {@link #getAddEntryTimeout} and {@link #getReadEntryTimeout} so those timeouts
 * fire in a timely fashion.
 *
 * @param timeoutInterval the timeout monitor interval, in seconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setTimeoutMonitorIntervalSec(long timeoutInterval) {
    // Stored as a string to match the historical representation of this property.
    this.setProperty(TIMEOUT_MONITOR_INTERVAL_SEC, Long.toString(timeoutInterval));
    return this;
}
/**
 * Returns the interval, in milliseconds, between runs of the PerChannelBookieClient's
 * TimeoutTask, which errors out entries that have timed out. Kept aggressive so pending
 * requests do not accumulate behind slow responses. When unset, defaults to half the
 * smaller of the add-entry and read-entry timeouts, converted to milliseconds.
 *
 * @return the interval at which request timeouts will be checked
 */
@Deprecated
public long getTimeoutTaskIntervalMillis() {
    final long smallerTimeoutMs =
            TimeUnit.SECONDS.toMillis(Math.min(getAddEntryTimeout(), getReadEntryTimeout()));
    return this.getLong(TIMEOUT_TASK_INTERVAL_MILLIS, smallerTimeoutMs / 2);
}

/**
 * Sets the interval, in milliseconds, between runs of the PerChannelBookieClient's
 * TimeoutTask.
 *
 * @param timeoutMillis timeout-task interval in milliseconds
 * @return this client configuration, for call chaining
 */
@Deprecated
public ClientConfiguration setTimeoutTaskIntervalMillis(long timeoutMillis) {
    this.setProperty(TIMEOUT_TASK_INTERVAL_MILLIS, Long.toString(timeoutMillis));
    return this;
}
/**
 * Returns the configured interval, in milliseconds, between explicit LAC messages sent
 * to bookies. LACs are normally piggy-backed on writes; a value of 0 (the default)
 * disables sending explicit LACs entirely.
 *
 * @return interval between explicit LACs, in milliseconds
 */
public int getExplictLacInterval() {
    return this.getInt(EXPLICIT_LAC_INTERVAL, 0);
}

/**
 * Sets the interval at which the need for sending an explicit LAC is checked.
 *
 * @param interval number of milliseconds between explicit-LAC checks
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setExplictLacInterval(int interval) {
    this.setProperty(EXPLICIT_LAC_INTERVAL, interval);
    return this;
}
/**
 * Returns the tick duration, in milliseconds, of the HashedWheelTimer used by the
 * PerChannelBookieClient to time out requests. Defaults to 100 ms.
 *
 * @return tick duration in milliseconds
 */
@Deprecated
public long getPCBCTimeoutTimerTickDurationMs() {
    return this.getLong(PCBC_TIMEOUT_TIMER_TICK_DURATION_MS, 100);
}

/**
 * Sets the tick duration, in milliseconds, of the HashedWheelTimer used by the
 * PerChannelBookieClient to time out requests. Understand HashedWheelTimer's
 * behavior before changing this setting.
 *
 * @see #getPCBCTimeoutTimerTickDurationMs()
 * @param tickDuration tick duration in milliseconds
 * @return this client configuration, for call chaining
 */
@Deprecated
public ClientConfiguration setPCBCTimeoutTimerTickDurationMs(long tickDuration) {
    this.setProperty(PCBC_TIMEOUT_TIMER_TICK_DURATION_MS, tickDuration);
    return this;
}
/**
 * Returns the number of ticks of the HashedWheelTimer used by the
 * PerChannelBookieClient to time out requests. Defaults to 1024.
 *
 * @return number of ticks used by the timeout timer
 */
@Deprecated
public int getPCBCTimeoutTimerNumTicks() {
    return this.getInt(PCBC_TIMEOUT_TIMER_NUM_TICKS, 1024);
}

/**
 * Sets the number of ticks of the HashedWheelTimer used by the
 * PerChannelBookieClient to time out requests. Understand HashedWheelTimer's
 * behavior before changing this setting.
 *
 * @see #getPCBCTimeoutTimerNumTicks()
 * @param numTicks number of ticks for the timeout timer
 * @return this client configuration, for call chaining
 */
@Deprecated
public ClientConfiguration setPCBCTimeoutTimerNumTicks(int numTicks) {
    this.setProperty(PCBC_TIMEOUT_TIMER_NUM_TICKS, numTicks);
    return this;
}
/**
 * Returns the timeout, in milliseconds, controlling how long a request send may wait
 * when bookies are unresponsive (e.g. stuck in a long GC).
 *
 * <ul>
 *   <li>negative value — feature disabled (the default is -1)</li>
 *   <li>0 — requests fail immediately</li>
 * </ul>
 *
 * @return backpressure wait timeout in milliseconds
 */
public long getWaitTimeoutOnBackpressureMillis() {
    return this.getLong(WAIT_TIMEOUT_ON_BACKPRESSURE, -1);
}

/**
 * Sets the timeout controlling how long a request send may wait when bookies are
 * unresponsive (e.g. stuck in a long GC).
 *
 * @param value negative to disable (default -1); 0 to fail requests immediately
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setWaitTimeoutOnBackpressureMillis(long value) {
    this.setProperty(WAIT_TIMEOUT_ON_BACKPRESSURE, value);
    return this;
}
/**
 * Returns the number of worker threads the BookKeeper client uses to submit
 * operations. Defaults to the number of available processors.
 *
 * @return the number of worker threads
 */
public int getNumWorkerThreads() {
    return this.getInt(NUM_WORKER_THREADS, Runtime.getRuntime().availableProcessors());
}

/**
 * Sets the number of worker threads.
 *
 * <p>NOTE: changing this after the BookKeeper object has been constructed has no
 * effect on the existing thread pool.
 *
 * @see #getNumWorkerThreads()
 * @param numThreads number of worker threads used by BookKeeper
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setNumWorkerThreads(int numThreads) {
    this.setProperty(NUM_WORKER_THREADS, numThreads);
    return this;
}
/**
 * Returns the number of IO threads used by Netty to handle TCP connections.
 * Defaults to twice the number of available processors.
 *
 * @return the number of IO threads
 */
public int getNumIOThreads() {
    return this.getInt(NUM_IO_THREADS, 2 * Runtime.getRuntime().availableProcessors());
}

/**
 * Sets the number of IO threads used by Netty to handle TCP connections.
 *
 * <p>NOTE: changing this after the BookKeeper object has been constructed has no
 * effect on the existing thread pool.
 *
 * @see #getNumIOThreads()
 * @param numThreads number of IO threads used by BookKeeper
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setNumIOThreads(int numThreads) {
    this.setProperty(NUM_IO_THREADS, numThreads);
    return this;
}
/**
 * Returns the period, in milliseconds, after which a speculative entry read is
 * triggered. A speculative read goes to the next replica bookie before the previous
 * read has errored or responded, and is only sent if the current replica bookie has
 * been silent for the whole (possibly multi-entry) read operation.
 *
 * <p>Speculative reads let the client avoid waiting out the connect timeout when a
 * bookie has failed, at the cost of extra load on the network and the bookies.
 *
 * @see org.apache.bookkeeper.client.LedgerHandle#asyncReadEntries
 * @return the speculative read timeout in milliseconds; default 2000
 */
public int getSpeculativeReadTimeout() {
    return this.getInt(SPECULATIVE_READ_TIMEOUT, 2000);
}

/**
 * Sets the speculative read timeout. A lower value reduces read latency when a bookie
 * has failed, while increasing load on bookies and the network.
 *
 * <p>The default is 2000 milliseconds; 0 disables speculative reads entirely.
 *
 * @see #getSpeculativeReadTimeout()
 * @param timeout the timeout value, in milliseconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setSpeculativeReadTimeout(int timeout) {
    this.setProperty(SPECULATIVE_READ_TIMEOUT, timeout);
    return this;
}
/**
 * Returns the first speculative read timeout, in milliseconds. Falls back to
 * {@link #getSpeculativeReadTimeout()} when unset.
 *
 * @return first speculative read timeout
 */
public int getFirstSpeculativeReadTimeout() {
    return this.getInt(FIRST_SPECULATIVE_READ_TIMEOUT, getSpeculativeReadTimeout());
}

/**
 * Sets the first speculative read timeout.
 *
 * @param timeout first speculative read timeout, in milliseconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setFirstSpeculativeReadTimeout(int timeout) {
    this.setProperty(FIRST_SPECULATIVE_READ_TIMEOUT, timeout);
    return this;
}
/**
 * Returns the multiplier applied to the interval between successive speculative read
 * requests. Defaults to 2.0.
 *
 * @return speculative read timeout backoff multiplier
 */
public float getSpeculativeReadTimeoutBackoffMultiplier() {
    return this.getFloat(SPECULATIVE_READ_TIMEOUT_BACKOFF_MULTIPLIER, 2.0f);
}

/**
 * Sets the multiplier applied to the interval between successive speculative read
 * requests.
 *
 * @param speculativeReadTimeoutBackoffMultiplier backoff multiplier for speculative reads
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setSpeculativeReadTimeoutBackoffMultiplier(
        float speculativeReadTimeoutBackoffMultiplier) {
    this.setProperty(SPECULATIVE_READ_TIMEOUT_BACKOFF_MULTIPLIER, speculativeReadTimeoutBackoffMultiplier);
    return this;
}
/**
 * Returns the multiplier applied to the interval between successive speculative
 * read-LAC requests. Defaults to 2.0.
 *
 * @return speculative read LAC timeout backoff multiplier
 */
public float getSpeculativeReadLACTimeoutBackoffMultiplier() {
    return this.getFloat(SPECULATIVE_READ_LAC_TIMEOUT_BACKOFF_MULTIPLIER, 2.0f);
}

/**
 * Sets the multiplier applied to the interval between successive speculative
 * read-LAC requests.
 *
 * @param speculativeReadLACTimeoutBackoffMultiplier backoff multiplier for speculative LAC reads
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setSpeculativeReadLACTimeoutBackoffMultiplier(
        float speculativeReadLACTimeoutBackoffMultiplier) {
    this.setProperty(SPECULATIVE_READ_LAC_TIMEOUT_BACKOFF_MULTIPLIER, speculativeReadLACTimeoutBackoffMultiplier);
    return this;
}
/**
 * Returns the maximum speculative read timeout, in milliseconds. Falls back to
 * {@link #getSpeculativeReadTimeout()} when unset.
 *
 * @return max speculative read timeout
 */
public int getMaxSpeculativeReadTimeout() {
    return this.getInt(MAX_SPECULATIVE_READ_TIMEOUT, getSpeculativeReadTimeout());
}

/**
 * Sets the maximum speculative read timeout.
 *
 * @param timeout max speculative read timeout, in milliseconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setMaxSpeculativeReadTimeout(int timeout) {
    this.setProperty(MAX_SPECULATIVE_READ_TIMEOUT, timeout);
    return this;
}
/**
 * Returns the period, in milliseconds, after which the first speculative
 * read-last-add-confirmed-and-entry request is triggered. Such a speculative request
 * goes to the next replica bookie before the previous one has errored or responded,
 * and is only sent when the current replica bookie has been silent for the whole read
 * operation.
 *
 * <p>Speculative requests let the client avoid waiting out the connect timeout when a
 * bookie has failed, at the cost of extra load on the network and the bookies.
 *
 * @return the speculative request timeout in milliseconds; default 1500
 */
public int getFirstSpeculativeReadLACTimeout() {
    return this.getInt(FIRST_SPECULATIVE_READ_LAC_TIMEOUT, 1500);
}

/**
 * Returns the maximum interval, in milliseconds, between successive speculative
 * read-last-add-confirmed-and-entry requests.
 *
 * @return the max speculative request timeout in milliseconds; default 5000
 */
public int getMaxSpeculativeReadLACTimeout() {
    return this.getInt(MAX_SPECULATIVE_READ_LAC_TIMEOUT, 5000);
}

/**
 * Sets the period after which the first speculative read-last-add-confirmed-and-entry
 * request is triggered. A lower value reduces read latency when a bookie has failed,
 * while increasing load on bookies and the network.
 *
 * <p>The default is 1500 milliseconds; 0 disables speculative reads entirely.
 *
 * @see #getSpeculativeReadTimeout()
 * @param timeout the timeout value, in milliseconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setFirstSpeculativeReadLACTimeout(int timeout) {
    this.setProperty(FIRST_SPECULATIVE_READ_LAC_TIMEOUT, timeout);
    return this;
}

/**
 * Sets the maximum interval between successive speculative
 * read-last-add-confirmed-and-entry requests.
 *
 * @param timeout the timeout value, in milliseconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setMaxSpeculativeReadLACTimeout(int timeout) {
    this.setProperty(MAX_SPECULATIVE_READ_LAC_TIMEOUT, timeout);
    return this;
}
/**
 * Returns whether parallel reading is enabled for recovery reads. Disabled by default.
 *
 * @return true if parallel recovery read is enabled
 */
public boolean getEnableParallelRecoveryRead() {
    return this.getBoolean(ENABLE_PARALLEL_RECOVERY_READ, false);
}

/**
 * Enables or disables parallel reading for recovery reads.
 *
 * @param enabled flag to enable/disable parallel recovery reads
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setEnableParallelRecoveryRead(boolean enabled) {
    this.setProperty(ENABLE_PARALLEL_RECOVERY_READ, enabled);
    return this;
}
/**
 * Returns the recovery read batch size. Defaults to 1.
 *
 * @return recovery read batch size
 */
public int getRecoveryReadBatchSize() {
    return this.getInt(RECOVERY_READ_BATCH_SIZE, 1);
}

/**
 * Sets the recovery read batch size.
 *
 * @param batchSize recovery read batch size
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setRecoveryReadBatchSize(int batchSize) {
    this.setProperty(RECOVERY_READ_BATCH_SIZE, batchSize);
    return this;
}
/**
 * Returns whether read-sequence reordering is enabled. Disabled by default.
 *
 * @return true if reorder read sequence is enabled, otherwise false
 */
public boolean isReorderReadSequenceEnabled() {
    return this.getBoolean(REORDER_READ_SEQUENCE_ENABLED, false);
}

/**
 * Enables or disables reordering of the read sequence when reading entries.
 *
 * <p>When enabled, the client asks the placement policy for a better ordering of
 * replicas: {@link EnsemblePlacementPolicy#reorderReadSequence(java.util.List,
 * org.apache.bookkeeper.client.BookiesHealthInfo, org.apache.bookkeeper.client.DistributionSchedule.WriteSet)}
 * for entry reads, and {@link EnsemblePlacementPolicy#reorderReadLACSequence(java.util.List,
 * org.apache.bookkeeper.client.BookiesHealthInfo, org.apache.bookkeeper.client.DistributionSchedule.WriteSet)}
 * for long-poll reads. The actual ordering is up to the placement policy
 * implementation.
 *
 * @param enabled flag to enable/disable reorder read sequence
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setReorderReadSequenceEnabled(boolean enabled) {
    this.setProperty(REORDER_READ_SEQUENCE_ENABLED, enabled);
    return this;
}
/**
 * Returns whether read operations stick to a single bookie. Disabled by default.
 *
 * @return true if sticky reads are enabled, otherwise false
 */
public boolean isStickyReadsEnabled() {
    return this.getBoolean(STICKY_READS_ENABLED, false);
}

/**
 * Enables or disables sticky reads, where all read operations for a ledger go (by
 * preference) to one single bookie.
 *
 * <p>Directing all reads to one bookie raises the chance that sequential reads are
 * served from that bookie's read cache (or the OS file-system cache).
 *
 * @param enabled flag to enable/disable sticky reads
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setStickyReadsEnabled(boolean enabled) {
    this.setProperty(STICKY_READS_ENABLED, enabled);
    return this;
}
/**
 * Returns the configured ensemble placement policy class, defaulting to
 * {@link RackawareEnsemblePlacementPolicy}.
 *
 * @return ensemble placement policy class
 * @throws ConfigurationException if the configured class cannot be loaded
 */
public Class<? extends EnsemblePlacementPolicy> getEnsemblePlacementPolicy()
        throws ConfigurationException {
    return ReflectionUtils.getClass(
            this,
            ENSEMBLE_PLACEMENT_POLICY,
            RackawareEnsemblePlacementPolicy.class,
            EnsemblePlacementPolicy.class,
            DEFAULT_LOADER);
}

/**
 * Sets the ensemble placement policy class.
 *
 * @param policyClass ensemble placement policy class
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setEnsemblePlacementPolicy(Class<? extends EnsemblePlacementPolicy> policyClass) {
    this.setProperty(ENSEMBLE_PLACEMENT_POLICY, policyClass.getName());
    return this;
}
/**
 * Returns the pending-request count beyond which reads are reordered. A value
 * {@code <=} zero (the default is 0) turns the feature off.
 *
 * @return the pending-request threshold beyond which reads are reordered
 */
public int getReorderThresholdPendingRequests() {
    return this.getInt(READ_REORDER_THRESHOLD_PENDING_REQUESTS, 0);
}

/**
 * Sets the pending-request count beyond which reads are reordered. Zero turns the
 * feature off.
 *
 * @param threshold the pending-request threshold beyond which to reorder reads
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setReorderThresholdPendingRequests(int threshold) {
    this.setProperty(READ_REORDER_THRESHOLD_PENDING_REQUESTS, threshold);
    return this;
}
/**
 * Returns the network topology stabilize period in seconds. Zero (the default)
 * turns the feature off.
 *
 * @return network topology stabilize period in seconds
 */
public int getNetworkTopologyStabilizePeriodSeconds() {
    return this.getInt(NETWORK_TOPOLOGY_STABILIZE_PERIOD_SECONDS, 0);
}

/**
 * Sets the network topology stabilize period in seconds.
 *
 * @see #getNetworkTopologyStabilizePeriodSeconds()
 * @param seconds stabilize period in seconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setNetworkTopologyStabilizePeriodSeconds(int seconds) {
    this.setProperty(NETWORK_TOPOLOGY_STABILIZE_PERIOD_SECONDS, seconds);
    return this;
}
/**
 * Returns whether the placement policy orders slow bookies. Disabled by default.
 *
 * @return true if slow-bookie ordering is enabled in the placement policy
 */
public boolean getEnsemblePlacementPolicySlowBookies() {
    return this.getBoolean(ENSEMBLE_PLACEMENT_POLICY_ORDER_SLOW_BOOKIES, false);
}

/**
 * Enables or disables ordering of slow bookies in the placement policy.
 *
 * @param enabled flag to enable/disable slow-bookie ordering
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setEnsemblePlacementPolicySlowBookies(boolean enabled) {
    this.setProperty(ENSEMBLE_PLACEMENT_POLICY_ORDER_SLOW_BOOKIES, enabled);
    return this;
}
/**
 * Returns whether the BookieAddressResolver is enabled. Enabled by default.
 *
 * @return true if the BookieAddressResolver is enabled
 */
public boolean getBookieAddressResolverEnabled() {
    return this.getBoolean(BOOKIE_ADDRESS_RESOLVER_ENABLED, true);
}

/**
 * Enables or disables the BookieAddressResolver.
 *
 * <p>When enabled, bookie information is read from the metadata service (e.g.
 * ZooKeeper) to resolve the network address of each bookie ID. If every bookie ID in
 * the cluster already has the form "address:port" or "hostname:port", disabling the
 * resolver reduces requests to the metadata service.
 *
 * @param enabled flag to enable/disable the BookieAddressResolver
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setBookieAddressResolverEnabled(boolean enabled) {
    this.setProperty(BOOKIE_ADDRESS_RESOLVER_ENABLED, enabled);
    return this;
}
/**
 * Sets whether the hostname is used to resolve the local node in the placement policy.
 *
 * <p>Note: unlike most setters in this class, this one returns {@code void}; the
 * return type is kept for binary compatibility.
 *
 * @param useHostnameResolveLocalNodePlacementPolicy true to resolve the local node by hostname
 */
public void setUseHostnameResolveLocalNodePlacementPolicy(boolean useHostnameResolveLocalNodePlacementPolicy) {
    this.setProperty(USE_HOSTNAME_RESOLVE_LOCAL_NODE_PLACEMENT_POLICY, useHostnameResolveLocalNodePlacementPolicy);
}

/**
 * Returns whether the hostname is used to resolve the local node in the placement
 * policy. Disabled by default.
 *
 * @return true if the local node is resolved by hostname
 */
public boolean getUseHostnameResolveLocalNodePlacementPolicy() {
    return this.getBoolean(USE_HOSTNAME_RESOLVE_LOCAL_NODE_PLACEMENT_POLICY, false);
}
/**
 * Returns whether task execution stats recording is enabled. Disabled by default.
 *
 * @return true if task execution stats recording is enabled
 */
public boolean getEnableTaskExecutionStats() {
    return this.getBoolean(ENABLE_TASK_EXECUTION_STATS, false);
}

/**
 * Enables or disables recording of task execution stats.
 *
 * @param enabled flag to enable/disable task execution stats
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setEnableTaskExecutionStats(boolean enabled) {
    this.setProperty(ENABLE_TASK_EXECUTION_STATS, enabled);
    return this;
}
/**
 * Returns the task execution duration, in microseconds, that triggers a warning.
 * Defaults to one second.
 *
 * @return warning threshold in microseconds
 */
public long getTaskExecutionWarnTimeMicros() {
    return this.getLong(TASK_EXECUTION_WARN_TIME_MICROS, TimeUnit.SECONDS.toMicros(1));
}

/**
 * Sets the task execution duration, in microseconds, that triggers a warning.
 *
 * @param warnTime warning threshold in microseconds
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setTaskExecutionWarnTimeMicros(long warnTime) {
    this.setProperty(TASK_EXECUTION_WARN_TIME_MICROS, warnTime);
    return this;
}
/**
 * Returns whether the bookie health check is enabled. Disabled by default.
 *
 * @return true if the bookie health check is enabled
 */
public boolean isBookieHealthCheckEnabled() {
    return this.getBoolean(BOOKIE_HEALTH_CHECK_ENABLED, false);
}

/**
 * Turns on the bookie health check.
 *
 * <p>Once enabled, a bookie whose read/write error count exceeds
 * {@link #getBookieErrorThresholdPerInterval()} within one interval is quarantined for
 * {@link #getBookieQuarantineTimeSeconds()} seconds; while quarantined, the client
 * tries not to use it when creating new ensembles.
 *
 * <p>The bookie health check is <b>disabled</b> by default.
 *
 * @return this client configuration, for call chaining
 */
public ClientConfiguration enableBookieHealthCheck() {
    this.setProperty(BOOKIE_HEALTH_CHECK_ENABLED, true);
    return this;
}
/**
 * Returns the bookie health check interval in seconds. Defaults to 60.
 *
 * @return bookie health check interval in seconds
 */
public int getBookieHealthCheckIntervalSeconds() {
    return this.getInt(BOOKIE_HEALTH_CHECK_INTERVAL_SECONDS, 60);
}

/**
 * Sets the bookie health check interval. The default is 60 seconds.
 *
 * <p>Note: call {@link #enableBookieHealthCheck()} for this setting to take effect.
 *
 * @param interval health check interval, in the given unit
 * @param unit time unit of {@code interval}
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setBookieHealthCheckInterval(int interval, TimeUnit unit) {
    // Normalized to seconds before storing.
    this.setProperty(BOOKIE_HEALTH_CHECK_INTERVAL_SECONDS, unit.toSeconds(interval));
    return this;
}
/**
 * Returns the per-interval error threshold beyond which a bookie is quarantined.
 * Defaults to 100.
 *
 * @return error threshold per interval
 */
public long getBookieErrorThresholdPerInterval() {
    return this.getLong(BOOKIE_ERROR_THRESHOLD_PER_INTERVAL, 100);
}

/**
 * Sets the error threshold per interval ({@link #getBookieHealthCheckIntervalSeconds()})
 * beyond which a bookie is quarantined. The default is 100 errors per minute.
 *
 * <p>Note: call {@link #enableBookieHealthCheck()} for this setting to take effect.
 *
 * @param thresholdPerInterval error threshold per health-check interval
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setBookieErrorThresholdPerInterval(long thresholdPerInterval) {
    this.setProperty(BOOKIE_ERROR_THRESHOLD_PER_INTERVAL, thresholdPerInterval);
    return this;
}
/**
 * Returns the duration, in seconds, for which a bookie is quarantined.
 * Defaults to 1800 (30 minutes).
 *
 * @return quarantine duration in seconds
 */
public int getBookieQuarantineTimeSeconds() {
    return this.getInt(BOOKIE_QUARANTINE_TIME_SECONDS, 1800);
}

/**
 * Sets the duration for which a bookie is quarantined. The default is 30 minutes.
 *
 * <p>Note: call {@link #enableBookieHealthCheck()} for this setting to take effect.
 *
 * @param quarantineTime quarantine duration, in the given unit
 * @param unit time unit of {@code quarantineTime}
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setBookieQuarantineTime(int quarantineTime, TimeUnit unit) {
    // Normalized to seconds before storing.
    this.setProperty(BOOKIE_QUARANTINE_TIME_SECONDS, unit.toSeconds(quarantineTime));
    return this;
}
/**
 * Returns the bookie quarantine ratio. Defaults to 1.0.
 *
 * @return bookie quarantine ratio
 */
public double getBookieQuarantineRatio() {
    return this.getDouble(BOOKIE_QUARANTINE_RATIO, 1.0);
}

/**
 * Sets the bookie quarantine ratio. The default is 1.0.
 *
 * @param ratio bookie quarantine ratio
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setBookieQuarantineRatio(double ratio) {
    this.setProperty(BOOKIE_QUARANTINE_RATIO, ratio);
    return this;
}
/**
 * {@inheritDoc}
 *
 * <p>Overridden only to narrow the return type to {@link ClientConfiguration} for
 * fluent chaining; the work is delegated to the superclass.
 */
@Override
public ClientConfiguration setNettyMaxFrameSizeBytes(int maxSize) {
    super.setNettyMaxFrameSizeBytes(maxSize);
    return this;
}
/**
 * Returns the interval, in seconds, between successive bookie get-info calls.
 * Defaults to 24 hours.
 *
 * @return bookie get-info polling interval in seconds
 */
public int getGetBookieInfoIntervalSeconds() {
    return this.getInt(GET_BOOKIE_INFO_INTERVAL_SECONDS, 24 * 60 * 60);
}

/**
 * Returns the interval, in seconds, between retries after an unsuccessful bookie
 * get-info request. Defaults to 60 seconds.
 *
 * @return bookie get-info retry interval in seconds
 */
public int getGetBookieInfoRetryIntervalSeconds() {
    return this.getInt(GET_BOOKIE_INFO_RETRY_INTERVAL_SECONDS, 60);
}
/**
 * Returns whether disk-weight-based placement is enabled. Disabled by default.
 *
 * @return true if disk-weight-based placement is enabled
 */
public boolean getDiskWeightBasedPlacementEnabled() {
    return getBoolean(DISK_WEIGHT_BASED_PLACEMENT_ENABLED, false);
}

/**
 * Returns the max multiple to use for nodes with very high weight. Defaults to 3.
 *
 * @return max weight multiple for weight-based placement
 */
public int getBookieMaxWeightMultipleForWeightBasedPlacement() {
    return getInt(BOOKIE_MAX_MULTIPLE_FOR_WEIGHTED_PLACEMENT, 3);
}

/**
 * Returns the timeout, in seconds, for the getBookieInfo request. Defaults to 5.
 *
 * @return getBookieInfo timeout in seconds
 */
public int getBookieInfoTimeout() {
    // Consistency fix: use getInt(key, int) like every other int accessor in this
    // class instead of getInteger(key, Integer); commons-config's getInt delegates
    // to getInteger, so behavior is unchanged while avoiding needless autoboxing.
    return getInt(GET_BOOKIE_INFO_TIMEOUT_SECS, 5);
}

/**
 * Returns the timeout, in seconds, for the startTLS request. Defaults to 10.
 *
 * @return startTLS timeout in seconds
 */
public int getStartTLSTimeout() {
    // Same consistency fix as getBookieInfoTimeout(): getInt over getInteger.
    return getInt(START_TLS_TIMEOUT_SECS, 10);
}
/**
 * Enables or disables disk-weight-based placement.
 *
 * @param isEnabled true to enable disk-weight-based placement
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setDiskWeightBasedPlacementEnabled(boolean isEnabled) {
    this.setProperty(DISK_WEIGHT_BASED_PLACEMENT_ENABLED, isEnabled);
    return this;
}
/**
 * Sets the interval between successive bookie get-info polls.
 *
 * @param pollInterval polling interval, in the given unit
 * @param unit time unit of {@code pollInterval}
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setGetBookieInfoIntervalSeconds(int pollInterval, TimeUnit unit) {
    // Normalized to seconds before storing.
    this.setProperty(GET_BOOKIE_INFO_INTERVAL_SECONDS, unit.toSeconds(pollInterval));
    return this;
}

/**
 * Sets the interval between retries after an unsuccessful bookie get-info request.
 *
 * @param interval retry interval, in the given unit
 * @param unit time unit of {@code interval}
 * @return this client configuration, for call chaining
 */
public ClientConfiguration setGetBookieInfoRetryIntervalSeconds(int interval, TimeUnit unit) {
    // Normalized to seconds before storing.
    this.setProperty(GET_BOOKIE_INFO_RETRY_INTERVAL_SECONDS, unit.toSeconds(interval));
    return this;
}
/**
* Set the max multiple to use for nodes with very high weight.
*
* @param multiple
* @return client configuration
*/
public ClientConfiguration setBookieMaxWeightMultipleForWeightBasedPlacement(int multiple) {
setProperty(BOOKIE_MAX_MULTIPLE_FOR_WEIGHTED_PLACEMENT, multiple);
return this;
}
/**
* Set the timeout value in secs for the GET_BOOKIE_INFO request.
*
* @param timeoutSecs
* @return client configuration
*/
public ClientConfiguration setGetBookieInfoTimeout(int timeoutSecs) {
setProperty(GET_BOOKIE_INFO_TIMEOUT_SECS, timeoutSecs);
return this;
}
/**
* Set the timeout value in secs for the START_TLS request.
* @param timeoutSecs
* @return client configuration
*/
public ClientConfiguration setStartTLSTimeout(int timeoutSecs) {
setProperty(START_TLS_TIMEOUT_SECS, timeoutSecs);
return this;
}
    /**
     * Whether hostname verification enabled? Default is false.
     *
     * @return true if hostname verification enabled, otherwise false.
     */
    public boolean getHostnameVerificationEnabled() {
        return getBoolean(TLS_HOSTNAME_VERIFICATION_ENABLED, false);
    }
    /**
     * Enable/Disable hostname verification for tls connection.
     *
     * @param enabled
     *          flag to enable/disable tls hostname verification.
     * @return client configuration.
     */
    public ClientConfiguration setHostnameVerificationEnabled(boolean enabled) {
        setProperty(TLS_HOSTNAME_VERIFICATION_ENABLED, enabled);
        return this;
    }
/**
* Set the client role.
*
* @param role defines how the client will act
* @return client configuration
*/
public ClientConfiguration setClientRole(String role) {
if (role == null) {
throw new NullPointerException();
}
switch (role) {
case CLIENT_ROLE_STANDARD:
case CLIENT_ROLE_SYSTEM:
break;
default:
throw new IllegalArgumentException("invalid role " + role);
}
setProperty(CLIENT_ROLE, role);
return this;
}
/**
* Get the role of the client.
*
* @return the type of client
*/
public String getClientRole() {
return getString(CLIENT_ROLE, CLIENT_ROLE_STANDARD);
}
    /**
     * Get the keystore type for client. Default is JKS.
     *
     * <p>The client-specific {@code CLIENT_TLS_KEYSTORE_TYPE} property takes
     * precedence over the general {@code TLS_KEYSTORE_TYPE} property.
     *
     * @return keystore type for the client.
     */
    public String getTLSKeyStoreType() {
        return getString(CLIENT_TLS_KEYSTORE_TYPE, getString(TLS_KEYSTORE_TYPE, "JKS"));
    }
    /**
     * Set the keystore type for client.
     *
     * <p>NOTE(review): this writes the general {@code TLS_KEYSTORE_TYPE} key,
     * not the client-specific key that the getter consults first — a
     * client-specific value, if present, still wins.
     *
     * @return client configuration
     */
    public ClientConfiguration setTLSKeyStoreType(String arg) {
        setProperty(TLS_KEYSTORE_TYPE, arg);
        return this;
    }
    /**
     * Get the keystore path for the client. Default is null.
     *
     * <p>The client-specific {@code CLIENT_TLS_KEYSTORE} property takes
     * precedence over the general {@code TLS_KEYSTORE} property.
     *
     * @return keystore path for the client, or null if not configured.
     */
    public String getTLSKeyStore() {
        return getString(CLIENT_TLS_KEYSTORE, getString(TLS_KEYSTORE, null));
    }
    /**
     * Set the keystore path for the client.
     *
     * <p>NOTE(review): writes the general {@code TLS_KEYSTORE} key; a
     * client-specific value, if present, still wins on read.
     *
     * @return client configuration
     */
    public ClientConfiguration setTLSKeyStore(String arg) {
        setProperty(TLS_KEYSTORE, arg);
        return this;
    }
    /**
     * Get the path to file containing keystore password, if the client keystore is password protected. Default is null.
     *
     * <p>The client-specific {@code CLIENT_TLS_KEYSTORE_PASSWORD_PATH} property
     * takes precedence over the general {@code TLS_KEYSTORE_PASSWORD_PATH} property.
     *
     * @return path to the keystore password file, or null if not configured.
     */
    public String getTLSKeyStorePasswordPath() {
        return getString(CLIENT_TLS_KEYSTORE_PASSWORD_PATH, getString(TLS_KEYSTORE_PASSWORD_PATH, null));
    }
    /**
     * Set the path to file containing keystore password, if the client keystore is password protected.
     *
     * <p>NOTE(review): writes the general {@code TLS_KEYSTORE_PASSWORD_PATH} key;
     * a client-specific value, if present, still wins on read.
     *
     * @return client configuration
     */
    public ClientConfiguration setTLSKeyStorePasswordPath(String arg) {
        setProperty(TLS_KEYSTORE_PASSWORD_PATH, arg);
        return this;
    }
    /**
     * Get the truststore type for client. Default is JKS.
     *
     * <p>The client-specific {@code CLIENT_TLS_TRUSTSTORE_TYPE} property takes
     * precedence over the general {@code TLS_TRUSTSTORE_TYPE} property.
     *
     * @return truststore type for the client.
     */
    public String getTLSTrustStoreType() {
        return getString(CLIENT_TLS_TRUSTSTORE_TYPE, getString(TLS_TRUSTSTORE_TYPE, "JKS"));
    }
    /**
     * Set the truststore type for client.
     *
     * <p>NOTE(review): writes the general {@code TLS_TRUSTSTORE_TYPE} key;
     * a client-specific value, if present, still wins on read.
     *
     * @return client configuration
     */
    public ClientConfiguration setTLSTrustStoreType(String arg) {
        setProperty(TLS_TRUSTSTORE_TYPE, arg);
        return this;
    }
    /**
     * Get the truststore path for the client. Default is null.
     *
     * <p>The client-specific {@code CLIENT_TLS_TRUSTSTORE} property takes
     * precedence over the general {@code TLS_TRUSTSTORE} property.
     *
     * @return truststore path for the client, or null if not configured.
     */
    public String getTLSTrustStore() {
        return getString(CLIENT_TLS_TRUSTSTORE, getString(TLS_TRUSTSTORE, null));
    }
    /**
     * Set the truststore path for the client.
     *
     * <p>NOTE(review): writes the general {@code TLS_TRUSTSTORE} key;
     * a client-specific value, if present, still wins on read.
     *
     * @return client configuration
     */
    public ClientConfiguration setTLSTrustStore(String arg) {
        setProperty(TLS_TRUSTSTORE, arg);
        return this;
    }
    /**
     * Get the path to file containing truststore password, if the client truststore is password protected. Default is
     * null.
     *
     * <p>The client-specific {@code CLIENT_TLS_TRUSTSTORE_PASSWORD_PATH} property
     * takes precedence over the general {@code TLS_TRUSTSTORE_PASSWORD_PATH} property.
     *
     * @return path to the truststore password file, or null if not configured.
     */
    public String getTLSTrustStorePasswordPath() {
        return getString(CLIENT_TLS_TRUSTSTORE_PASSWORD_PATH, getString(TLS_TRUSTSTORE_PASSWORD_PATH, null));
    }
    /**
     * Set the path to file containing truststore password, if the client truststore is password protected.
     *
     * <p>NOTE(review): writes the general {@code TLS_TRUSTSTORE_PASSWORD_PATH} key;
     * a client-specific value, if present, still wins on read.
     *
     * @return client configuration
     */
    public ClientConfiguration setTLSTrustStorePasswordPath(String arg) {
        setProperty(TLS_TRUSTSTORE_PASSWORD_PATH, arg);
        return this;
    }
    /**
     * Get the path to file containing TLS Certificate. Default is null.
     *
     * @return path to the TLS certificate file, or null if not configured.
     */
    public String getTLSCertificatePath() {
        return getString(TLS_CERTIFICATE_PATH, null);
    }
    /**
     * Set the path to file containing TLS Certificate.
     *
     * @param arg path to the TLS certificate file
     * @return client configuration
     */
    public ClientConfiguration setTLSCertificatePath(String arg) {
        setProperty(TLS_CERTIFICATE_PATH, arg);
        return this;
    }
    /**
     * Whether to allow opportunistic striping. Default is false.
     *
     * @return true if opportunistic striping is enabled
     */
    public boolean getOpportunisticStriping() {
        return getBoolean(OPPORTUNISTIC_STRIPING, false);
    }
    /**
     * Enable/Disable opportunistic striping.
     * <p>
     * If set to true, when you are creating a ledger with a given
     * ensemble size, the system will automatically handle the
     * lack of enough bookies, reducing ensemble size up to
     * the write quorum size. This way in little clusters
     * you can try to use striping (ensemble size > write quorum size)
     * in case that you have enough bookies up and running,
     * and degrade automatically to the minimum requested replication count.
     * </p>
     *
     * @param enabled
     *          flag to enable/disable opportunistic striping.
     * @return client configuration.
     */
    public ClientConfiguration setOpportunisticStriping(boolean enabled) {
        setProperty(OPPORTUNISTIC_STRIPING, enabled);
        return this;
    }
    /**
     * Whether to delay ensemble change or not? Default is false.
     *
     * @return true if to delay ensemble change, otherwise false.
     */
    public boolean getDelayEnsembleChange() {
        return getBoolean(DELAY_ENSEMBLE_CHANGE, false);
    }
    /**
     * Enable/Disable delaying ensemble change.
     * <p>
     * If set to true, ensemble change only happens when it can't meet
     * ack quorum requirement. If set to false, ensemble change happens
     * immediately when it received a failed write.
     * </p>
     *
     * @param enabled
     *          flag to enable/disable delaying ensemble change.
     * @return client configuration.
     */
    public ClientConfiguration setDelayEnsembleChange(boolean enabled) {
        setProperty(DELAY_ENSEMBLE_CHANGE, enabled);
        return this;
    }
    /**
     * Whether to enable bookie address changes tracking. Default is true.
     *
     * @return flag to enable/disable bookie address changes tracking
     */
    public boolean getEnableBookieAddressTracking() {
        return getBoolean(FOLLOW_BOOKIE_ADDRESS_TRACKING, true);
    }
    /**
     * Enable/Disable bookie address changes tracking.
     *
     * @param value
     *          flag to enable/disable bookie address changes tracking
     * @return client configuration.
     */
    public ClientConfiguration setEnableBookieAddressTracking(boolean value) {
        setProperty(FOLLOW_BOOKIE_ADDRESS_TRACKING, value);
        return this;
    }
    /**
     * Whether to enable bookie failure tracking. Default is true.
     *
     * @return flag to enable/disable bookie failure tracking
     */
    public boolean getEnableBookieFailureTracking() {
        return getBoolean(ENABLE_BOOKIE_FAILURE_TRACKING, true);
    }
    /**
     * Enable/Disable bookie failure tracking.
     *
     * @param enabled
     *          flag to enable/disable bookie failure tracking
     * @return client configuration.
     */
    public ClientConfiguration setEnableBookieFailureTracking(boolean enabled) {
        setProperty(ENABLE_BOOKIE_FAILURE_TRACKING, enabled);
        return this;
    }
    /**
     * Get the bookie failure tracking expiration timeout. Default is 60000 ms.
     *
     * @return bookie failure tracking expiration timeout, in milliseconds.
     */
    public int getBookieFailureHistoryExpirationMSec() {
        return getInt(BOOKIE_FAILURE_HISTORY_EXPIRATION_MS, 60000);
    }
    /**
     * Set the bookie failure tracking expiration timeout.
     *
     * @param expirationMSec
     *          bookie failure tracking expiration timeout, in milliseconds.
     * @return client configuration.
     */
    public ClientConfiguration setBookieFailureHistoryExpirationMSec(int expirationMSec) {
        setProperty(BOOKIE_FAILURE_HISTORY_EXPIRATION_MS, expirationMSec);
        return this;
    }
    /**
     * Get the name of the dynamic feature that disables ensemble change.
     * Defaults to {@code FEATURE_DISABLE_ENSEMBLE_CHANGE}.
     *
     * @return name of the dynamic feature that disables ensemble change
     */
    public String getDisableEnsembleChangeFeatureName() {
        return getString(DISABLE_ENSEMBLE_CHANGE_FEATURE_NAME, FEATURE_DISABLE_ENSEMBLE_CHANGE);
    }
    /**
     * Set the name of the dynamic feature that disables ensemble change.
     *
     * @param disableEnsembleChangeFeatureName
     *          name of the dynamic feature that disables ensemble change
     * @return client configuration.
     */
    public ClientConfiguration setDisableEnsembleChangeFeatureName(String disableEnsembleChangeFeatureName) {
        setProperty(DISABLE_ENSEMBLE_CHANGE_FEATURE_NAME, disableEnsembleChangeFeatureName);
        return this;
    }
    /**
     * Get the max allowed ensemble change number.
     *
     * @return value of MaxAllowedEnsembleChanges, default MAX_VALUE, indicating feature is disable.
     */
    public int getMaxAllowedEnsembleChanges() {
        return getInt(MAX_ALLOWED_ENSEMBLE_CHANGES, Integer.MAX_VALUE);
    }
    /**
     * Set the max allowed ensemble change number.
     *
     * @param num
     *          value of MaxAllowedEnsembleChanges
     * @return client configuration.
     */
    public ClientConfiguration setMaxAllowedEnsembleChanges(int num) {
        setProperty(MAX_ALLOWED_ENSEMBLE_CHANGES, num);
        return this;
    }
/**
* Option to use Netty Pooled ByteBufs.
*
* @deprecated see {@link BookKeeperBuilder#allocator(io.netty.buffer.ByteBufAllocator)}
*
* @return the value of the option
*/
@Deprecated
public boolean isNettyUsePooledBuffers() {
return getBoolean(NETTY_USE_POOLED_BUFFERS, true);
}
/**
* Enable/Disable the usage of Pooled Netty buffers. While using v2 wire protocol the application will be
* responsible for releasing ByteBufs returned by BookKeeper.
*
* @param enabled
* if enabled BookKeeper will use default Pooled Netty Buffer allocator
*
* @deprecated see {@link BookKeeperBuilder#allocator(io.netty.buffer.ByteBufAllocator)}
*
* @see #setUseV2WireProtocol(boolean)
* @see ByteBuf#release()
* @see LedgerHandle#readEntries(long, long)
*/
public ClientConfiguration setNettyUsePooledBuffers(boolean enabled) {
setProperty(NETTY_USE_POOLED_BUFFERS, enabled);
return this;
}
    /**
     * Set registration manager class.
     *
     * @param regClientClass
     *            ClientClass
     * @return client configuration
     * @deprecated since 4.7.0
     */
    @Deprecated
    public ClientConfiguration setRegistrationClientClass(
            Class<? extends RegistrationClient> regClientClass) {
        setProperty(REGISTRATION_CLIENT_CLASS, regClientClass);
        return this;
    }
    /**
     * Get Registration Client Class. Defaults to {@code ZKRegistrationClient}.
     *
     * @return registration manager class.
     * @throws ConfigurationException if the configured class cannot be resolved
     *             (see {@code ReflectionUtils.getClass}).
     * @deprecated since 4.7.0
     */
    @Deprecated
    public Class<? extends RegistrationClient> getRegistrationClientClass()
            throws ConfigurationException {
        return ReflectionUtils.getClass(this, REGISTRATION_CLIENT_CLASS,
                ZKRegistrationClient.class, RegistrationClient.class,
                DEFAULT_LOADER);
    }
    /**
     * Enable the client to use system time as the ledger creation time.
     *
     * <p>If this is enabled, the client will write a ctime field into the ledger metadata.
     * Otherwise, nothing will be written. The creation time of this ledger will be the ctime
     * of the metadata record in metadata store.
     *
     * @param enabled flag to enable/disable client using system time as the ledger creation time.
     * @return client configuration.
     */
    public ClientConfiguration setStoreSystemtimeAsLedgerCreationTime(boolean enabled) {
        setProperty(STORE_SYSTEMTIME_AS_LEDGER_CREATION_TIME, enabled);
        return this;
    }
    /**
     * Return the flag that indicates whether client is using system time as the ledger creation time when
     * creating ledgers. Default is false.
     *
     * @return the flag that indicates whether client is using system time as the ledger creation time when
     *         creating ledgers.
     */
    public boolean getStoreSystemtimeAsLedgerCreationTime() {
        return getBoolean(STORE_SYSTEMTIME_AS_LEDGER_CREATION_TIME, false);
    }
    /**
     * Set the log frequency when a bookie is unavailable, in order to limit log filesize.
     *
     * <p>The value is normalized to milliseconds before being stored.
     *
     * @param throttleValue log throttle interval, in units of {@code unit}
     * @param unit time unit of {@code throttleValue}
     * @return client configuration.
     */
    public ClientConfiguration setClientConnectBookieUnavailableLogThrottling(
            int throttleValue, TimeUnit unit) {
        setProperty(CLIENT_CONNECT_BOOKIE_UNAVAILABLE_LOG_THROTTLING, unit.toMillis(throttleValue));
        return this;
    }
    /**
     * Get the log frequency when a bookie is unavailable, in milliseconds. Default is 5000 ms.
     *
     * @return log frequency when a bookie is unavailable, in milliseconds.
     */
    public long getClientConnectBookieUnavailableLogThrottlingMs() {
        return getLong(CLIENT_CONNECT_BOOKIE_UNAVAILABLE_LOG_THROTTLING, 5_000L);
    }
    @Override
    protected ClientConfiguration getThis() {
        // Supports the fluent-builder pattern in the parent configuration class
        // by returning the concrete subtype.
        return this;
    }
}
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.conf;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Stable;
import org.apache.commons.configuration.Configuration;
/**
* Class that may be configured with a {@link Configuration}.
*/
@Public
@Stable
public interface Configurable {
    /**
     * Set the configuration to be used by this object.
     *
     * @param conf
     *          Configuration object to use
     */
    void setConf(Configuration conf);
    /**
     * Return the configuration used by this object.
     *
     * @return configuration used by this object, as previously supplied via
     *         {@link #setConf(Configuration)}.
     */
    Configuration getConf();
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.conf;
import static org.apache.bookkeeper.util.BookKeeperConstants.MAX_LOG_SIZE_LIMIT;
import com.google.common.annotations.Beta;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import java.io.File;
import java.net.URL;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.bookie.FileChannelProvider;
import org.apache.bookkeeper.bookie.InterleavedLedgerStorage;
import org.apache.bookkeeper.bookie.LedgerStorage;
import org.apache.bookkeeper.bookie.SortedLedgerStorage;
import org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage;
import org.apache.bookkeeper.common.conf.ConfigDef;
import org.apache.bookkeeper.common.conf.ConfigException;
import org.apache.bookkeeper.common.conf.ConfigKey;
import org.apache.bookkeeper.common.conf.ConfigKeyGroup;
import org.apache.bookkeeper.common.conf.Type;
import org.apache.bookkeeper.common.conf.validators.ClassValidator;
import org.apache.bookkeeper.common.conf.validators.RangeValidator;
import org.apache.bookkeeper.common.util.ReflectionUtils;
import org.apache.bookkeeper.discover.RegistrationManager;
import org.apache.bookkeeper.discover.ZKRegistrationManager;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.stats.NullStatsProvider;
import org.apache.bookkeeper.stats.StatsProvider;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.lang3.StringUtils;
/**
* Configuration manages server-side settings.
*/
public class ServerConfiguration extends AbstractConfiguration<ServerConfiguration> {
private static final int SECOND = 1000;
// Ledger Storage Settings
private static final ConfigKeyGroup GROUP_LEDGER_STORAGE = ConfigKeyGroup.builder("ledgerstorage")
.description("Ledger Storage related settings")
.order(10) // place a place holder here
.build();
protected static final String LEDGER_STORAGE_CLASS = "ledgerStorageClass";
protected static final ConfigKey LEDGER_STORAGE_CLASS_KEY = ConfigKey.builder(LEDGER_STORAGE_CLASS)
.type(Type.CLASS)
.description("Ledger storage implementation class")
.defaultValue(SortedLedgerStorage.class)
.optionValues(Lists.newArrayList(
InterleavedLedgerStorage.class.getName(),
SortedLedgerStorage.class.getName(),
DbLedgerStorage.class.getName()
))
.validator(ClassValidator.of(LedgerStorage.class))
.group(GROUP_LEDGER_STORAGE)
.build();
// Entry Log Parameters
private static final ConfigKeyGroup GROUP_LEDGER_STORAGE_ENTRY_LOGGER = ConfigKeyGroup.builder("entrylogger")
.description("EntryLogger related settings")
.order(11)
.build();
protected static final String ENTRY_LOG_SIZE_LIMIT = "logSizeLimit";
protected static final ConfigKey ENTRY_LOG_SIZE_LIMIT_KEY = ConfigKey.builder(ENTRY_LOG_SIZE_LIMIT)
.type(Type.LONG)
.description("Max file size of entry logger, in bytes")
.documentation("A new entry log file will be created when the old one reaches this file size limitation")
.defaultValue(MAX_LOG_SIZE_LIMIT)
.validator(RangeValidator.between(0, MAX_LOG_SIZE_LIMIT))
.group(GROUP_LEDGER_STORAGE_ENTRY_LOGGER)
.build();
protected static final String ENTRY_LOG_FILE_PREALLOCATION_ENABLED = "entryLogFilePreallocationEnabled";
protected static final String FORCE_ALLOW_COMPACTION = "forceAllowCompaction";
protected static final String MINOR_COMPACTION_INTERVAL = "minorCompactionInterval";
protected static final String MINOR_COMPACTION_THRESHOLD = "minorCompactionThreshold";
protected static final String MINOR_COMPACTION_MAX_TIME_MILLIS = "minorCompactionMaxTimeMillis";
protected static final String MAJOR_COMPACTION_INTERVAL = "majorCompactionInterval";
protected static final String MAJOR_COMPACTION_THRESHOLD = "majorCompactionThreshold";
protected static final String MAJOR_COMPACTION_MAX_TIME_MILLIS = "majorCompactionMaxTimeMillis";
protected static final String IS_THROTTLE_BY_BYTES = "isThrottleByBytes";
protected static final String COMPACTION_MAX_OUTSTANDING_REQUESTS = "compactionMaxOutstandingRequests";
protected static final String COMPACTION_RATE = "compactionRate";
protected static final String COMPACTION_RATE_BY_ENTRIES = "compactionRateByEntries";
protected static final String COMPACTION_RATE_BY_BYTES = "compactionRateByBytes";
// Gc Parameters
protected static final String GC_WAIT_TIME = "gcWaitTime";
protected static final String IS_FORCE_GC_ALLOW_WHEN_NO_SPACE = "isForceGCAllowWhenNoSpace";
protected static final String GC_OVERREPLICATED_LEDGER_WAIT_TIME = "gcOverreplicatedLedgerWaitTime";
protected static final String GC_OVERREPLICATED_LEDGER_MAX_CONCURRENT_REQUESTS =
"gcOverreplicatedLedgerMaxConcurrentRequests";
protected static final String USE_TRANSACTIONAL_COMPACTION = "useTransactionalCompaction";
protected static final String VERIFY_METADATA_ON_GC = "verifyMetadataOnGC";
protected static final String GC_ENTRYLOGMETADATA_CACHE_ENABLED = "gcEntryLogMetadataCacheEnabled";
protected static final String GC_ENTRYLOG_METADATA_CACHE_PATH = "gcEntryLogMetadataCachePath";
protected static final String USE_TARGET_ENTRYLOG_SIZE_FOR_GC = "useTargetEntryLogSizeForGc";
// Scrub Parameters
protected static final String LOCAL_SCRUB_PERIOD = "localScrubInterval";
protected static final String LOCAL_SCRUB_RATE_LIMIT = "localScrubRateLimit";
// Sync Parameters
protected static final String FLUSH_INTERVAL = "flushInterval";
protected static final String FLUSH_ENTRYLOG_INTERVAL_BYTES = "flushEntrylogBytes";
// Bookie death watch interval
protected static final String DEATH_WATCH_INTERVAL = "bookieDeathWatchInterval";
// Ledger Cache Parameters
protected static final String OPEN_FILE_LIMIT = "openFileLimit";
protected static final String PAGE_LIMIT = "pageLimit";
protected static final String PAGE_SIZE = "pageSize";
protected static final String FILEINFO_CACHE_INITIAL_CAPACITY = "fileInfoCacheInitialCapacity";
protected static final String FILEINFO_MAX_IDLE_TIME = "fileInfoMaxIdleTime";
protected static final String FILEINFO_FORMAT_VERSION_TO_WRITE = "fileInfoFormatVersionToWrite";
// Journal Parameters
protected static final String MAX_JOURNAL_SIZE = "journalMaxSizeMB";
protected static final String MAX_BACKUP_JOURNALS = "journalMaxBackups";
protected static final String JOURNAL_SYNC_DATA = "journalSyncData";
protected static final String JOURNAL_WRITE_DATA = "journalWriteData";
protected static final String JOURNAL_ADAPTIVE_GROUP_WRITES = "journalAdaptiveGroupWrites";
protected static final String JOURNAL_MAX_GROUP_WAIT_MSEC = "journalMaxGroupWaitMSec";
protected static final String JOURNAL_BUFFERED_WRITES_THRESHOLD = "journalBufferedWritesThreshold";
protected static final String JOURNAL_BUFFERED_ENTRIES_THRESHOLD = "journalBufferedEntriesThreshold";
protected static final String JOURNAL_FLUSH_WHEN_QUEUE_EMPTY = "journalFlushWhenQueueEmpty";
protected static final String JOURNAL_REMOVE_FROM_PAGE_CACHE = "journalRemoveFromPageCache";
protected static final String JOURNAL_PRE_ALLOC_SIZE = "journalPreAllocSizeMB";
protected static final String JOURNAL_WRITE_BUFFER_SIZE = "journalWriteBufferSizeKB";
protected static final String JOURNAL_ALIGNMENT_SIZE = "journalAlignmentSize";
protected static final String NUM_JOURNAL_CALLBACK_THREADS = "numJournalCallbackThreads";
protected static final String JOURNAL_FORMAT_VERSION_TO_WRITE = "journalFormatVersionToWrite";
protected static final String JOURNAL_QUEUE_SIZE = "journalQueueSize";
protected static final String JOURNAL_MAX_MEMORY_SIZE_MB = "journalMaxMemorySizeMb";
protected static final String JOURNAL_PAGECACHE_FLUSH_INTERVAL_MSEC = "journalPageCacheFlushIntervalMSec";
protected static final String JOURNAL_CHANNEL_PROVIDER = "journalChannelProvider";
protected static final String JOURNAL_REUSE_FILES = "journalReuseFiles";
// backpressure control
protected static final String MAX_ADDS_IN_PROGRESS_LIMIT = "maxAddsInProgressLimit";
protected static final String MAX_READS_IN_PROGRESS_LIMIT = "maxReadsInProgressLimit";
protected static final String CLOSE_CHANNEL_ON_RESPONSE_TIMEOUT = "closeChannelOnResponseTimeout";
protected static final String WAIT_TIMEOUT_ON_RESPONSE_BACKPRESSURE = "waitTimeoutOnResponseBackpressureMs";
// Bookie Parameters
protected static final String BOOKIE_PORT = "bookiePort";
protected static final String LISTENING_INTERFACE = "listeningInterface";
protected static final String ALLOW_LOOPBACK = "allowLoopback";
protected static final String ADVERTISED_ADDRESS = "advertisedAddress";
protected static final String BOOKIE_ID = "bookieId";
protected static final String ALLOW_EPHEMERAL_PORTS = "allowEphemeralPorts";
protected static final String JOURNAL_DIR = "journalDirectory";
protected static final String JOURNAL_DIRS = "journalDirectories";
protected static final String LEDGER_DIRS = "ledgerDirectories";
protected static final String INDEX_DIRS = "indexDirectories";
protected static final String ALLOW_STORAGE_EXPANSION = "allowStorageExpansion";
// NIO and Netty Parameters
protected static final String SERVER_TCP_NODELAY = "serverTcpNoDelay";
protected static final String SERVER_SOCK_KEEPALIVE = "serverSockKeepalive";
protected static final String SERVER_SOCK_LINGER = "serverTcpLinger";
protected static final String SERVER_WRITEBUFFER_LOW_WATER_MARK = "serverWriteBufferLowWaterMark";
protected static final String SERVER_WRITEBUFFER_HIGH_WATER_MARK = "serverWriteBufferHighWaterMark";
protected static final String SERVER_NUM_ACCEPTOR_THREADS = "serverNumAcceptorThreads";
protected static final String SERVER_NUM_IO_THREADS = "serverNumIOThreads";
// Zookeeper Parameters
protected static final String ZK_RETRY_BACKOFF_START_MS = "zkRetryBackoffStartMs";
protected static final String ZK_RETRY_BACKOFF_MAX_MS = "zkRetryBackoffMaxMs";
protected static final String OPEN_LEDGER_REREPLICATION_GRACE_PERIOD = "openLedgerRereplicationGracePeriod";
protected static final String LOCK_RELEASE_OF_FAILED_LEDGER_GRACE_PERIOD = "lockReleaseOfFailedLedgerGracePeriod";
//ReadOnly mode support on all disk full
protected static final String READ_ONLY_MODE_ENABLED = "readOnlyModeEnabled";
protected static final String READ_ONLY_MODE_ON_ANY_DISK_FULL_ENABLED = "readOnlyModeOnAnyDiskFullEnabled";
//Whether the bookie is force started in ReadOnly mode
protected static final String FORCE_READ_ONLY_BOOKIE = "forceReadOnlyBookie";
//Whether to persist the bookie status
protected static final String PERSIST_BOOKIE_STATUS_ENABLED = "persistBookieStatusEnabled";
//Disk utilization
protected static final String DISK_USAGE_THRESHOLD = "diskUsageThreshold";
protected static final String DISK_USAGE_WARN_THRESHOLD = "diskUsageWarnThreshold";
protected static final String DISK_USAGE_LWM_THRESHOLD = "diskUsageLwmThreshold";
protected static final String DISK_CHECK_INTERVAL = "diskCheckInterval";
// Replication parameters
protected static final String AUDITOR_PERIODIC_CHECK_INTERVAL = "auditorPeriodicCheckInterval";
protected static final String AUDITOR_PERIODIC_BOOKIE_CHECK_INTERVAL = "auditorPeriodicBookieCheckInterval";
protected static final String AUDITOR_PERIODIC_PLACEMENT_POLICY_CHECK_INTERVAL =
"auditorPeriodicPlacementPolicyCheckInterval";
protected static final String REPAIRED_PLACEMENT_POLICY_NOT_ADHERING_BOOKIE_ENABLED =
"repairedPlacementPolicyNotAdheringBookieEnabled";
protected static final String AUDITOR_LEDGER_VERIFICATION_PERCENTAGE = "auditorLedgerVerificationPercentage";
protected static final String AUTO_RECOVERY_DAEMON_ENABLED = "autoRecoveryDaemonEnabled";
protected static final String LOST_BOOKIE_RECOVERY_DELAY = "lostBookieRecoveryDelay";
protected static final String RW_REREPLICATE_BACKOFF_MS = "rwRereplicateBackoffMs";
protected static final String UNDERREPLICATED_LEDGER_RECOVERY_GRACE_PERIOD =
"underreplicatedLedgerRecoveryGracePeriod";
protected static final String AUDITOR_REPLICAS_CHECK_INTERVAL = "auditorReplicasCheckInterval";
protected static final String AUDITOR_MAX_NUMBER_OF_CONCURRENT_OPEN_LEDGER_OPERATIONS =
"auditorMaxNumberOfConcurrentOpenLedgerOperations";
protected static final String AUDITOR_ACQUIRE_CONCURRENT_OPEN_LEDGER_OPERATIONS_TIMEOUT_MSEC =
"auditorAcquireConcurrentOpenLedgerOperationsTimeOutMSec";
protected static final String IN_FLIGHT_READ_ENTRY_NUM_IN_LEDGER_CHECKER = "inFlightReadEntryNumInLedgerChecker";
// Worker Thread parameters.
protected static final String NUM_ADD_WORKER_THREADS = "numAddWorkerThreads";
protected static final String NUM_READ_WORKER_THREADS = "numReadWorkerThreads";
protected static final String MAX_PENDING_READ_REQUESTS_PER_THREAD = "maxPendingReadRequestsPerThread";
protected static final String MAX_PENDING_ADD_REQUESTS_PER_THREAD = "maxPendingAddRequestsPerThread";
protected static final String NUM_LONG_POLL_WORKER_THREADS = "numLongPollWorkerThreads";
protected static final String NUM_HIGH_PRIORITY_WORKER_THREADS = "numHighPriorityWorkerThreads";
protected static final String READ_WORKER_THREADS_THROTTLING_ENABLED = "readWorkerThreadsThrottlingEnabled";
// Long poll parameters
protected static final String REQUEST_TIMER_TICK_DURATION_MILLISEC = "requestTimerTickDurationMs";
protected static final String REQUEST_TIMER_NO_OF_TICKS = "requestTimerNumTicks";
protected static final String READ_BUFFER_SIZE = "readBufferSizeBytes";
protected static final String WRITE_BUFFER_SIZE = "writeBufferSizeBytes";
// Whether the bookie should use its hostname or ipaddress for the
// registration.
protected static final String USE_HOST_NAME_AS_BOOKIE_ID = "useHostNameAsBookieID";
protected static final String USE_SHORT_HOST_NAME = "useShortHostName";
protected static final String ENABLE_LOCAL_TRANSPORT = "enableLocalTransport";
protected static final String DISABLE_SERVER_SOCKET_BIND = "disableServerSocketBind";
protected static final String SORTED_LEDGER_STORAGE_ENABLED = "sortedLedgerStorageEnabled";
protected static final String SKIP_LIST_SIZE_LIMIT = "skipListSizeLimit";
protected static final String SKIP_LIST_CHUNK_SIZE_ENTRY = "skipListArenaChunkSize";
protected static final String SKIP_LIST_MAX_ALLOC_ENTRY = "skipListArenaMaxAllocSize";
// Statistics Parameters
protected static final String ENABLE_STATISTICS = "enableStatistics";
protected static final String STATS_PROVIDER_CLASS = "statsProviderClass";
protected static final String SANITY_CHECK_METRICS_ENABLED = "sanityCheckMetricsEnabled";
// Rx adaptive ByteBuf allocator parameters
protected static final String BYTEBUF_ALLOCATOR_SIZE_INITIAL = "byteBufAllocatorSizeInitial";
protected static final String BYTEBUF_ALLOCATOR_SIZE_MIN = "byteBufAllocatorSizeMin";
protected static final String BYTEBUF_ALLOCATOR_SIZE_MAX = "byteBufAllocatorSizeMax";
// Bookie auth provider factory class name
protected static final String BOOKIE_AUTH_PROVIDER_FACTORY_CLASS = "bookieAuthProviderFactoryClass";
// Minimum usable disk space thresholds for creating index/entrylog files and
// for accepting high-priority writes when the disk is otherwise full.
protected static final String MIN_USABLESIZE_FOR_INDEXFILE_CREATION = "minUsableSizeForIndexFileCreation";
protected static final String MIN_USABLESIZE_FOR_ENTRYLOG_CREATION = "minUsableSizeForEntryLogCreation";
protected static final String MIN_USABLESIZE_FOR_HIGH_PRIORITY_WRITES = "minUsableSizeForHighPriorityWrites";
protected static final String ALLOW_MULTIPLEDIRS_UNDER_SAME_DISKPARTITION =
    "allowMultipleDirsUnderSameDiskPartition";
// Http Server parameters
protected static final String HTTP_SERVER_ENABLED = "httpServerEnabled";
protected static final String HTTP_SERVER_PORT = "httpServerPort";
protected static final String HTTP_SERVER_HOST = "httpServerHost";
protected static final String HTTP_SERVER_TLS_ENABLE = "httpServerTlsEnable";
protected static final String HTTP_SERVER_KEY_STORE_PATH = "httpServerKeyStorePath";
protected static final String HTTP_SERVER_KEY_STORE_PASSWORD = "httpServerKeyStorePassword";
protected static final String HTTP_SERVER_TRUST_STORE_PATH = "httpServerTrustStorePath";
protected static final String HTTP_SERVER_TRUST_STORE_PASSWORD = "httpServerTrustStorePassword";
// Lifecycle Components
protected static final String EXTRA_SERVER_COMPONENTS = "extraServerComponents";
protected static final String IGNORE_EXTRA_SERVER_COMPONENTS_STARTUP_FAILURES =
    "ignoreExtraServerComponentsStartupFailures";
// Registration
protected static final String REGISTRATION_MANAGER_CLASS = "registrationManagerClass";
// Stats
protected static final String ENABLE_TASK_EXECUTION_STATS = "enableTaskExecutionStats";
/*
 * config specifying if the entrylog per ledger is enabled or not.
 */
protected static final String ENTRY_LOG_PER_LEDGER_ENABLED = "entryLogPerLedgerEnabled";
// In the case of multiple entry logs, multiple threads can be used to flush the memtable in parallel.
protected static final String NUMBER_OF_MEMTABLE_FLUSH_THREADS = "numOfMemtableFlushThreads";
/*
 * config specifying if the entrylog per ledger is enabled, then the amount
 * of time EntryLogManagerForEntryLogPerLedger should wait for closing the
 * entrylog file after the last addEntry call for that ledger, if explicit
 * writeclose for that ledger is not received.
 */
protected static final String ENTRYLOGMAP_ACCESS_EXPIRYTIME_INSECONDS = "entrylogMapAccessExpiryTimeInSeconds";
/*
 * in entryLogPerLedger feature, this specifies the maximum number of
 * entrylogs that can be active at a given point in time. If there are more
 * active entryLogs than maximumNumberOfActiveEntryLogs, then
 * the entrylog will be evicted from the cache.
 */
protected static final String MAXIMUM_NUMBER_OF_ACTIVE_ENTRYLOGS = "maximumNumberOfActiveEntryLogs";
/*
 * in EntryLogManagerForEntryLogPerLedger, this config value specifies the
 * metrics cache size limits in multiples of entrylogMap cache size limits.
 */
protected static final String ENTRY_LOG_PER_LEDGER_COUNTER_LIMITS_MULT_FACTOR =
    "entryLogPerLedgerCounterLimitsMultFactor";
// Perform local consistency check on bookie startup
protected static final String LOCAL_CONSISTENCY_CHECK_ON_STARTUP = "localConsistencyCheckOnStartup";
// Certificate role based authorization
protected static final String AUTHORIZED_ROLES = "authorizedRoles";
protected static final String DATA_INTEGRITY_CHECKING_ENABLED = "dataIntegrityChecking";
protected static final String DATA_INTEGRITY_COOKIE_STAMPING_ENABLED = "dataIntegrityStampMissingCookies";
// RocksDB config file used as the default, e.g. by command-line tools or test cases
protected static final String DEFAULT_ROCKSDB_CONF = "defaultRocksdbConf";
// NOTE(review): the two comments below look swapped relative to the key names —
// the entry *location* index is typically the write-heavy, larger dataset,
// while the ledger metadata db needs no particular tuning. Verify against the
// RocksDB config files shipped with the project before relying on them.
// Used for ledgers db, doesn't need particular configuration
protected static final String ENTRY_LOCATION_ROCKSDB_CONF = "entryLocationRocksdbConf";
// Used for location index, lots of writes and much bigger dataset
protected static final String LEDGER_METADATA_ROCKSDB_CONF = "ledgerMetadataRocksdbConf";
protected static final String MAX_OPERATION_NUMBERS_IN_SINGLE_ROCKSDB_WRITE_BATCH =
    "maxOperationNumbersInSingleRocksdbWriteBatch";
protected static final String SKIP_REPLAY_JOURNAL_INVALID_RECORD = "skipReplayJournalInvalidRecord";
/**
 * Construct a default configuration object.
 */
public ServerConfiguration() {
    super();
}
/**
 * Construct a configuration based on other configuration.
 *
 * <p>All properties from {@code conf} are copied into this instance via
 * {@code loadConf}.
 *
 * @param conf
 *          Other configuration
 */
public ServerConfiguration(AbstractConfiguration conf) {
    super();
    loadConf(conf);
}
/**
 * Get entry logger size limitation.
 *
 * @return entry logger size limitation, in bytes
 */
public long getEntryLogSizeLimit() {
    return ENTRY_LOG_SIZE_LIMIT_KEY.getLong(this);
}
/**
 * Set entry logger size limitation.
 *
 * @param logSizeLimit
 *          new log size limitation, in bytes
 */
public ServerConfiguration setEntryLogSizeLimit(long logSizeLimit) {
    ENTRY_LOG_SIZE_LIMIT_KEY.set(this, logSizeLimit);
    return this;
}
/**
 * Is entry log file preallocation enabled. Defaults to {@code true}.
 *
 * @return whether entry log file preallocation is enabled or not.
 */
public boolean isEntryLogFilePreAllocationEnabled() {
    return this.getBoolean(ENTRY_LOG_FILE_PREALLOCATION_ENABLED, true);
}
/**
 * Enable/disable entry log file preallocation.
 *
 * @param enabled
 *          enable/disable entry log file preallocation.
 * @return server configuration object.
 */
public ServerConfiguration setEntryLogFilePreAllocationEnabled(boolean enabled) {
    this.setProperty(ENTRY_LOG_FILE_PREALLOCATION_ENABLED, enabled);
    return this;
}
/**
 * Get Garbage collection wait time, in milliseconds. Default value is
 * 600000 (10 minutes). The guideline is not to set a too low value for this,
 * if using zookeeper based ledger manager. And it would be nice to align with
 * the average lifecyle time of ledgers in the system.
 *
 * @return gc wait time, in milliseconds
 */
public long getGcWaitTime() {
    return this.getLong(GC_WAIT_TIME, 600000);
}
/**
 * Set garbage collection wait time.
 *
 * @param gcWaitTime
 *          gc wait time, in milliseconds
 * @return server configuration
 */
public ServerConfiguration setGcWaitTime(long gcWaitTime) {
    this.setProperty(GC_WAIT_TIME, Long.toString(gcWaitTime));
    return this;
}
/**
 * Get wait time in millis for garbage collection of overreplicated ledgers.
 *
 * @return gc wait time, in milliseconds (default: 1 day)
 */
public long getGcOverreplicatedLedgerWaitTimeMillis() {
    return this.getLong(GC_OVERREPLICATED_LEDGER_WAIT_TIME, TimeUnit.DAYS.toMillis(1));
}
/**
 * Set wait time for garbage collection of overreplicated ledgers. Default: 1 day
 *
 * <p>A ledger can be overreplicated under the following circumstances:
 * 1. The ledger with few entries has bk1 and bk2 as its ensemble.
 * 2. bk1 crashes.
 * 3. bk3 replicates the ledger from bk2 and updates the ensemble to bk2 and bk3.
 * 4. bk1 comes back up.
 * 5. Now there are 3 copies of the ledger.
 *
 * @param gcWaitTime wait time, expressed in the given {@code unit}
 * @param unit time unit of {@code gcWaitTime}; stored internally as milliseconds
 * @return server configuration
 */
public ServerConfiguration setGcOverreplicatedLedgerWaitTime(long gcWaitTime, TimeUnit unit) {
    this.setProperty(GC_OVERREPLICATED_LEDGER_WAIT_TIME, Long.toString(unit.toMillis(gcWaitTime)));
    return this;
}
/**
 * Max number of concurrent requests in garbage collection of overreplicated ledgers.
 *
 * @return max number of concurrent requests (default: 1000)
 */
public int getGcOverreplicatedLedgerMaxConcurrentRequests() {
    return this.getInt(GC_OVERREPLICATED_LEDGER_MAX_CONCURRENT_REQUESTS, 1000);
}
/**
 * Max number of concurrent requests in garbage collection of overreplicated ledgers. Default: 1000
 *
 * @param gcOverreplicatedLedgerMaxConcurrentRequests max concurrent requests
 * @return server configuration
 */
public ServerConfiguration setGcOverreplicatedLedgerMaxConcurrentRequests(
        int gcOverreplicatedLedgerMaxConcurrentRequests) {
    this.setProperty(GC_OVERREPLICATED_LEDGER_MAX_CONCURRENT_REQUESTS,
            Integer.toString(gcOverreplicatedLedgerMaxConcurrentRequests));
    return this;
}
/**
 * Get whether to use transactional compaction and using a separate log for compaction or not.
 *
 * @return use transactional compaction
 */
public boolean getUseTransactionalCompaction() {
    return this.getBoolean(USE_TRANSACTIONAL_COMPACTION, false);
}
/**
 * Set whether to use transactional compaction and using a separate log for compaction or not.
 * @param useTransactionalCompaction
 * @return server configuration
 */
public ServerConfiguration setUseTransactionalCompaction(boolean useTransactionalCompaction) {
    this.setProperty(USE_TRANSACTIONAL_COMPACTION, useTransactionalCompaction);
    return this;
}
/**
 * Get whether the bookie is configured to double check prior to gc.
 *
 * @return whether to verify metadata before garbage collecting
 */
public boolean getVerifyMetadataOnGC() {
    return this.getBoolean(VERIFY_METADATA_ON_GC, false);
}
/**
 * Set whether the bookie is configured to double check prior to gc.
 * @param verifyMetadataOnGC
 * @return server configuration
 */
public ServerConfiguration setVerifyMetadataOnGc(boolean verifyMetadataOnGC) {
    this.setProperty(VERIFY_METADATA_ON_GC, verifyMetadataOnGC);
    return this;
}
/**
 * Get whether the bookie is configured to use persistent
 * entrylogMetadataMap.
 * @return use persistent entry-log metadata map
 */
public boolean isGcEntryLogMetadataCacheEnabled() {
    return this.getBoolean(GC_ENTRYLOGMETADATA_CACHE_ENABLED, false);
}
/**
 * Set whether the bookie is configured to use persistent
 * entrylogMetadataMap.
 * @param gcEntryLogMetadataCacheEnabled
 * @return server configuration
 */
public ServerConfiguration setGcEntryLogMetadataCacheEnabled(
        boolean gcEntryLogMetadataCacheEnabled) {
    this.setProperty(GC_ENTRYLOGMETADATA_CACHE_ENABLED, gcEntryLogMetadataCacheEnabled);
    return this;
}
/**
 * Get directory to persist Entrylog metadata if
 * gcPersistentEntrylogMetadataMapEnabled is true.
 *
 * @return entrylog metadata-map persistent store dir path.(default: it
 *         creates a sub-directory under each ledger
 *         directory with name "metadata-cache". If it set, it only works for one ledger directory
 *         configured for ledgerDirectories).
 */
public String getGcEntryLogMetadataCachePath() {
    return getString(GC_ENTRYLOG_METADATA_CACHE_PATH, null);
}
/**
 * Set directory to persist Entrylog metadata if gcPersistentEntrylogMetadataMapEnabled is true.
 * If it set, it only works for one ledger directory configured for ledgerDirectories. For multi ledgerDirectory
 * configured, keep the default value is the best practice.
 *
 * @param gcEntrylogMetadataCachePath
 * @return server configuration.
 */
public ServerConfiguration setGcEntryLogMetadataCachePath(String gcEntrylogMetadataCachePath) {
    this.setProperty(GC_ENTRYLOG_METADATA_CACHE_PATH, gcEntrylogMetadataCachePath);
    return this;
}
/**
 * Whether garbage collection should size entry logs by the target entry log
 * size rather than the hard size limit. Default: false.
 * NOTE(review): semantics inferred from the flag name — confirm against the GC code.
 */
public boolean isUseTargetEntryLogSizeForGc() {
    return getBoolean(USE_TARGET_ENTRYLOG_SIZE_FOR_GC, false);
}
/**
 * Enable/disable using the target entry log size during garbage collection.
 *
 * @param useTargetEntryLogSizeForGc whether to use the target entry log size
 * @return server configuration
 */
public ServerConfiguration setUseTargetEntryLogSizeForGc(boolean useTargetEntryLogSizeForGc) {
    this.setProperty(USE_TARGET_ENTRYLOG_SIZE_FOR_GC, useTargetEntryLogSizeForGc);
    return this;
}
/**
 * Get whether local scrub is enabled.
 *
 * <p>Local scrub is considered enabled when the scrub period is positive.
 *
 * @return Whether local scrub is enabled.
 */
public boolean isLocalScrubEnabled() {
    return this.getLocalScrubPeriod() > 0;
}
/**
 * Get local scrub interval.
 *
 * @return Number of seconds between scrubs, {@literal <=}0 for disabled.
 */
public long getLocalScrubPeriod() {
    return this.getLong(LOCAL_SCRUB_PERIOD, 0);
}
/**
 * Set local scrub period in seconds ({@literal <=}0 for disabled). Scrub will be scheduled at delays
 * chosen from the interval (.5 * interval, 1.5 * interval)
 */
public void setLocalScrubPeriod(long period) {
    this.setProperty(LOCAL_SCRUB_PERIOD, period);
}
/**
 * Get local scrub rate limit (entries/second).
 *
 * @return Max number of entries to scrub per second, 0 for disabled.
 */
public double getLocalScrubRateLimit() {
    return this.getDouble(LOCAL_SCRUB_RATE_LIMIT, 60);
}
/**
 * Set local scrub rate limit (entries/second).
 *
 * @param scrubRateLimit Max number of entries per second to scan.
 */
public void setLocalScrubRateLimit(double scrubRateLimit) {
    this.setProperty(LOCAL_SCRUB_RATE_LIMIT, scrubRateLimit);
}
/**
 * Get flush interval. Default value is 10 second. It isn't useful to decrease
 * this value, since ledger storage only checkpoints when an entry logger file
 * is rolled.
 *
 * @return flush interval, in milliseconds
 */
public int getFlushInterval() {
    return this.getInt(FLUSH_INTERVAL, 10000);
}
/**
 * Set flush interval.
 *
 * @param flushInterval
 *          Flush Interval, in milliseconds
 * @return server configuration
 */
public ServerConfiguration setFlushInterval(int flushInterval) {
    this.setProperty(FLUSH_INTERVAL, Integer.toString(flushInterval));
    return this;
}
/**
 * Get entry log flush interval in bytes.
 *
 * <p>Default is 0. 0 or less disables this feature and effectively flush
 * happens on log rotation.
 *
 * <p>Flushing in smaller chunks but more frequently reduces spikes in disk
 * I/O. Flushing too frequently may also affect performance negatively.
 *
 * @return Entry log flush interval in bytes
 */
public long getFlushIntervalInBytes() {
    return this.getLong(FLUSH_ENTRYLOG_INTERVAL_BYTES, 0);
}
/**
 * Set entry log flush interval in bytes.
 *
 * @param flushInterval in bytes
 * @return server configuration
 */
public ServerConfiguration setFlushIntervalInBytes(long flushInterval) {
    this.setProperty(FLUSH_ENTRYLOG_INTERVAL_BYTES, Long.toString(flushInterval));
    return this;
}
/**
 * Get bookie death watch interval.
 *
 * @return watch interval, in milliseconds (default: 1000)
 */
public int getDeathWatchInterval() {
    return this.getInt(DEATH_WATCH_INTERVAL, 1000);
}
/**
 * Get open file limit. Default value is 20000.
 *
 * @return max number of files to open
 */
public int getOpenFileLimit() {
    return this.getInt(OPEN_FILE_LIMIT, 20000);
}
/**
 * Set limitation of number of open files.
 *
 * @param fileLimit
 *          Limitation of number of open files.
 * @return server configuration
 */
public ServerConfiguration setOpenFileLimit(int fileLimit) {
    setProperty(OPEN_FILE_LIMIT, fileLimit);
    return this;
}
/**
 * Get limitation number of index pages in ledger cache.
 *
 * @return max number of index pages in ledger cache, -1 if not configured
 */
public int getPageLimit() {
    return this.getInt(PAGE_LIMIT, -1);
}
/**
 * Set limitation number of index pages in ledger cache.
 *
 * @param pageLimit
 *          Limitation of number of index pages in ledger cache.
 * @return server configuration
 */
public ServerConfiguration setPageLimit(int pageLimit) {
    this.setProperty(PAGE_LIMIT, pageLimit);
    return this;
}
/**
 * Get page size.
 *
 * @return page size in ledger cache, in bytes (default: 8192)
 */
public int getPageSize() {
    return this.getInt(PAGE_SIZE, 8192);
}
/**
 * Set page size.
 *
 * @see #getPageSize()
 *
 * @param pageSize
 *          Page Size
 * @return Server Configuration
 */
public ServerConfiguration setPageSize(int pageSize) {
    this.setProperty(PAGE_SIZE, pageSize);
    return this;
}
/**
 * Get the minimum total size for the internal file info cache tables.
 * Providing a large enough estimate at construction time avoids the need for
 * expensive resizing operations later, but setting this value unnecessarily high
 * wastes memory.
 *
 * @return minimum size of initial file info cache
 *         (default: max(openFileLimit / 4, 64)).
 */
public int getFileInfoCacheInitialCapacity() {
    return getInt(FILEINFO_CACHE_INITIAL_CAPACITY, Math.max(getOpenFileLimit() / 4, 64));
}
/**
 * Set the minimum total size for the internal file info cache tables for initialization.
 *
 * @param initialCapacity
 *          Initial capacity of file info cache table.
 * @return server configuration instance.
 */
public ServerConfiguration setFileInfoCacheInitialCapacity(int initialCapacity) {
    setProperty(FILEINFO_CACHE_INITIAL_CAPACITY, initialCapacity);
    return this;
}
/**
 * Get the max idle time allowed for a open file info existed in file info cache.
 * If the file info is idle for a long time, exceed the given time period. The file
 * info will be evicted and closed. If the value is zero, the file info is evicted
 * only when opened files reached openFileLimit.
 *
 * @see #getOpenFileLimit
 * @return max idle time of a file info in the file info cache, in seconds.
 */
public long getFileInfoMaxIdleTime() {
    return this.getLong(FILEINFO_MAX_IDLE_TIME, 0L);
}
/**
 * Set the max idle time allowed for a open file info existed in file info cache.
 *
 * @param idleTime
 *          Idle time, in seconds.
 * @see #getFileInfoMaxIdleTime
 * @return server configuration object.
 */
public ServerConfiguration setFileInfoMaxIdleTime(long idleTime) {
    setProperty(FILEINFO_MAX_IDLE_TIME, idleTime);
    return this;
}
/**
 * Get fileinfo format version to write.
 *
 * @return fileinfo format version to write (default: 1).
 */
public int getFileInfoFormatVersionToWrite() {
    return this.getInt(FILEINFO_FORMAT_VERSION_TO_WRITE, 1);
}
/**
 * Set fileinfo format version to write.
 *
 * @param version
 *          fileinfo format version to write.
 * @return server configuration.
 */
public ServerConfiguration setFileInfoFormatVersionToWrite(int version) {
    this.setProperty(FILEINFO_FORMAT_VERSION_TO_WRITE, version);
    return this;
}
/**
 * Max journal file size.
 *
 * @return max journal file size, in MB (default: 2048)
 */
public long getMaxJournalSizeMB() {
    return this.getLong(MAX_JOURNAL_SIZE, 2 * 1024);
}
/**
 * Set new max journal file size.
 *
 * @param maxJournalSize
 *          new max journal file size, in MB
 * @return server configuration
 */
public ServerConfiguration setMaxJournalSizeMB(long maxJournalSize) {
    this.setProperty(MAX_JOURNAL_SIZE, Long.toString(maxJournalSize));
    return this;
}
/**
 * How much space should we pre-allocate at a time in the journal.
 *
 * @return journal pre-allocation size in MB (default: 16)
 */
public int getJournalPreAllocSizeMB() {
    return this.getInt(JOURNAL_PRE_ALLOC_SIZE, 16);
}
/**
 * Size of the write buffers used for the journal.
 *
 * @return journal write buffer size in KB (default: 64)
 */
public int getJournalWriteBufferSizeKB() {
    return this.getInt(JOURNAL_WRITE_BUFFER_SIZE, 64);
}
/**
 * Set the size of the write buffers used for the journal.
 *
 * @param bufferSizeKB the size of the write buffer used for the journal, in KB.
 * @return server configuration
 */
public ServerConfiguration setJournalWriteBufferSizeKB(int bufferSizeKB) {
    setProperty(JOURNAL_WRITE_BUFFER_SIZE, bufferSizeKB);
    return this;
}
/**
 * Max number of older journal files kept.
 *
 * @return max number of older journal files to kept (default: 5)
 */
public int getMaxBackupJournals() {
    return this.getInt(MAX_BACKUP_JOURNALS, 5);
}
/**
 * Set max number of older journal files to kept.
 *
 * @param maxBackupJournals
 *          Max number of older journal files
 * @return server configuration
 */
public ServerConfiguration setMaxBackupJournals(int maxBackupJournals) {
    this.setProperty(MAX_BACKUP_JOURNALS, Integer.toString(maxBackupJournals));
    return this;
}
/**
 * All the journal writes and commits should be aligned to given size. If not,
 * zeros will be padded to align to given size.
 *
 * @return journal alignment size, in bytes (default: 512)
 */
public int getJournalAlignmentSize() {
    return this.getInt(JOURNAL_ALIGNMENT_SIZE, 512);
}
/**
 * Set journal alignment size.
 *
 * @param size
 *          journal alignment size.
 * @return server configuration.
 */
public ServerConfiguration setJournalAlignmentSize(int size) {
    this.setProperty(JOURNAL_ALIGNMENT_SIZE, size);
    return this;
}
/**
 * Get journal format version to write.
 *
 * @return journal format version to write (default: 6).
 */
public int getJournalFormatVersionToWrite() {
    return this.getInt(JOURNAL_FORMAT_VERSION_TO_WRITE, 6);
}
/**
 * Set journal format version to write.
 *
 * @param version
 *          journal format version to write.
 * @return server configuration.
 */
public ServerConfiguration setJournalFormatVersionToWrite(int version) {
    this.setProperty(JOURNAL_FORMAT_VERSION_TO_WRITE, version);
    return this;
}
/**
 * Set the size of the journal queue.
 *
 * @param journalQueueSize
 *          the max size of journal queue
 * @return server configuration.
 */
public ServerConfiguration setJournalQueueSize(int journalQueueSize) {
    this.setProperty(JOURNAL_QUEUE_SIZE, journalQueueSize);
    return this;
}
/**
 * Get size of journal queue.
 *
 * @return the max size of journal queue (default: 10000).
 */
public int getJournalQueueSize() {
    return this.getInt(JOURNAL_QUEUE_SIZE, 10_000);
}
/**
 * Set the max amount of memory that can be used by the journal.
 *
 * @param journalMaxMemorySizeMb
 *          the max amount of memory for the journal, in MB
 * @return server configuration.
 */
public ServerConfiguration setJournalMaxMemorySizeMb(long journalMaxMemorySizeMb) {
    this.setProperty(JOURNAL_MAX_MEMORY_SIZE_MB, journalMaxMemorySizeMb);
    return this;
}
/**
 * Get the max amount of memory that can be used by the journal.
 *
 * @return the max amount of memory for the journal, in MB
 */
public long getJournalMaxMemorySizeMb() {
    // Default is taking 5% of max direct memory (and convert to MB).
    long estimateMaxDirectMemory = io.netty.util.internal.PlatformDependent.estimateMaxDirectMemory();
    long defaultValue = (long) (estimateMaxDirectMemory * 0.05 / 1024 / 1024);
    return this.getLong(JOURNAL_MAX_MEMORY_SIZE_MB, defaultValue);
}
/**
 * Set PageCache flush interval in millisecond.
 *
 * @param journalPageCacheFlushIntervalMSec
 *          journal pageCache flush interval when journalSyncData closed
 * @return server configuration.
 */
public ServerConfiguration setJournalPageCacheFlushIntervalMSec(long journalPageCacheFlushIntervalMSec) {
    this.setProperty(JOURNAL_PAGECACHE_FLUSH_INTERVAL_MSEC, journalPageCacheFlushIntervalMSec);
    return this;
}
/**
 * Get journal pageCache flush interval.
 *
 * @return journal pageCache flush interval, in milliseconds (default: 1000).
 */
public long getJournalPageCacheFlushIntervalMSec() {
    return this.getLong(JOURNAL_PAGECACHE_FLUSH_INTERVAL_MSEC, 1000);
}
/**
 * Set JournalChannelProvider classname.
 * @param journalChannelProvider
 *          The JournalChannelProvider classname. The class must implements {@link FileChannelProvider} and
 *          no args constructor is needed.
 * @return server configuration.
 */
public ServerConfiguration setJournalChannelProvider(String journalChannelProvider) {
    this.setProperty(JOURNAL_CHANNEL_PROVIDER, journalChannelProvider);
    return this;
}
/**
 * Get JournalChannelProvider classname.
 *
 * @return the configured provider classname, defaulting to
 *         {@code org.apache.bookkeeper.bookie.DefaultFileChannelProvider}.
 */
public String getJournalChannelProvider() {
    return this.getString(JOURNAL_CHANNEL_PROVIDER, "org.apache.bookkeeper.bookie.DefaultFileChannelProvider");
}
/**
 * Get reuse journal files.
 * @return whether journal files are reused (default: false)
 */
public boolean getJournalReuseFiles() {
    return this.getBoolean(JOURNAL_REUSE_FILES, false);
}
/**
 * Set reuse journal files.
 * @param journalReuseFiles whether to reuse journal files
 * @return server configuration
 */
public ServerConfiguration setJournalReuseFiles(boolean journalReuseFiles) {
    setProperty(JOURNAL_REUSE_FILES, journalReuseFiles);
    return this;
}
/**
 * Get max number of adds in progress. 0 == unlimited.
 *
 * @return Max number of adds in progress.
 */
public int getMaxAddsInProgressLimit() {
    return this.getInt(MAX_ADDS_IN_PROGRESS_LIMIT, 0);
}
/**
 * Set max number of adds in progress. 0 == unlimited.
 *
 * @param value
 *          max number of adds in progress.
 * @return server configuration.
 */
public ServerConfiguration setMaxAddsInProgressLimit(int value) {
    this.setProperty(MAX_ADDS_IN_PROGRESS_LIMIT, value);
    return this;
}
/**
 * Get max number of reads in progress. 0 == unlimited.
 *
 * @return Max number of reads in progress.
 */
public int getMaxReadsInProgressLimit() {
    return this.getInt(MAX_READS_IN_PROGRESS_LIMIT, 0);
}
/**
 * Set max number of reads in progress. 0 == unlimited.
 *
 * @param value
 *          max number of reads in progress.
 * @return server configuration.
 */
public ServerConfiguration setMaxReadsInProgressLimit(int value) {
    this.setProperty(MAX_READS_IN_PROGRESS_LIMIT, value);
    return this;
}
/**
 * Configures action in case if server timed out sending response to the client.
 * true == close the channel and drop response
 * false == drop response
 * Requires waitTimeoutOnBackpressureMs >= 0 otherwise ignored.
 *
 * @return value indicating if channel should be closed.
 */
public boolean getCloseChannelOnResponseTimeout(){
    return this.getBoolean(CLOSE_CHANNEL_ON_RESPONSE_TIMEOUT, false);
}
/**
 * Configures action in case if server timed out sending response to the client.
 * true == close the channel and drop response
 * false == drop response
 * Requires waitTimeoutOnBackpressureMs >= 0 otherwise ignored.
 *
 * @param value whether to close the channel on response timeout
 * @return server configuration.
 */
public ServerConfiguration setCloseChannelOnResponseTimeout(boolean value) {
    this.setProperty(CLOSE_CHANNEL_ON_RESPONSE_TIMEOUT, value);
    return this;
}
/**
 * Timeout controlling wait on response send in case of unresponsive client
 * (i.e. client in long GC etc.)
 *
 * @return timeout value, in milliseconds
 *        negative value disables the feature
 *        0 to allow request to fail immediately
 *        Default is -1 (disabled)
 */
public long getWaitTimeoutOnResponseBackpressureMillis() {
    return getLong(WAIT_TIMEOUT_ON_RESPONSE_BACKPRESSURE, -1);
}
/**
 * Timeout controlling wait on response send in case of unresponsive client
 * (i.e. client in long GC etc.)
 *
 * @param value
 *        negative value disables the feature
 *        0 to allow request to fail immediately
 *        Default is -1 (disabled)
 * @return server configuration.
 */
public ServerConfiguration setWaitTimeoutOnResponseBackpressureMillis(long value) {
    setProperty(WAIT_TIMEOUT_ON_RESPONSE_BACKPRESSURE, value);
    return this;
}
/**
 * Get bookie port that bookie server listen on.
 *
 * @return bookie port (default: 3181)
 */
public int getBookiePort() {
    return this.getInt(BOOKIE_PORT, 3181);
}
/**
 * Set new bookie port that bookie server listen on.
 *
 * @param port
 *          Port to listen on
 * @return server configuration
 */
public ServerConfiguration setBookiePort(int port) {
    this.setProperty(BOOKIE_PORT, Integer.toString(port));
    return this;
}
/**
 * Get the network interface that the bookie should
 * listen for connections on. If this is null, then the bookie
 * will listen for connections on all interfaces.
 *
 * @return the network interface to listen on, e.g. eth0, or
 *         null if none is specified
 */
public String getListeningInterface() {
    return this.getString(LISTENING_INTERFACE);
}
/**
 * Set the network interface that the bookie should listen on.
 * If not set, the bookie will listen on all interfaces.
 *
 * @param iface the interface to listen on
 * @return server configuration
 */
public ServerConfiguration setListeningInterface(String iface) {
    this.setProperty(LISTENING_INTERFACE, iface);
    return this;
}
/**
 * Is the bookie allowed to use a loopback interface as its primary
 * interface(i.e. the interface it uses to establish its identity)?
 *
 * <p>By default, loopback interfaces are not allowed as the primary
 * interface.
 *
 * <p>Using a loopback interface as the primary interface usually indicates
 * a configuration error. For example, it's fairly common in some VPS setups
 * to not configure a hostname, or to have the hostname resolve to
 * 127.0.0.1. If this is the case, then all bookies in the cluster will
 * establish their identities as 127.0.0.1:3181, and only one will be able
 * to join the cluster. For VPSs configured like this, you should explicitly
 * set the listening interface.
 *
 * @see #setListeningInterface(String)
 * @return whether a loopback interface can be used as the primary interface
 */
public boolean getAllowLoopback() {
    return this.getBoolean(ALLOW_LOOPBACK, false);
}
/**
 * Configure the bookie to allow loopback interfaces to be used
 * as the primary bookie interface.
 *
 * @see #getAllowLoopback
 * @param allow whether to allow loopback interfaces
 * @return server configuration
 */
public ServerConfiguration setAllowLoopback(boolean allow) {
    this.setProperty(ALLOW_LOOPBACK, allow);
    return this;
}
/**
 * Get the configured BookieId for the bookie.
 *
 * <p>If present, this setting will take precedence over the
 * automatic BookieId generation, based on Network Addresses.
 *
 * @see #setBookieId(java.lang.String)
 * @see #getAdvertisedAddress()
 * @return the configure address to be advertised, or null if not set
 */
public String getBookieId() {
    return this.getString(BOOKIE_ID, null);
}
/**
 * Configure the bookie to advertise a specific BookieId.
 *
 * <p>By default, a bookie will advertise a BookieId computed
 * from the primary network endpoint address.
 *
 * <p>The given id is validated with {@code BookieId.parse} before being
 * stored, so an invalid id is rejected eagerly rather than at registration time.
 *
 * @see #getBookieId()
 * @see #setAdvertisedAddress(java.lang.String)
 * @param bookieId the bookie id
 *
 * @return server configuration
 */
public ServerConfiguration setBookieId(String bookieId) {
    BookieId.parse(bookieId);
    this.setProperty(BOOKIE_ID, bookieId);
    return this;
}
/**
 * Get the configured advertised address for the bookie.
 *
 * <p>If present, this setting will take precedence over the
 * {@link #setListeningInterface(String)} and
 * {@link #setUseHostNameAsBookieID(boolean)}.
 *
 * @see #setAdvertisedAddress(String)
 * @return the configure address to be advertised, or null if not set
 */
public String getAdvertisedAddress() {
    return this.getString(ADVERTISED_ADDRESS, null);
}
/**
 * Configure the bookie to advertise a specific address.
 *
 * <p>By default, a bookie will advertise either its own IP or hostname,
 * depending on the {@link #getUseHostNameAsBookieID()} setting.
 *
 * <p>When the advertised is set to a non-empty string, the bookie will
 * register and advertise using this address.
 *
 * <p>If present, this setting will take precedence over the
 * {@link #setListeningInterface(String)} and
 * {@link #setUseHostNameAsBookieID(boolean)}.
 *
 * @see #getAdvertisedAddress()
 * @param advertisedAddress
 *          the address (IP or hostname) to register and advertise
 * @return server configuration
 */
public ServerConfiguration setAdvertisedAddress(String advertisedAddress) {
    this.setProperty(ADVERTISED_ADDRESS, advertisedAddress);
    return this;
}
/**
 * Is the bookie allowed to use an ephemeral port (port 0) as its server port.
 *
 * <p>By default, an ephemeral port is not allowed. Using an ephemeral port
 * as the service port usually indicates a configuration error. However, in unit
 * tests, using ephemeral port will address port conflicts problem and allow
 * running tests in parallel.
 *
 * @return whether is allowed to use an ephemeral port.
 */
public boolean getAllowEphemeralPorts() {
    return this.getBoolean(ALLOW_EPHEMERAL_PORTS, false);
}
/**
 * Configure the bookie to allow using an ephemeral port.
 *
 * @param allow whether to allow using an ephemeral port.
 * @return server configuration
 */
public ServerConfiguration setAllowEphemeralPorts(boolean allow) {
    this.setProperty(ALLOW_EPHEMERAL_PORTS, allow);
    return this;
}
/**
 * Return whether we should allow addition of ledger/index dirs to an existing bookie.
 *
 * @return true if the addition is allowed; false otherwise
 */
public boolean getAllowStorageExpansion() {
    return this.getBoolean(ALLOW_STORAGE_EXPANSION, false);
}
/**
 * Change the setting of whether or not we should allow ledger/index
 * dirs to be added to the current set of dirs.
 *
 * @param val - true if new ledger/index dirs can be added; false otherwise.
 *
 * @return server configuration
 */
public ServerConfiguration setAllowStorageExpansion(boolean val) {
    this.setProperty(ALLOW_STORAGE_EXPANSION, val);
    return this;
}
/**
 * Get dir names to store journal files.
 *
 * <p>Falls back to the single legacy {@link #getJournalDirName()} value when
 * the multi-dir key is unset, empty, or holds one blank entry.
 *
 * @return journal dir names
 */
public String[] getJournalDirNames() {
    String[] configured = this.getStringArray(JOURNAL_DIRS);
    boolean unset = (configured == null)
            || (configured.length == 0)
            || (configured.length == 1 && StringUtils.isEmpty(configured[0]));
    if (unset) {
        return new String[] { getJournalDirName() };
    }
    return configured;
}
/**
 * Get dir name to store journal files.
 *
 * @return journal dir name (default: "/tmp/bk-txn")
 * @deprecated use {@link #getJournalDirNames()} instead
 */
@Deprecated
public String getJournalDirName() {
    return this.getString(JOURNAL_DIR, "/tmp/bk-txn");
}
/**
 * Get dir name to store journal files, without applying any default.
 *
 * @return journal dir name, or null if not configured
 */
public String getJournalDirNameWithoutDefault() {
    return this.getString(JOURNAL_DIR);
}
/**
 * Set dir name to store journal files.
 *
 * <p>Note: this writes the multi-dir key ({@code journalDirectories}), not the
 * legacy single-dir key, so it overrides any previously configured list.
 *
 * @param journalDir
 *          Dir to store journal files
 * @return server configuration
 */
public ServerConfiguration setJournalDirName(String journalDir) {
    this.setProperty(JOURNAL_DIRS, new String[] {journalDir});
    return this;
}
/**
 * Set dir names to store journal files.
 *
 * @param journalDirs
 *          Dirs to store journal files
 * @return server configuration
 */
public ServerConfiguration setJournalDirsName(String[] journalDirs) {
    this.setProperty(JOURNAL_DIRS, journalDirs);
    return this;
}
/**
 * Get dirs to store journal files.
 *
 * <p>Wraps each configured journal directory name in a {@link File};
 * always non-empty because {@link #getJournalDirNames()} supplies a default.
 *
 * @return journal dirs
 */
public File[] getJournalDirs() {
    final String[] dirNames = getJournalDirNames();
    final File[] dirs = new File[dirNames.length];
    int idx = 0;
    for (String dirName : dirNames) {
        dirs[idx++] = new File(dirName);
    }
    return dirs;
}
/**
 * Get dir names to store ledger data, without applying any default.
 *
 * @return ledger dir names, if not provided return null
 */
public String[] getLedgerDirWithoutDefault() {
    return this.getStringArray(LEDGER_DIRS);
}
/**
 * Get dir names to store ledger data.
 *
 * @return ledger dir names (default: ["/tmp/bk-data"])
 */
public String[] getLedgerDirNames() {
    String[] ledgerDirs = this.getStringArray(LEDGER_DIRS);
    if ((null == ledgerDirs) || (0 == ledgerDirs.length)) {
        return new String[] { "/tmp/bk-data" };
    }
    return ledgerDirs;
}
/**
 * Set dir names to store ledger data.
 *
 * <p>A null argument is ignored (no-op) rather than clearing the setting.
 *
 * @param ledgerDirs
 *          Dir names to store ledger data
 * @return server configuration
 */
public ServerConfiguration setLedgerDirNames(String[] ledgerDirs) {
    if (null == ledgerDirs) {
        return this;
    }
    this.setProperty(LEDGER_DIRS, ledgerDirs);
    return this;
}
/**
 * Get dirs that stores ledger data.
 *
 * <p>Wraps each configured ledger directory name in a {@link File};
 * always non-empty because {@link #getLedgerDirNames()} supplies a default.
 *
 * @return ledger dirs
 */
public File[] getLedgerDirs() {
    final String[] dirNames = getLedgerDirNames();
    final File[] dirs = new File[dirNames.length];
    int idx = 0;
    for (String dirName : dirNames) {
        dirs[idx++] = new File(dirName);
    }
    return dirs;
}
/**
 * Get dir name to store index files.
 *
 * <p>The explicit {@code containsKey} guard preserves the distinction between
 * "key absent" (null) and "key present but empty".
 *
 * @return ledger index dir name, if no index dirs provided return null
 */
public String[] getIndexDirNames() {
    if (!this.containsKey(INDEX_DIRS)) {
        return null;
    }
    return this.getStringArray(INDEX_DIRS);
}
/**
 * Set dir name to store index files.
 *
 * @param indexDirs
 *          Index dir names
 * @return server configuration.
 */
public ServerConfiguration setIndexDirName(String[] indexDirs) {
    this.setProperty(INDEX_DIRS, indexDirs);
    return this;
}
/**
 * Get index dirs to store ledger index files.
 *
 * <p>Returns null when no index dirs are configured, mirroring
 * {@link #getIndexDirNames()}.
 *
 * @return index dirs, if no index dirs provided return null
 */
public File[] getIndexDirs() {
    final String[] dirNames = getIndexDirNames();
    if (dirNames == null) {
        return null;
    }
    final File[] dirs = new File[dirNames.length];
    int idx = 0;
    for (String dirName : dirNames) {
        dirs[idx++] = new File(dirName);
    }
    return dirs;
}
/**
 * Is tcp connection no delay.
 *
 * @return tcp socket nodelay setting (default: true)
 */
public boolean getServerTcpNoDelay() {
    return getBoolean(SERVER_TCP_NODELAY, true);
}
/**
 * Set socket nodelay setting.
 *
 * @param noDelay
 *          NoDelay setting
 * @return server configuration
 */
public ServerConfiguration setServerTcpNoDelay(boolean noDelay) {
    setProperty(SERVER_TCP_NODELAY, Boolean.toString(noDelay));
    return this;
}
/**
 * Get the number of IO threads. This is the number of
 * threads used by Netty to handle TCP connections.
 *
 * @return the number of IO threads (default: 2 * available processors)
 */
public int getServerNumIOThreads() {
    return getInt(SERVER_NUM_IO_THREADS, 2 * Runtime.getRuntime().availableProcessors());
}
/**
 * Get the number of Acceptor threads.
 *
 * @return the number of Acceptor threads (default: 1)
 */
public int getServerNumAcceptorThreads() {
    return getInt(SERVER_NUM_ACCEPTOR_THREADS, 1);
}
/**
 * Set the number of IO threads.
 *
 * <p>
 * This is the number of threads used by Netty to handle TCP connections.
 * </p>
 *
 * @see #getServerNumIOThreads()
 * @param numThreads number of IO threads used for bookkeeper
 * @return server configuration
 */
public ServerConfiguration setServerNumIOThreads(int numThreads) {
    setProperty(SERVER_NUM_IO_THREADS, Integer.toString(numThreads));
    return this;
}
/**
 * Timeout to drain the socket on close.
 *
 * @return socket linger setting (default 0, i.e. linger disabled)
 */
public int getServerSockLinger() {
    return getInt(SERVER_SOCK_LINGER, 0);
}
/**
 * Set socket linger timeout on close.
 *
 * <p>When enabled, a close or shutdown will not return until all queued messages for the socket have been
 * successfully sent or the linger timeout has been reached. Otherwise, the call returns immediately and the
 * closing is done in the background.
 *
 * @param linger
 *          linger timeout value
 * @return server configuration
 */
public ServerConfiguration setServerSockLinger(int linger) {
    setProperty(SERVER_SOCK_LINGER, Integer.toString(linger));
    return this;
}
/**
 * Get socket keepalive.
 *
 * @return socket keepalive setting (default true)
 */
public boolean getServerSockKeepalive() {
    return getBoolean(SERVER_SOCK_KEEPALIVE, true);
}
/**
 * Set socket keepalive setting.
 *
 * <p>This setting is used to send keep-alive messages on connection-oriented sockets.
 *
 * @param keepalive
 *          KeepAlive setting
 * @return server configuration
 */
public ServerConfiguration setServerSockKeepalive(boolean keepalive) {
    setProperty(SERVER_SOCK_KEEPALIVE, Boolean.toString(keepalive));
    return this;
}
/**
 * Get zookeeper client backoff retry start time in millis.
 *
 * @return zk backoff retry start time in millis; defaults to {@link #getZkTimeout()}.
 */
public int getZkRetryBackoffStartMs() {
    return getInt(ZK_RETRY_BACKOFF_START_MS, getZkTimeout());
}
/**
 * Set zookeeper client backoff retry start time in millis.
 *
 * @param retryMs
 *          backoff retry start time in millis.
 * @return server configuration.
 */
public ServerConfiguration setZkRetryBackoffStartMs(int retryMs) {
    setProperty(ZK_RETRY_BACKOFF_START_MS, retryMs);
    return this;
}
/**
 * Get zookeeper client backoff retry max time in millis.
 *
 * @return zk backoff retry max time in millis; defaults to {@link #getZkTimeout()}.
 */
public int getZkRetryBackoffMaxMs() {
    return getInt(ZK_RETRY_BACKOFF_MAX_MS, getZkTimeout());
}
/**
 * Set zookeeper client backoff retry max time in millis.
 *
 * @param retryMs
 *          backoff retry max time in millis.
 * @return server configuration.
 */
public ServerConfiguration setZkRetryBackoffMaxMs(int retryMs) {
    setProperty(ZK_RETRY_BACKOFF_MAX_MS, retryMs);
    return this;
}
/**
 * Is statistics enabled.
 *
 * @return is statistics enabled (default true)
 */
public boolean isStatisticsEnabled() {
    return getBoolean(ENABLE_STATISTICS, true);
}
/**
 * Turn on/off statistics.
 *
 * @param enabled
 *          Whether statistics enabled or not.
 * @return server configuration
 */
public ServerConfiguration setStatisticsEnabled(boolean enabled) {
    setProperty(ENABLE_STATISTICS, Boolean.toString(enabled));
    return this;
}
/**
 * Allow manually force compact the entry log or not.
 *
 * @param enable
 *          whether allow manually force compact the entry log or not.
 * @return service configuration.
 */
public ServerConfiguration setForceAllowCompaction(boolean enable) {
    setProperty(FORCE_ALLOW_COMPACTION, enable);
    return this;
}
/**
 * The force compaction is allowed or not when disabling the entry log compaction.
 *
 * @return whether force compaction is allowed when entry log compaction is disabled (default false).
 */
public boolean isForceAllowCompaction() {
    return getBoolean(FORCE_ALLOW_COMPACTION, false);
}
/**
 * Get threshold of minor compaction.
 *
 * <p>For those entry log files whose remaining size percentage reaches below
 * this threshold will be compacted in a minor compaction.
 *
 * <p>If it is set to less than zero, the minor compaction is disabled.
 *
 * @return threshold of minor compaction (default 0.2)
 */
public double getMinorCompactionThreshold() {
    return getDouble(MINOR_COMPACTION_THRESHOLD, 0.2d);
}
/**
 * Set threshold of minor compaction.
 *
 * @see #getMinorCompactionThreshold()
 *
 * @param threshold
 *          Threshold for minor compaction
 * @return server configuration
 */
public ServerConfiguration setMinorCompactionThreshold(double threshold) {
    setProperty(MINOR_COMPACTION_THRESHOLD, threshold);
    return this;
}
/**
 * Get threshold of major compaction.
 *
 * <p>For those entry log files whose remaining size percentage reaches below
 * this threshold will be compacted in a major compaction.
 *
 * <p>If it is set to less than zero, the major compaction is disabled.
 *
 * @return threshold of major compaction (default 0.8)
 */
public double getMajorCompactionThreshold() {
    return getDouble(MAJOR_COMPACTION_THRESHOLD, 0.8d);
}
/**
 * Set threshold of major compaction.
 *
 * @see #getMajorCompactionThreshold()
 *
 * @param threshold
 *          Threshold of major compaction
 * @return server configuration
 */
public ServerConfiguration setMajorCompactionThreshold(double threshold) {
    setProperty(MAJOR_COMPACTION_THRESHOLD, threshold);
    return this;
}
/**
 * Get the maximum milliseconds to run major compaction. If {@literal <=}0 the
 * thread will run until all compaction is completed.
 *
 * @return limit
 *          The number of milliseconds to run compaction (default -1, i.e. unlimited).
 */
public long getMajorCompactionMaxTimeMillis() {
    return getLong(MAJOR_COMPACTION_MAX_TIME_MILLIS, -1);
}
/**
 * Set the maximum milliseconds to run major compaction. If {@literal <=}0 the
 * thread will run until all compaction is completed.
 *
 * @see #getMajorCompactionMaxTimeMillis()
 *
 * @param majorCompactionMaxTimeMillis
 *          The number of milliseconds to run compaction.
 *
 * @return server configuration
 */
public ServerConfiguration setMajorCompactionMaxTimeMillis(long majorCompactionMaxTimeMillis) {
    setProperty(MAJOR_COMPACTION_MAX_TIME_MILLIS, majorCompactionMaxTimeMillis);
    return this;
}
/**
 * Get interval to run minor compaction, in seconds.
 *
 * <p>If it is set to less than zero, the minor compaction is disabled.
 *
 * @return interval to run minor compaction, in seconds (default 3600, i.e. 1 hour)
 */
public long getMinorCompactionInterval() {
    return getLong(MINOR_COMPACTION_INTERVAL, 3600);
}
/**
 * Set interval to run minor compaction.
 *
 * @see #getMinorCompactionInterval()
 *
 * @param interval
 *          Interval to run minor compaction
 * @return server configuration
 */
public ServerConfiguration setMinorCompactionInterval(long interval) {
    setProperty(MINOR_COMPACTION_INTERVAL, interval);
    return this;
}
/**
 * Get interval to run major compaction, in seconds.
 *
 * <p>If it is set to less than zero, the major compaction is disabled.
 *
 * @return interval to run major compaction, in seconds (default 86400, i.e. 1 day)
 */
public long getMajorCompactionInterval() {
    return getLong(MAJOR_COMPACTION_INTERVAL, 86400);
}
/**
 * Set interval to run major compaction.
 *
 * @see #getMajorCompactionInterval()
 *
 * @param interval
 *          Interval to run major compaction
 * @return server configuration
 */
public ServerConfiguration setMajorCompactionInterval(long interval) {
    setProperty(MAJOR_COMPACTION_INTERVAL, interval);
    return this;
}
/**
 * Get the maximum milliseconds to run minor compaction. If {@literal <=}0 the
 * thread will run until all compaction is completed.
 *
 * @return limit
 *          The number of milliseconds to run compaction (default -1, i.e. unlimited).
 */
public long getMinorCompactionMaxTimeMillis() {
    return getLong(MINOR_COMPACTION_MAX_TIME_MILLIS, -1);
}
/**
 * Set the maximum milliseconds to run minor compaction. If {@literal <=}0 the
 * thread will run until all compaction is completed.
 *
 * @see #getMinorCompactionMaxTimeMillis()
 *
 * @param minorCompactionMaxTimeMillis
 *          The number of milliseconds to run compaction.
 *
 * @return server configuration
 */
public ServerConfiguration setMinorCompactionMaxTimeMillis(long minorCompactionMaxTimeMillis) {
    setProperty(MINOR_COMPACTION_MAX_TIME_MILLIS, minorCompactionMaxTimeMillis);
    return this;
}
/**
 * Get whether force GC is allowed when disk full or almost full.
 *
 * <p>Force GC may get some space back, but may also fill up disk space more
 * quickly. This is because new log files are created before GC, while old
 * garbage log files deleted after GC.
 *
 * @return true - do force GC when disk full,
 *         false - suspend GC when disk full (default).
 */
public boolean getIsForceGCAllowWhenNoSpace() {
    return getBoolean(IS_FORCE_GC_ALLOW_WHEN_NO_SPACE, false);
}
/**
 * Set whether force GC is allowed when disk full or almost full.
 *
 * @param force true to allow force GC; false to suspend GC
 *
 * @return ServerConfiguration
 */
public ServerConfiguration setIsForceGCAllowWhenNoSpace(boolean force) {
    setProperty(IS_FORCE_GC_ALLOW_WHEN_NO_SPACE, force);
    return this;
}
/**
 * Set the grace period which the rereplication worker will wait before
 * fencing and rereplicating a ledger fragment which is still being written
 * to, on bookie failure.
 *
 * <p>The grace period allows the writer to detect the bookie failure, and
 * start writing to another ledger fragment. If the writer writes nothing
 * during the grace period, the rereplication worker assumes that it has
 * crashed and therefore fences the ledger, preventing any further writes to
 * that ledger.
 *
 * @see org.apache.bookkeeper.client.BookKeeper#openLedger
 *
 * @param waitTime time to wait before replicating ledger fragment
 */
public void setOpenLedgerRereplicationGracePeriod(String waitTime) {
    setProperty(OPEN_LEDGER_REREPLICATION_GRACE_PERIOD, waitTime);
}
/**
 * Get the grace period which the rereplication worker to wait before
 * fencing and rereplicating a ledger fragment which is still being written
 * to, on bookie failure.
 *
 * @return the grace period (default 30000)
 */
public long getOpenLedgerRereplicationGracePeriod() {
    return getLong(OPEN_LEDGER_REREPLICATION_GRACE_PERIOD, 30000);
}
/**
 * Set the grace period, in milliseconds, which the replication worker has
 * to wait before releasing the lock after it failed to replicate a ledger.
 * For the first ReplicationWorker.NUM_OF_EXPONENTIAL_BACKOFF_RETRIALS
 * failures it will do exponential backoff then it will bound at
 * LOCK_RELEASE_OF_FAILED_LEDGER_GRACE_PERIOD.
 *
 * <p>On replication failure, instead of releasing the lock immediately
 * after failed attempt, it will hold under replicated ledger lock for the
 * grace period and then it will release the lock.
 *
 * @param waitTime grace period in milliseconds
 */
public void setLockReleaseOfFailedLedgerGracePeriod(String waitTime) {
    setProperty(LOCK_RELEASE_OF_FAILED_LEDGER_GRACE_PERIOD, waitTime);
}
/**
 * Get the grace period, in milliseconds, which the replication worker has
 * to wait before releasing the lock after it failed to replicate a ledger.
 * For the first ReplicationWorker.NUM_OF_EXPONENTIAL_BACKOFF_RETRIALS
 * failures it will do exponential backoff then it will bound at
 * LOCK_RELEASE_OF_FAILED_LEDGER_GRACE_PERIOD.
 *
 * @return the grace period in milliseconds (default 300000, i.e. 5 minutes)
 */
public long getLockReleaseOfFailedLedgerGracePeriod() {
    return getLong(LOCK_RELEASE_OF_FAILED_LEDGER_GRACE_PERIOD, 300000);
}
/**
 * Get the number of bytes we should use as capacity for
 * org.apache.bookkeeper.bookie.BufferedReadChannel.
 * Default is 512 bytes.
 *
 * @return read buffer size
 */
public int getReadBufferBytes() {
    return getInt(READ_BUFFER_SIZE, 512);
}
/**
 * Set the number of bytes we should use as capacity for
 * org.apache.bookkeeper.bookie.BufferedReadChannel.
 *
 * @param readBufferSize
 *          Read Buffer Size
 * @return server configuration
 */
public ServerConfiguration setReadBufferBytes(int readBufferSize) {
    setProperty(READ_BUFFER_SIZE, readBufferSize);
    return this;
}
/**
 * Set the number of threads that would handle write requests.
 *
 * @param numThreads
 *          number of threads to handle write requests.
 * @return server configuration
 */
public ServerConfiguration setNumAddWorkerThreads(int numThreads) {
    setProperty(NUM_ADD_WORKER_THREADS, numThreads);
    return this;
}
/**
 * Get the number of threads that should handle write requests.
 *
 * @return the number of threads that handle write requests (default 1).
 */
public int getNumAddWorkerThreads() {
    return getInt(NUM_ADD_WORKER_THREADS, 1);
}
/**
 * Set the number of threads that should handle long poll requests.
 *
 * @param numThreads
 *          number of threads to handle long poll requests.
 * @return server configuration
 */
public ServerConfiguration setNumLongPollWorkerThreads(int numThreads) {
    setProperty(NUM_LONG_POLL_WORKER_THREADS, numThreads);
    return this;
}
/**
 * Get the number of threads that should handle long poll requests.
 *
 * <p>If the number of threads is zero or negative, bookie will fallback to
 * use read threads. If there is no read threads used, it will create a thread pool
 * with {@link Runtime#availableProcessors()} threads.
 *
 * @return the number of threads that should handle long poll requests, default value is 0.
 */
public int getNumLongPollWorkerThreads() {
    return getInt(NUM_LONG_POLL_WORKER_THREADS, 0);
}
/**
 * Set the number of threads that should be used for high priority requests
 * (i.e. recovery reads and adds, and fencing).
 *
 * @param numThreads
 *          number of threads to handle high priority requests.
 * @return server configuration
 */
public ServerConfiguration setNumHighPriorityWorkerThreads(int numThreads) {
    setProperty(NUM_HIGH_PRIORITY_WORKER_THREADS, numThreads);
    return this;
}
/**
 * Get the number of threads that should be used for high priority requests
 * (i.e. recovery reads and adds, and fencing).
 *
 * @return the number of high priority worker threads (default 8)
 */
public int getNumHighPriorityWorkerThreads() {
    return getInt(NUM_HIGH_PRIORITY_WORKER_THREADS, 8);
}
/**
 * Use auto-throttling of the read-worker threads. This is done
 * to ensure the bookie is not using unlimited amount of memory
 * to respond to read-requests.
 *
 * @param throttle
 *          whether to throttle the read workers threads
 * @return server configuration
 */
public ServerConfiguration setReadWorkerThreadsThrottlingEnabled(boolean throttle) {
    setProperty(READ_WORKER_THREADS_THROTTLING_ENABLED, throttle);
    return this;
}
/**
 * Get the auto-throttling status of the read-worker threads.
 *
 * @return whether read-worker thread throttling is enabled (default true)
 */
public boolean isReadWorkerThreadsThrottlingEnabled() {
    return getBoolean(READ_WORKER_THREADS_THROTTLING_ENABLED, true);
}
/**
 * Set the number of threads that would handle read requests.
 *
 * @param numThreads
 *          Number of threads to handle read requests.
 * @return server configuration
 */
public ServerConfiguration setNumReadWorkerThreads(int numThreads) {
    setProperty(NUM_READ_WORKER_THREADS, numThreads);
    return this;
}
/**
 * Get the number of threads that should handle read requests.
 *
 * @return the number of read worker threads (default 8)
 */
public int getNumReadWorkerThreads() {
    return getInt(NUM_READ_WORKER_THREADS, 8);
}
/**
 * Set the tick duration in milliseconds.
 *
 * @param tickDuration
 *          tick duration in milliseconds.
 * @return server configuration
 */
public ServerConfiguration setRequestTimerTickDurationMs(int tickDuration) {
    setProperty(REQUEST_TIMER_TICK_DURATION_MILLISEC, tickDuration);
    return this;
}
/**
 * Set the max number of pending read requests for each read worker thread. After the quota is reached,
 * new requests will be failed immediately.
 *
 * @param maxPendingReadRequestsPerThread
 *          max number of pending read requests per read worker thread
 * @return server configuration
 */
public ServerConfiguration setMaxPendingReadRequestPerThread(int maxPendingReadRequestsPerThread) {
    setProperty(MAX_PENDING_READ_REQUESTS_PER_THREAD, maxPendingReadRequestsPerThread);
    return this;
}
/**
 * If read workers threads are enabled, limit the number of pending requests, to avoid the executor queue to grow
 * indefinitely (default: 10000 entries).
 *
 * @return max number of pending read requests per read worker thread
 */
public int getMaxPendingReadRequestPerThread() {
    return getInt(MAX_PENDING_READ_REQUESTS_PER_THREAD, 10000);
}
/**
 * Set the max number of pending add requests for each add worker thread. After the quota is reached, new requests
 * will be failed immediately.
 *
 * @param maxPendingAddRequestsPerThread
 *          max number of pending add requests per add worker thread
 * @return server configuration
 */
public ServerConfiguration setMaxPendingAddRequestPerThread(int maxPendingAddRequestsPerThread) {
    setProperty(MAX_PENDING_ADD_REQUESTS_PER_THREAD, maxPendingAddRequestsPerThread);
    return this;
}
/**
 * If add workers threads are enabled, limit the number of pending requests, to avoid the executor queue to grow
 * indefinitely (default: 10000 entries).
 *
 * @return max number of pending add requests per add worker thread
 */
public int getMaxPendingAddRequestPerThread() {
    return getInt(MAX_PENDING_ADD_REQUESTS_PER_THREAD, 10000);
}
/**
 * Get the tick duration in milliseconds.
 *
 * @return request timer tick duration in milliseconds (default 10)
 */
public int getRequestTimerTickDurationMs() {
    return getInt(REQUEST_TIMER_TICK_DURATION_MILLISEC, 10);
}
/**
 * Set the number of ticks per wheel for the request timer.
 *
 * @param tickCount
 *          number of ticks per wheel for the request timer.
 * @return server configuration
 */
public ServerConfiguration setRequestTimerNumTicks(int tickCount) {
    setProperty(REQUEST_TIMER_NO_OF_TICKS, tickCount);
    return this;
}
/**
 * Get the number of ticks per wheel for the request timer.
 *
 * @return number of ticks per wheel (default 1024)
 */
public int getRequestTimerNumTicks() {
    return getInt(REQUEST_TIMER_NO_OF_TICKS, 1024);
}
/**
 * Get the number of bytes used as capacity for the write buffer. Default is
 * 64KB.
 * NOTE: Make sure this value is greater than the maximum message size.
 *
 * @return the size of the write buffer in bytes
 */
public int getWriteBufferBytes() {
    return getInt(WRITE_BUFFER_SIZE, 65536);
}
/**
 * Set the number of bytes used as capacity for the write buffer.
 *
 * @param writeBufferBytes
 *          Write Buffer Bytes
 * @return server configuration
 */
public ServerConfiguration setWriteBufferBytes(int writeBufferBytes) {
    setProperty(WRITE_BUFFER_SIZE, writeBufferBytes);
    return this;
}
/**
 * Set the number of threads that would handle journal callbacks.
 *
 * @deprecated this setting is no longer used
 * @param numThreads
 *          number of threads to handle journal callbacks.
 * @return server configuration
 */
@Deprecated
public ServerConfiguration setNumJournalCallbackThreads(int numThreads) {
    setProperty(NUM_JOURNAL_CALLBACK_THREADS, numThreads);
    return this;
}
/**
 * Get the number of threads that should handle journal callbacks.
 *
 * @deprecated this setting is no longer used
 * @return the number of threads that handle journal callbacks (default 1).
 */
@Deprecated
public int getNumJournalCallbackThreads() {
    return getInt(NUM_JOURNAL_CALLBACK_THREADS, 1);
}
/**
 * Set sorted-ledger storage enabled or not.
 *
 * @deprecated Use {@link #setLedgerStorageClass(String)} to configure the implementation class
 * @param enabled whether to enable sorted-ledger storage
 * @return server configuration
 */
public ServerConfiguration setSortedLedgerStorageEnabled(boolean enabled) {
    this.setProperty(SORTED_LEDGER_STORAGE_ENABLED, enabled);
    return this;
}
/**
 * Check if sorted-ledger storage enabled (default true).
 *
 * @return true if sorted ledger storage is enabled, false otherwise
 */
public boolean getSortedLedgerStorageEnabled() {
    return this.getBoolean(SORTED_LEDGER_STORAGE_ENABLED, true);
}
/**
 * Get skip list data size limitation (default 64MB).
 * Max value is 1,073,741,823 — see the bound enforced in {@link #setSkipListSizeLimit(int)}.
 *
 * @return skip list data size limitation
 */
public long getSkipListSizeLimit() {
    return this.getLong(SKIP_LIST_SIZE_LIMIT, 64 * 1024 * 1024L);
}
/**
 * Set skip list size limit.
 *
 * @param size skip list size limit.
 * @return server configuration object.
 * @throws IllegalArgumentException if {@code size} exceeds (Integer.MAX_VALUE - 1) / 2
 */
public ServerConfiguration setSkipListSizeLimit(int size) {
    if (size > (Integer.MAX_VALUE - 1) / 2) {
        // gives max of 2*1023MB for mem table (one being checkpointed and still writable).
        throw new IllegalArgumentException("skiplist size over " + ((Integer.MAX_VALUE - 1) / 2));
    }
    setProperty(SKIP_LIST_SIZE_LIMIT, size);
    return this;
}
/**
 * Get the number of bytes we should use as chunk allocation for
 * org.apache.bookkeeper.bookie.SkipListArena.
 * Default is 4 MB.
 *
 * @return the number of bytes to use for each chunk in the skiplist arena
 */
public int getSkipListArenaChunkSize() {
    return getInt(SKIP_LIST_CHUNK_SIZE_ENTRY, 4096 * 1024);
}
/**
 * Set the number of bytes we used as chunk allocation for
 * org.apache.bookkeeper.bookie.SkipListArena.
 *
 * @param size chunk size.
 * @return server configuration object.
 */
public ServerConfiguration setSkipListArenaChunkSize(int size) {
    setProperty(SKIP_LIST_CHUNK_SIZE_ENTRY, size);
    return this;
}
/**
 * Get the max size we should allocate from the skiplist arena. Allocations
 * larger than this should be allocated directly by the VM to avoid fragmentation.
 *
 * @return max size allocatable from the skiplist arena (Default is 128 KB)
 */
public int getSkipListArenaMaxAllocSize() {
    return getInt(SKIP_LIST_MAX_ALLOC_ENTRY, 128 * 1024);
}
/**
 * Set the max size we should allocate from the skiplist arena. Allocations
 * larger than this should be allocated directly by the VM to avoid fragmentation.
 *
 * @param size max alloc size.
 * @return server configuration object.
 */
public ServerConfiguration setSkipListArenaMaxAllocSize(int size) {
    setProperty(SKIP_LIST_MAX_ALLOC_ENTRY, size);
    return this;
}
/**
 * Should the data be fsynced on journal before acknowledgment.
 *
 * <p>Default is true.
 *
 * @return whether journal data is fsynced before acknowledgment
 */
public boolean getJournalSyncData() {
    return getBoolean(JOURNAL_SYNC_DATA, true);
}
/**
 * Should the data be written to journal before acknowledgment.
 *
 * <p>Default is true.
 *
 * @return whether data is written to the journal before acknowledgment
 */
public boolean getJournalWriteData() {
    return getBoolean(JOURNAL_WRITE_DATA, true);
}
/**
 * Set whether the data should be written to journal before acknowledgment.
 *
 * @param journalWriteData
 *          whether to write data to the journal before acknowledgment (default true)
 * @return server configuration
 */
public ServerConfiguration setJournalWriteData(boolean journalWriteData) {
    setProperty(JOURNAL_WRITE_DATA, journalWriteData);
    return this;
}
/**
 * Enable or disable journal syncs.
 *
 * <p>By default, data sync is enabled to guarantee durability of writes.
 *
 * <p>Beware: while disabling data sync in the Bookie journal might improve the bookie write performance, it will
 * also introduce the possibility of data loss. With no sync, the journal entries are written in the OS page cache
 * but not flushed to disk. In case of power failure, the affected bookie might lose the unflushed data. If the
 * ledger is replicated to multiple bookies, the chances of data loss are reduced though still present.
 *
 * @param syncData
 *          whether to sync data on disk before acknowledgement
 * @return server configuration object
 */
public ServerConfiguration setJournalSyncData(boolean syncData) {
    setProperty(JOURNAL_SYNC_DATA, syncData);
    return this;
}
/**
 * Should we group journal force writes.
 *
 * @return group journal force writes (default true)
 */
public boolean getJournalAdaptiveGroupWrites() {
    return getBoolean(JOURNAL_ADAPTIVE_GROUP_WRITES, true);
}
/**
 * Enable/disable group journal force writes.
 *
 * @param enabled flag to enable/disable group journal force writes
 * @return server configuration
 */
public ServerConfiguration setJournalAdaptiveGroupWrites(boolean enabled) {
    setProperty(JOURNAL_ADAPTIVE_GROUP_WRITES, enabled);
    return this;
}
/**
 * Maximum latency to impose on a journal write to achieve grouping. Default is 2ms.
 *
 * @return max wait for grouping, in milliseconds
 */
public long getJournalMaxGroupWaitMSec() {
    return getLong(JOURNAL_MAX_GROUP_WAIT_MSEC, 2);
}
/**
 * Sets the maximum latency to impose on a journal write to achieve grouping.
 *
 * @param journalMaxGroupWaitMSec
 *          maximum time to wait in milliseconds.
 * @return server configuration.
 */
public ServerConfiguration setJournalMaxGroupWaitMSec(long journalMaxGroupWaitMSec) {
    setProperty(JOURNAL_MAX_GROUP_WAIT_MSEC, journalMaxGroupWaitMSec);
    return this;
}
/**
 * Maximum bytes to buffer to impose on a journal write to achieve grouping.
 *
 * @return max bytes to buffer (default 512KB)
 */
public long getJournalBufferedWritesThreshold() {
    return getLong(JOURNAL_BUFFERED_WRITES_THRESHOLD, 512 * 1024);
}
/**
 * Set maximum bytes to buffer to impose on a journal write to achieve grouping.
 *
 * @param maxBytes maximum bytes to buffer to impose on a journal write
 * @return server configuration
 */
public ServerConfiguration setJournalBufferedWritesThreshold(long maxBytes) {
    setProperty(JOURNAL_BUFFERED_WRITES_THRESHOLD, maxBytes);
    return this;
}
/**
 * Maximum entries to buffer to impose on a journal write to achieve grouping.
 * Use {@link #getJournalBufferedWritesThreshold()} if this is set to zero or
 * less than zero.
 *
 * @return max entries to buffer (default 0, i.e. the bytes threshold applies instead).
 */
public long getJournalBufferedEntriesThreshold() {
    return getLong(JOURNAL_BUFFERED_ENTRIES_THRESHOLD, 0);
}
/**
 * Set maximum entries to buffer to impose on a journal write to achieve grouping.
 * Use {@link #getJournalBufferedWritesThreshold()} set this to zero or less than
 * zero.
 *
 * @param maxEntries
 *          maximum entries to buffer.
 * @return server configuration.
 */
public ServerConfiguration setJournalBufferedEntriesThreshold(int maxEntries) {
    setProperty(JOURNAL_BUFFERED_ENTRIES_THRESHOLD, maxEntries);
    return this;
}
/**
 * Set if we should flush the journal when queue is empty.
 *
 * @param enabled whether to flush the journal when its queue is empty
 * @return server configuration
 */
public ServerConfiguration setJournalFlushWhenQueueEmpty(boolean enabled) {
    setProperty(JOURNAL_FLUSH_WHEN_QUEUE_EMPTY, enabled);
    return this;
}
/**
 * Should we flush the journal when queue is empty.
 *
 * @return flush when queue is empty (default false)
 */
public boolean getJournalFlushWhenQueueEmpty() {
    return getBoolean(JOURNAL_FLUSH_WHEN_QUEUE_EMPTY, false);
}
/**
 * Set whether the bookie is able to go into read-only mode.
 * If this is set to false, the bookie will shutdown on encountering
 * an error condition.
 *
 * @param enabled whether to enable read-only mode.
 *
 * @return ServerConfiguration
 */
public ServerConfiguration setReadOnlyModeEnabled(boolean enabled) {
    setProperty(READ_ONLY_MODE_ENABLED, enabled);
    return this;
}
/**
 * Get whether read-only mode is enabled. The default is true.
 *
 * @return whether read-only mode is enabled
 */
public boolean isReadOnlyModeEnabled() {
    return getBoolean(READ_ONLY_MODE_ENABLED, true);
}
/**
 * Set whether the bookie is able to go into read-only mode when any disk is full.
 * If this is set to false, the bookie falls back to the behavior controlled by the
 * READ_ONLY_MODE_ENABLED flag.
 *
 * @param enabled whether to enable read-only mode when any disk is full.
 * @return ServerConfiguration
 */
public ServerConfiguration setReadOnlyModeOnAnyDiskFullEnabled(boolean enabled) {
    setProperty(READ_ONLY_MODE_ON_ANY_DISK_FULL_ENABLED, enabled);
    return this;
}
/**
 * Get whether read-only mode is enable when any disk is full. The default is false.
 *
 * @return whether read-only mode on any disk full is enabled
 */
public boolean isReadOnlyModeOnAnyDiskFullEnabled() {
    return getBoolean(READ_ONLY_MODE_ON_ANY_DISK_FULL_ENABLED, false);
}
/**
 * Set the warning threshold for disk usage.
 *
 * @param threshold warning threshold to force gc.
 *
 * @return ServerConfiguration
 */
public ServerConfiguration setDiskUsageWarnThreshold(float threshold) {
    setProperty(DISK_USAGE_WARN_THRESHOLD, threshold);
    return this;
}
/**
 * Returns the warning threshold for disk usage. If disk usage
 * goes beyond this, a garbage collection cycle will be forced.
 *
 * @return the percentage at which a disk usage warning will trigger (default 0.90)
 */
public float getDiskUsageWarnThreshold() {
    return getFloat(DISK_USAGE_WARN_THRESHOLD, 0.90f);
}
/**
 * Whether to persist the bookie status so that when bookie server restarts,
 * it will continue using the previous status.
 *
 * @param enabled
 *          - true if persist the bookie status. Otherwise false.
 * @return ServerConfiguration
 */
public ServerConfiguration setPersistBookieStatusEnabled(boolean enabled) {
    setProperty(PERSIST_BOOKIE_STATUS_ENABLED, enabled);
    return this;
}
/**
 * Get whether to persist the bookie status so that when bookie server restarts,
 * it will continue using the previous status.
 *
 * @return true if the bookie status is persisted. Otherwise false (default).
 */
public boolean isPersistBookieStatusEnabled() {
    return getBoolean(PERSIST_BOOKIE_STATUS_ENABLED, false);
}
/**
 * Set the Disk free space threshold as a fraction of the total
 * after which disk will be considered as full during disk check.
 *
 * @param threshold threshold to declare a disk full
 *
 * @return ServerConfiguration
 */
public ServerConfiguration setDiskUsageThreshold(float threshold) {
    setProperty(DISK_USAGE_THRESHOLD, threshold);
    return this;
}
/**
 * Returns disk free space threshold. By default it is 0.95.
 *
 * @return the percentage at which a disk will be considered full
 */
public float getDiskUsageThreshold() {
    return getFloat(DISK_USAGE_THRESHOLD, 0.95f);
}
/**
 * Set the disk free space low water mark threshold.
 * Disk is considered full when usage threshold is exceeded.
 * Disk returns back to non-full state when usage is below low water mark threshold.
 * This prevents it from going back and forth between these states frequently
 * when concurrent writes and compaction are happening. This also prevent bookie from
 * switching frequently between read-only and read-writes states in the same cases.
 *
 * @param threshold threshold below which a full disk returns to the non-full state
 *
 * @return ServerConfiguration
 */
public ServerConfiguration setDiskLowWaterMarkUsageThreshold(float threshold) {
    setProperty(DISK_USAGE_LWM_THRESHOLD, threshold);
    return this;
}
/**
 * Returns disk free space low water mark threshold. By default it is the
 * same as usage threshold (for backwards-compatibility).
 *
 * @return the percentage below which a disk will NOT be considered full
 */
public float getDiskLowWaterMarkUsageThreshold() {
    return getFloat(DISK_USAGE_LWM_THRESHOLD, getDiskUsageThreshold());
}
/**
 * Set the disk checker interval to monitor ledger disk space.
 *
 * @param interval interval between disk checks for space.
 *
 * @return ServerConfiguration
 */
public ServerConfiguration setDiskCheckInterval(int interval) {
    setProperty(DISK_CHECK_INTERVAL, interval);
    return this;
}
/**
 * Get the disk checker interval.
 *
 * @return disk check interval (default 10000, i.e. 10 seconds)
 */
public int getDiskCheckInterval() {
    return getInt(DISK_CHECK_INTERVAL, 10 * 1000);
}
/**
 * Set the regularity at which the auditor will run a check
 * of all ledgers. This should not be run very often, and at most,
 * once a day. Setting this to 0 will completely disable the periodic
 * check.
 *
 * @param interval The interval in seconds. e.g. 86400 = 1 day, 604800 = 1 week
 */
public void setAuditorPeriodicCheckInterval(long interval) {
    setProperty(AUDITOR_PERIODIC_CHECK_INTERVAL, interval);
}
/**
 * Get the regularity at which the auditor checks all ledgers.
 *
 * @return The interval in seconds. Default is 604800 (1 week).
 */
public long getAuditorPeriodicCheckInterval() {
    return getLong(AUDITOR_PERIODIC_CHECK_INTERVAL, 604800);
}
/**
 * Set the interval between auditor bookie checks.
 * The auditor bookie check, checks ledger metadata to see which bookies
 * contain entries for each ledger. If a bookie which should contain entries
 * is unavailable, then the ledger containing that entry is marked for recovery.
 * Setting this to 0 disabled the periodic check. Bookie checks will still
 * run when a bookie fails.
 *
 * @param interval The period in seconds.
 */
public void setAuditorPeriodicBookieCheckInterval(long interval) {
    setProperty(AUDITOR_PERIODIC_BOOKIE_CHECK_INTERVAL, interval);
}
/**
 * Get the interval between auditor bookie check runs.
 * @see #setAuditorPeriodicBookieCheckInterval(long)
 * @return the interval between bookie check runs, in seconds. Default is 86400 (= 1 day)
 */
public long getAuditorPeriodicBookieCheckInterval() {
    return getLong(AUDITOR_PERIODIC_BOOKIE_CHECK_INTERVAL, 86400);
}
/**
 * Sets the regularity/interval at which the auditor will run a placement
 * policy check of all ledgers, which are closed. This should not be run
 * very often, and should be run at most once a day. Setting this to 0 will
 * completely disable the periodic metadata check.
 *
 * @param interval
 *            The interval in seconds. e.g. 86400 = 1 day, 604800 = 1 week
 */
public void setAuditorPeriodicPlacementPolicyCheckInterval(long interval) {
    setProperty(AUDITOR_PERIODIC_PLACEMENT_POLICY_CHECK_INTERVAL, interval);
}
/**
 * Get the regularity at which the auditor does placement policy check of
 * all ledgers, which are closed.
 *
 * @return The interval in seconds. By default, it is disabled.
 */
public long getAuditorPeriodicPlacementPolicyCheckInterval() {
    return getLong(AUDITOR_PERIODIC_PLACEMENT_POLICY_CHECK_INTERVAL, 0);
}
/**
 * Enable/disable repairing of ledgers whose ensemble does not adhere to the placement policy.
 *
 * @see #isRepairedPlacementPolicyNotAdheringBookieEnable()
 * @param enabled whether to enable the repair feature
 */
public void setRepairedPlacementPolicyNotAdheringBookieEnable(boolean enabled) {
    setProperty(REPAIRED_PLACEMENT_POLICY_NOT_ADHERING_BOOKIE_ENABLED, enabled);
}
/**
 * Whether to repair ledgers whose ensemble does not adhere to the placement policy.
 * Currently this feature only supports RackawareEnsemblePlacementPolicy.
 *
 * <p>In the Auditor, combined with {@link #getAuditorPeriodicPlacementPolicyCheckInterval},
 * this controls whether a ledger found with an ensemble not adhering to the placement policy
 * is marked as under-replicated. In the ReplicationWorker, it controls whether such ledgers
 * are repaired. By default, it is disabled.
 *
 * <p>Note: enabling this feature may mark a large number of ledgers under-replicated.
 * The replication worker will then replicate many ledgers, increasing read and write load on
 * the bookie servers. Set a suitable rereplicationEntryBatchSize to limit the pressure.
 *
 * @return whether the repair feature is enabled (default false)
 */
public boolean isRepairedPlacementPolicyNotAdheringBookieEnable() {
    return getBoolean(REPAIRED_PLACEMENT_POLICY_NOT_ADHERING_BOOKIE_ENABLED, false);
}
/**
* Sets the grace period (in seconds) for underreplicated ledgers recovery.
* If ledger is marked underreplicated for more than this period then it
* will be reported by placementPolicyCheck in Auditor. Setting this to 0
* will disable this check.
*
* @param gracePeriod
* The interval in seconds. e.g. 3600 = 1 hour
*/
public void setUnderreplicatedLedgerRecoveryGracePeriod(long gracePeriod) {
setProperty(UNDERREPLICATED_LEDGER_RECOVERY_GRACE_PERIOD, gracePeriod);
}
/**
* Gets the grace period (in seconds) for underreplicated ledgers recovery.
* If ledger is marked underreplicated for more than this period then it
* will be reported by placementPolicyCheck in Auditor. Setting this to 0
* will disable this check.
*
* @return The interval in seconds. By default it is disabled.
*/
public long getUnderreplicatedLedgerRecoveryGracePeriod() {
return getLong(UNDERREPLICATED_LEDGER_RECOVERY_GRACE_PERIOD, 0);
}
/**
* Sets the interval at which the auditor will run a replicas check of all
* ledgers. This should not be run very often since it validates
* availability of replicas of all ledgers by querying bookies. Setting this
* to 0 will disable the periodic replicas check.
*
* @param interval
* The interval in seconds. e.g. 86400 = 1 day, 604800 = 1 week
*/
public void setAuditorPeriodicReplicasCheckInterval(long interval) {
setProperty(AUDITOR_REPLICAS_CHECK_INTERVAL, interval);
}
/**
* Get the interval at which the auditor does replicas check of all ledgers.
*
* @return The interval in seconds. By default it is disabled.
*/
public long getAuditorPeriodicReplicasCheckInterval() {
return getLong(AUDITOR_REPLICAS_CHECK_INTERVAL, 0);
}
/**
* Get the semaphore limit value of getting ledger from zookeeper in auto recovery.
*
* @return The semaphore value. By default it is 500.
*/
public int getAuditorMaxNumberOfConcurrentOpenLedgerOperations() {
return getInt(AUDITOR_MAX_NUMBER_OF_CONCURRENT_OPEN_LEDGER_OPERATIONS, 500);
}
/**
* Set the semaphore limit value for getting ledger from zookeeper in auto recovery.
* @param semaphore
*/
public void setAuditorMaxNumberOfConcurrentOpenLedgerOperations(int semaphore) {
setProperty(AUDITOR_MAX_NUMBER_OF_CONCURRENT_OPEN_LEDGER_OPERATIONS, semaphore);
}
/**
* Get the acquire concurrent open ledger operations timeout.
*
* @return The timeout values. By default it is 120000ms
*/
public int getAuditorAcquireConcurrentOpenLedgerOperationsTimeoutMSec() {
return getInt(AUDITOR_ACQUIRE_CONCURRENT_OPEN_LEDGER_OPERATIONS_TIMEOUT_MSEC, 120000);
}
/**
* Set the acquire concurrent open ledger operations timeout.
* @param timeoutMs
*/
public void setAuditorAcquireConcurrentOpenLedgerOperationsTimeoutMSec(int timeoutMs) {
setProperty(AUDITOR_ACQUIRE_CONCURRENT_OPEN_LEDGER_OPERATIONS_TIMEOUT_MSEC, timeoutMs);
}
/**
* Set what percentage of a ledger (fragment)'s entries will be verified.
* 0 - only the first and last entry of each ledger fragment would be verified
* 100 - the entire ledger fragment would be verified
* anything else - randomly picked entries from over the fragment would be verifiec
* @param auditorLedgerVerificationPercentage The verification proportion as a percentage
* @return ServerConfiguration
*/
public ServerConfiguration setAuditorLedgerVerificationPercentage(long auditorLedgerVerificationPercentage) {
setProperty(AUDITOR_LEDGER_VERIFICATION_PERCENTAGE, auditorLedgerVerificationPercentage);
return this;
}
/**
* Get what percentage of a ledger (fragment)'s entries will be verified.
* @see #setAuditorLedgerVerificationPercentage(long)
* @return percentage of a ledger (fragment)'s entries will be verified. Default is 0.
*/
public long getAuditorLedgerVerificationPercentage() {
return getLong(AUDITOR_LEDGER_VERIFICATION_PERCENTAGE, 0);
}
/**
* Sets that whether the auto-recovery service can start along with Bookie
* server itself or not.
*
* @param enabled
* - true if need to start auto-recovery service. Otherwise
* false.
* @return ServerConfiguration
*/
public ServerConfiguration setAutoRecoveryDaemonEnabled(boolean enabled) {
setProperty(AUTO_RECOVERY_DAEMON_ENABLED, enabled);
return this;
}
/**
* Get whether the Bookie itself can start auto-recovery service also or not.
*
* @return true - if Bookie should start auto-recovery service along with
* it. false otherwise.
*/
public boolean isAutoRecoveryDaemonEnabled() {
return getBoolean(AUTO_RECOVERY_DAEMON_ENABLED, false);
}
/**
* Get how long to delay the recovery of ledgers of a lost bookie.
*
* @return delay interval in seconds
*/
public int getLostBookieRecoveryDelay() {
return getInt(LOST_BOOKIE_RECOVERY_DELAY, 0);
}
/**
* Set the delay interval for starting recovery of a lost bookie.
*/
public void setLostBookieRecoveryDelay(int interval) {
setProperty(LOST_BOOKIE_RECOVERY_DELAY, interval);
}
/**
* Get how long to backoff when encountering exception on rereplicating a ledger.
*
* @return backoff time in milliseconds
*/
public int getRwRereplicateBackoffMs() {
return getInt(RW_REREPLICATE_BACKOFF_MS, 5000);
}
/**
* Set how long to backoff when encountering exception on rereplicating a ledger.
*
* @param backoffMs backoff time in milliseconds
*/
public void setRwRereplicateBackoffMs(int backoffMs) {
setProperty(RW_REREPLICATE_BACKOFF_MS, backoffMs);
}
/**
* Sets that whether force start a bookie in readonly mode.
*
* @param enabled
* - true if need to start a bookie in read only mode. Otherwise
* false.
* @return ServerConfiguration
*/
public ServerConfiguration setForceReadOnlyBookie(boolean enabled) {
setProperty(FORCE_READ_ONLY_BOOKIE, enabled);
return this;
}
/**
* Get whether the Bookie is force started in read only mode or not.
*
* @return true - if need to start a bookie in read only mode. Otherwise
* false.
*/
public boolean isForceReadOnlyBookie() {
return getBoolean(FORCE_READ_ONLY_BOOKIE, false);
}
/**
* Get whether use bytes to throttle garbage collector compaction or not.
*
* @return true - use Bytes,
* false - use Entries.
*/
public boolean getIsThrottleByBytes() {
return getBoolean(IS_THROTTLE_BY_BYTES, false);
}
/**
* Set whether use bytes to throttle garbage collector compaction or not.
*
* @param byBytes true to use by bytes; false to use by entries
*
* @return ServerConfiguration
*/
public ServerConfiguration setIsThrottleByBytes(boolean byBytes) {
setProperty(IS_THROTTLE_BY_BYTES, byBytes);
return this;
}
/**
* Get the maximum number of entries which can be compacted without flushing.
* Default is 100,000.
*
* @return the maximum number of unflushed entries
*/
public int getCompactionMaxOutstandingRequests() {
return getInt(COMPACTION_MAX_OUTSTANDING_REQUESTS, 100000);
}
/**
* Set the maximum number of entries which can be compacted without flushing.
*
* <p>When compacting, the entries are written to the entrylog and the new offsets
* are cached in memory. Once the entrylog is flushed the index is updated with
* the new offsets. This parameter controls the number of entries added to the
* entrylog before a flush is forced. A higher value for this parameter means
* more memory will be used for offsets. Each offset consists of 3 longs.
*
* <p>This parameter should _not_ be modified unless you know what you're doing.
* The default is 100,000.
*
* @param maxOutstandingRequests number of entries to compact before flushing
*
* @return ServerConfiguration
*/
public ServerConfiguration setCompactionMaxOutstandingRequests(int maxOutstandingRequests) {
setProperty(COMPACTION_MAX_OUTSTANDING_REQUESTS, maxOutstandingRequests);
return this;
}
/**
* Get the rate of compaction adds. Default is 1,000.
*
* @return rate of compaction (adds per second)
* @deprecated replaced by {@link #getCompactionRateByEntries()}
*/
@Deprecated
public int getCompactionRate() {
return getInt(COMPACTION_RATE, 1000);
}
/**
* Set the rate of compaction adds.
*
* @param rate rate of compaction adds (adds entries per second)
*
* @return ServerConfiguration
*/
public ServerConfiguration setCompactionRate(int rate) {
setProperty(COMPACTION_RATE, rate);
return this;
}
/**
* Get the rate of compaction adds. Default is 1,000.
*
* @return rate of compaction (adds entries per second)
*/
public int getCompactionRateByEntries() {
return getInt(COMPACTION_RATE_BY_ENTRIES, getCompactionRate());
}
/**
* Set the rate of compaction adds.
*
* @param rate rate of compaction adds (adds entries per second)
*
* @return ServerConfiguration
*/
public ServerConfiguration setCompactionRateByEntries(int rate) {
setProperty(COMPACTION_RATE_BY_ENTRIES, rate);
return this;
}
/**
* Get the rate of compaction adds. Default is 1,000,000.
*
* @return rate of compaction (adds bytes per second)
*/
public int getCompactionRateByBytes() {
return getInt(COMPACTION_RATE_BY_BYTES, 1000000);
}
/**
* Set the rate of compaction adds.
*
* @param rate rate of compaction adds (adds bytes per second)
*
* @return ServerConfiguration
*/
public ServerConfiguration setCompactionRateByBytes(int rate) {
setProperty(COMPACTION_RATE_BY_BYTES, rate);
return this;
}
/**
* Should we remove pages from page cache after force write.
*
* @return remove pages from cache
*/
@Beta
public boolean getJournalRemovePagesFromCache() {
return getBoolean(JOURNAL_REMOVE_FROM_PAGE_CACHE, true);
}
/**
* Sets that whether should we remove pages from page cache after force write.
*
* @param enabled
* - true if we need to remove pages from page cache. otherwise, false
* @return ServerConfiguration
*/
public ServerConfiguration setJournalRemovePagesFromCache(boolean enabled) {
setProperty(JOURNAL_REMOVE_FROM_PAGE_CACHE, enabled);
return this;
}
    /**
     * Get the {@link LedgerStorage} implementation class name.
     *
     * @return the class name
     */
    public String getLedgerStorageClass() {
        String ledgerStorageClass = LEDGER_STORAGE_CLASS_KEY.getString(this);
        if (ledgerStorageClass.equals(SortedLedgerStorage.class.getName())
                && !getSortedLedgerStorageEnabled()) {
            // This is to retain compatibility with BK-4.3 configuration
            // In BK-4.3, the ledger storage is configured through the "sortedLedgerStorageEnabled" flag :
            // sortedLedgerStorageEnabled == true (default) ---> use SortedLedgerStorage
            // sortedLedgerStorageEnabled == false ---> use InterleavedLedgerStorage
            //
            // Since BK-4.4, one can specify the implementation class, but if it was using InterleavedLedgerStorage it
            // should continue to use that with the same configuration
            ledgerStorageClass = InterleavedLedgerStorage.class.getName();
        }
        return ledgerStorageClass;
    }
    /**
     * Set the {@link LedgerStorage} implementation class name.
     *
     * @param ledgerStorageClass the class name
     * @return ServerConfiguration
     */
    public ServerConfiguration setLedgerStorageClass(String ledgerStorageClass) {
        LEDGER_STORAGE_CLASS_KEY.set(this, ledgerStorageClass);
        return this;
    }
/**
* Get whether bookie is using hostname for registration and in ledger
* metadata. Defaults to false.
*
* @return true, then bookie will be registered with its hostname and
* hostname will be used in ledger metadata. Otherwise bookie will
* use its ipaddress
*/
public boolean getUseHostNameAsBookieID() {
return getBoolean(USE_HOST_NAME_AS_BOOKIE_ID, false);
}
/**
* Configure the bookie to use its hostname to register with the
* co-ordination service(eg: zookeeper) and in ledger metadata.
*
* @see #getUseHostNameAsBookieID
* @param useHostName
* whether to use hostname for registration and in ledgermetadata
* @return server configuration
*/
public ServerConfiguration setUseHostNameAsBookieID(boolean useHostName) {
setProperty(USE_HOST_NAME_AS_BOOKIE_ID, useHostName);
return this;
}
/**
* If bookie is using hostname for registration and in ledger metadata then
* whether to use short hostname or FQDN hostname. Defaults to false.
*
* @return true, then bookie will be registered with its short hostname and
* short hostname will be used in ledger metadata. Otherwise bookie
* will use its FQDN hostname
*/
public boolean getUseShortHostName() {
return getBoolean(USE_SHORT_HOST_NAME, false);
}
/**
* Configure the bookie to use its short hostname or FQDN hostname to
* register with the co-ordination service(eg: zookeeper) and in ledger
* metadata.
*
* @see #getUseShortHostName
* @param useShortHostName
* whether to use short hostname for registration and in
* ledgermetadata
* @return server configuration
*/
public ServerConfiguration setUseShortHostName(boolean useShortHostName) {
setProperty(USE_SHORT_HOST_NAME, useShortHostName);
return this;
}
/**
* Get whether to listen for local JVM clients. Defaults to false.
*
* @return true, then bookie will be listen for local JVM clients
*/
public boolean isEnableLocalTransport() {
return getBoolean(ENABLE_LOCAL_TRANSPORT, false);
}
/**
* Configure the bookie to listen for BookKeeper clients executed on the local JVM.
*
* @see #isEnableLocalTransport
* @param enableLocalTransport
* whether to use listen for local JVM clients
* @return server configuration
*/
public ServerConfiguration setEnableLocalTransport(boolean enableLocalTransport) {
setProperty(ENABLE_LOCAL_TRANSPORT, enableLocalTransport);
return this;
}
/**
* Get whether to disable bind of server-side sockets. Defaults to false.
*
* @return true, then bookie will not listen for network connections
*/
public boolean isDisableServerSocketBind() {
return getBoolean(DISABLE_SERVER_SOCKET_BIND, false);
}
/**
* Configure the bookie to disable bind on network interfaces,
* this bookie will be available only to BookKeeper clients executed on the local JVM.
*
* @see #isDisableServerSocketBind
* @param disableServerSocketBind
* whether to disable binding on network interfaces
* @return server configuration
*/
public ServerConfiguration setDisableServerSocketBind(boolean disableServerSocketBind) {
setProperty(DISABLE_SERVER_SOCKET_BIND, disableServerSocketBind);
return this;
}
/**
* Get the stats provider used by bookie.
*
* @return stats provider class
* @throws ConfigurationException
*/
public Class<? extends StatsProvider> getStatsProviderClass()
throws ConfigurationException {
return ReflectionUtils.getClass(this, STATS_PROVIDER_CLASS,
NullStatsProvider.class, StatsProvider.class,
DEFAULT_LOADER);
}
/**
* Set the stats provider used by bookie.
*
* @param providerClass
* stats provider class
* @return server configuration
*/
public ServerConfiguration setStatsProviderClass(Class<? extends StatsProvider> providerClass) {
setProperty(STATS_PROVIDER_CLASS, providerClass.getName());
return this;
}
/**
* Flag to enable sanity check metrics in bookie stats. Defaults to false/disabled.
*
* @return true, if bookie collects sanity check metrics in stats
*/
public boolean isSanityCheckMetricsEnabled() {
return getBoolean(SANITY_CHECK_METRICS_ENABLED, false);
}
/**
* Enable sanity check metrics in bookie stats.
*
* @param sanityCheckMetricsEnabled
* flag to enable sanity check metrics
* @return server configuration
*/
public ServerConfiguration setSanityCheckMetricsEnabled(boolean sanityCheckMetricsEnabled) {
setProperty(SANITY_CHECK_METRICS_ENABLED, sanityCheckMetricsEnabled);
return this;
}
/**
* Validate the configuration.
* @throws ConfigurationException
*/
public void validate() throws ConfigurationException {
// generate config def
ConfigDef configDef = ConfigDef.of(ServerConfiguration.class);
try {
configDef.validate(this);
} catch (ConfigException e) {
throw new ConfigurationException(e.getMessage(), e.getCause());
}
if (getSkipListArenaChunkSize() < getSkipListArenaMaxAllocSize()) {
throw new ConfigurationException("Arena max allocation size should be smaller than the chunk size.");
}
if (getJournalAlignmentSize() < 512 || getJournalAlignmentSize() % 512 != 0) {
throw new ConfigurationException("Invalid journal alignment size : " + getJournalAlignmentSize());
}
if (getJournalAlignmentSize() > getJournalPreAllocSizeMB() * 1024 * 1024) {
throw new ConfigurationException("Invalid preallocation size : " + getJournalPreAllocSizeMB() + " MB");
}
if (0 == getBookiePort() && !getAllowEphemeralPorts()) {
throw new ConfigurationException("Invalid port specified, using ephemeral ports accidentally?");
}
if (isEntryLogPerLedgerEnabled() && getUseTransactionalCompaction()) {
throw new ConfigurationException(
"When entryLogPerLedger is enabled , it is unnecessary to use transactional compaction");
}
if ((getJournalFormatVersionToWrite() >= 6) ^ (getFileInfoFormatVersionToWrite() >= 1)) {
throw new ConfigurationException("For persisiting explicitLac, journalFormatVersionToWrite should be >= 6"
+ "and FileInfoFormatVersionToWrite should be >= 1");
}
if (getMinorCompactionInterval() > 0 && getMinorCompactionInterval() * SECOND < getGcWaitTime()) {
throw new ConfigurationException("minorCompactionInterval should be >= gcWaitTime.");
}
if (getMajorCompactionInterval() > 0 && getMajorCompactionInterval() * SECOND < getGcWaitTime()) {
throw new ConfigurationException("majorCompactionInterval should be >= gcWaitTime.");
}
}
/**
* Get Recv ByteBuf allocator initial buf size.
*
* @return initial byteBuf size
*/
public int getRecvByteBufAllocatorSizeInitial() {
return getInt(BYTEBUF_ALLOCATOR_SIZE_INITIAL, 64 * 1024);
}
/**
* Set Recv ByteBuf allocator initial buf size.
*
* @param size
* buffer size
*/
public void setRecvByteBufAllocatorSizeInitial(int size) {
setProperty(BYTEBUF_ALLOCATOR_SIZE_INITIAL, size);
}
/**
* Get Recv ByteBuf allocator min buf size.
*
* @return min byteBuf size
*/
public int getRecvByteBufAllocatorSizeMin() {
return getInt(BYTEBUF_ALLOCATOR_SIZE_MIN, 64 * 1024);
}
/**
* Set Recv ByteBuf allocator min buf size.
*
* @param size
* buffer size
*/
public void setRecvByteBufAllocatorSizeMin(int size) {
setProperty(BYTEBUF_ALLOCATOR_SIZE_MIN, size);
}
/**
* Get Recv ByteBuf allocator max buf size.
*
* @return max byteBuf size
*/
public int getRecvByteBufAllocatorSizeMax() {
return getInt(BYTEBUF_ALLOCATOR_SIZE_MAX, 1 * 1024 * 1024);
}
/**
* Set Recv ByteBuf allocator max buf size.
*
* @param size
* buffer size
*/
public void setRecvByteBufAllocatorSizeMax(int size) {
setProperty(BYTEBUF_ALLOCATOR_SIZE_MAX, size);
}
/**
* Set the bookie authentication provider factory class name.
* If this is not set, no authentication will be used.
*
* @param factoryClass
* the bookie authentication provider factory class name
*/
public void setBookieAuthProviderFactoryClass(String factoryClass) {
setProperty(BOOKIE_AUTH_PROVIDER_FACTORY_CLASS, factoryClass);
}
/**
* Get the bookie authentication provider factory class name.
* If this returns null, no authentication will take place.
*
* @return the bookie authentication provider factory class name or null.
*/
public String getBookieAuthProviderFactoryClass() {
return getString(BOOKIE_AUTH_PROVIDER_FACTORY_CLASS, null);
}
/**
* {@inheritDoc}
*/
@Override
public ServerConfiguration setNettyMaxFrameSizeBytes(int maxSize) {
super.setNettyMaxFrameSizeBytes(maxSize);
return this;
}
    /**
     * Get the truststore type. Default is JKS.
     *
     * <p>NOTE(review): these TLS javadocs previously said "for client" though
     * this is the server configuration — presumably copy-pasted; confirm
     * which side these settings apply to.
     *
     * @return the TLS truststore type
     */
    public String getTLSTrustStoreType() {
        return getString(TLS_TRUSTSTORE_TYPE, "JKS");
    }
    /**
     * Set the keystore type.
     *
     * @param arg the TLS keystore type (e.g. JKS, PKCS12)
     * @return server configuration
     */
    public ServerConfiguration setTLSKeyStoreType(String arg) {
        setProperty(TLS_KEYSTORE_TYPE, arg);
        return this;
    }
    /**
     * Get the keystore path. Default is null.
     *
     * @return the TLS keystore path, if configured
     */
    public String getTLSKeyStore() {
        return getString(TLS_KEYSTORE, null);
    }
    /**
     * Set the keystore path.
     *
     * @param arg the TLS keystore path
     * @return server configuration
     */
    public ServerConfiguration setTLSKeyStore(String arg) {
        setProperty(TLS_KEYSTORE, arg);
        return this;
    }
    /**
     * Get the path to the file containing the keystore password, if the
     * keystore is password protected. Default is null.
     *
     * @return the keystore password file path, if configured
     */
    public String getTLSKeyStorePasswordPath() {
        return getString(TLS_KEYSTORE_PASSWORD_PATH, null);
    }
    /**
     * Set the path to the file containing the keystore password, if the
     * keystore is password protected.
     *
     * @param arg the keystore password file path
     * @return server configuration
     */
    public ServerConfiguration setTLSKeyStorePasswordPath(String arg) {
        setProperty(TLS_KEYSTORE_PASSWORD_PATH, arg);
        return this;
    }
    /**
     * Get the keystore type. Default is JKS.
     *
     * @return the TLS keystore type
     */
    public String getTLSKeyStoreType() {
        return getString(TLS_KEYSTORE_TYPE, "JKS");
    }
    /**
     * Set the truststore type.
     *
     * @param arg the TLS truststore type (e.g. JKS, PKCS12)
     * @return server configuration
     */
    public ServerConfiguration setTLSTrustStoreType(String arg) {
        setProperty(TLS_TRUSTSTORE_TYPE, arg);
        return this;
    }
    /**
     * Get the truststore path. Default is null.
     *
     * @return the TLS truststore path, if configured
     */
    public String getTLSTrustStore() {
        return getString(TLS_TRUSTSTORE, null);
    }
    /**
     * Set the truststore path.
     *
     * @param arg the TLS truststore path
     * @return server configuration
     */
    public ServerConfiguration setTLSTrustStore(String arg) {
        setProperty(TLS_TRUSTSTORE, arg);
        return this;
    }
    /**
     * Get the path to the file containing the truststore password, if the
     * truststore is password protected. Default is null.
     *
     * @return the truststore password file path, if configured
     */
    public String getTLSTrustStorePasswordPath() {
        return getString(TLS_TRUSTSTORE_PASSWORD_PATH, null);
    }
    /**
     * Set the path to the file containing the truststore password, if the
     * truststore is password protected.
     *
     * @param arg the truststore password file path
     * @return server configuration
     */
    public ServerConfiguration setTLSTrustStorePasswordPath(String arg) {
        setProperty(TLS_TRUSTSTORE_PASSWORD_PATH, arg);
        return this;
    }
    /**
     * Get the path to the file containing the TLS certificate.
     *
     * @return the TLS certificate path, if configured
     */
    public String getTLSCertificatePath() {
        return getString(TLS_CERTIFICATE_PATH, null);
    }
    /**
     * Set the path to the file containing the TLS certificate.
     *
     * @param arg the TLS certificate path
     * @return server configuration
     */
    public ServerConfiguration setTLSCertificatePath(String arg) {
        setProperty(TLS_CERTIFICATE_PATH, arg);
        return this;
    }
/**
* Whether to enable recording task execution stats.
*
* @return flag to enable/disable recording task execution stats.
*/
public boolean getEnableTaskExecutionStats() {
return getBoolean(ENABLE_TASK_EXECUTION_STATS, false);
}
/**
* Enable/Disable recording task execution stats.
*
* @param enabled
* flag to enable/disable recording task execution stats.
* @return client configuration.
*/
public ServerConfiguration setEnableTaskExecutionStats(boolean enabled) {
setProperty(ENABLE_TASK_EXECUTION_STATS, enabled);
return this;
}
/**
* Gets the minimum safe Usable size to be available in index directory for Bookie to create Index File while
* replaying journal at the time of Bookie Start in Readonly Mode (in bytes).
*
* @return minimum safe usable size to be available in index directory for bookie to create index files.
* @see #setMinUsableSizeForIndexFileCreation(long)
*/
public long getMinUsableSizeForIndexFileCreation() {
return this.getLong(MIN_USABLESIZE_FOR_INDEXFILE_CREATION, 100 * 1024 * 1024L);
}
/**
* Sets the minimum safe Usable size to be available in index directory for Bookie to create Index File while
* replaying journal at the time of Bookie Start in Readonly Mode (in bytes).
*
* <p>This parameter allows creating index files when there are enough disk spaces, even when the bookie
* is running at readonly mode because of the disk usage is exceeding {@link #getDiskUsageThreshold()}. Because
* compaction, journal replays can still write index files to disks when a bookie is readonly.
*
* @param minUsableSizeForIndexFileCreation min usable size for index file creation
* @return server configuration
*/
public ServerConfiguration setMinUsableSizeForIndexFileCreation(long minUsableSizeForIndexFileCreation) {
this.setProperty(MIN_USABLESIZE_FOR_INDEXFILE_CREATION, minUsableSizeForIndexFileCreation);
return this;
}
/**
* Gets the minimum safe usable size to be available in ledger directory for Bookie to create entry log files.
*
* @return minimum safe usable size to be available in ledger directory for entry log file creation.
* @see #setMinUsableSizeForEntryLogCreation(long)
*/
public long getMinUsableSizeForEntryLogCreation() {
return this.getLong(MIN_USABLESIZE_FOR_ENTRYLOG_CREATION, (long) 1.2 * getEntryLogSizeLimit());
}
/**
* Sets the minimum safe usable size to be available in ledger directory for Bookie to create entry log files.
*
* <p>This parameter allows creating entry log files when there are enough disk spaces, even when the bookie
* is running at readonly mode because of the disk usage is exceeding {@link #getDiskUsageThreshold()}. Because
* compaction, journal replays can still write data to disks when a bookie is readonly.
*
* @param minUsableSizeForEntryLogCreation minimum safe usable size to be available in ledger directory
* @return server configuration
*/
public ServerConfiguration setMinUsableSizeForEntryLogCreation(long minUsableSizeForEntryLogCreation) {
this.setProperty(MIN_USABLESIZE_FOR_ENTRYLOG_CREATION, minUsableSizeForEntryLogCreation);
return this;
}
/**
* Gets the minimum safe usable size to be available in ledger directory for Bookie to accept high priority writes.
*
* <p>If not set, it is the value of {@link #getMinUsableSizeForEntryLogCreation()}.
*
* @return the minimum safe usable size per ledger directory for bookie to accept high priority writes.
*/
public long getMinUsableSizeForHighPriorityWrites() {
return this.getLong(MIN_USABLESIZE_FOR_HIGH_PRIORITY_WRITES, getMinUsableSizeForEntryLogCreation());
}
/**
* Sets the minimum safe usable size to be available in ledger directory for Bookie to accept high priority writes.
*
* @param minUsableSizeForHighPriorityWrites minimum safe usable size per ledger directory for Bookie to accept
* high priority writes
* @return server configuration.
*/
public ServerConfiguration setMinUsableSizeForHighPriorityWrites(long minUsableSizeForHighPriorityWrites) {
this.setProperty(MIN_USABLESIZE_FOR_HIGH_PRIORITY_WRITES, minUsableSizeForHighPriorityWrites);
return this;
}
/**
* returns whether it is allowed to have multiple ledger/index/journal
* Directories in the same filesystem diskpartition.
*
* @return
*/
public boolean isAllowMultipleDirsUnderSameDiskPartition() {
return this.getBoolean(ALLOW_MULTIPLEDIRS_UNDER_SAME_DISKPARTITION, true);
}
/**
* Configure the Bookie to allow/disallow multiple ledger/index/journal
* directories in the same filesystem diskpartition.
*
* @param allow
*
* @return server configuration object.
*/
public ServerConfiguration setAllowMultipleDirsUnderSameDiskPartition(boolean allow) {
this.setProperty(ALLOW_MULTIPLEDIRS_UNDER_SAME_DISKPARTITION, allow);
return this;
}
/**
* Get whether to start the http server or not.
*
* @return true - if http server should start
*/
public boolean isHttpServerEnabled() {
return getBoolean(HTTP_SERVER_ENABLED, false);
}
/**
* Set whether to start the http server or not.
*
* @param enabled
* - true if we should start http server
* @return ServerConfiguration
*/
public ServerConfiguration setHttpServerEnabled(boolean enabled) {
setProperty(HTTP_SERVER_ENABLED, enabled);
return this;
}
/**
* Get the http server port.
*
* @return http server port
*/
public int getHttpServerPort() {
return getInt(HTTP_SERVER_PORT, 8080);
}
/**
* Set Http server port listening on.
*
* @param port
* Port to listen on
* @return server configuration
*/
public ServerConfiguration setHttpServerPort(int port) {
setProperty(HTTP_SERVER_PORT, port);
return this;
}
/**
* Get the http server host.
*
* @return http server host
*/
public String getHttpServerHost() {
return getString(HTTP_SERVER_HOST, "0.0.0.0");
}
/**
* Set Http server host listening on.
*
* @param host
* host to listen on
* @return server configuration
*/
public ServerConfiguration setHttpServerHost(String host) {
setProperty(HTTP_SERVER_HOST, host);
return this;
}
/**
* Get if Http Server Tls enable.
* @return
*/
public boolean isHttpServerTlsEnable() {
return getBoolean(HTTP_SERVER_TLS_ENABLE, false);
}
/**
* Set if Http Server Tls enable.
* @param tlsEnable
* @return server configuration
*/
public ServerConfiguration setHttpServerTlsEnable(boolean tlsEnable) {
setProperty(HTTP_SERVER_TLS_ENABLE, tlsEnable);
return this;
}
/**
* Get the http server keystore path.
*
* @return http server keystore path
*/
public String getHttpServerKeystorePath() {
return getString(HTTP_SERVER_KEY_STORE_PATH);
}
/**
* Set Http server keystore path.
*
* @param keystorePath
* http server keystore path
* @return server configuration
*/
public ServerConfiguration setHttpServerKeystorePath(String keystorePath) {
setProperty(HTTP_SERVER_KEY_STORE_PATH, keystorePath);
return this;
}
/**
* Get the http server keyStore password.
*
* @return http server keyStore password
*/
public String getHttpServerKeystorePassword() {
return getString(HTTP_SERVER_KEY_STORE_PASSWORD);
}
/**
* Set Http server keyStore password.
*
* @param keyStorePassword
* http server keyStore password
* @return server configuration
*/
public ServerConfiguration setHttpServerKeyStorePassword(String keyStorePassword) {
setProperty(HTTP_SERVER_KEY_STORE_PASSWORD, keyStorePassword);
return this;
}
/**
* Get the http server trustStore path.
*
* @return http server trustStore path
*/
public String getHttpServerTrustStorePath() {
return getString(HTTP_SERVER_TRUST_STORE_PATH);
}
/**
* Set Http server trustStore path.
*
* @param trustStorePath
* http server trustStore path
* @return server configuration
*/
public ServerConfiguration setHttpServerTrustStorePath(String trustStorePath) {
setProperty(HTTP_SERVER_TRUST_STORE_PATH, trustStorePath);
return this;
}
/**
* Get the http server trustStore password.
*
* @return http server trustStore password
*/
public String getHttpServerTrustStorePassword() {
return getString(HTTP_SERVER_KEY_STORE_PASSWORD);
}
/**
* Set Http server trustStore password.
*
* @param trustStorePassword
* http server trustStore password
* @return server configuration
*/
public ServerConfiguration setHttpServerTrustStorePasswordPassword(String trustStorePassword) {
setProperty(HTTP_SERVER_TRUST_STORE_PASSWORD, trustStorePassword);
return this;
}
/**
* Get the extra list of server lifecycle components to enable on a bookie server.
*
* @return the extra list of server lifecycle components to enable on a bookie server.
*/
public String[] getExtraServerComponents() {
String extraServerComponentsStr = getString(EXTRA_SERVER_COMPONENTS);
if (Strings.isNullOrEmpty(extraServerComponentsStr)) {
return null;
}
return this.getStringArray(EXTRA_SERVER_COMPONENTS);
}
/**
* Set the extra list of server lifecycle components to enable on a bookie server.
*
* @param componentClasses
* the list of server lifecycle components to enable on a bookie server.
* @return server configuration.
*/
public ServerConfiguration setExtraServerComponents(String[] componentClasses) {
this.setProperty(EXTRA_SERVER_COMPONENTS, componentClasses);
return this;
}
/**
 * Return the flag whether to ignore startup failures on loading server components specified at
 * {@link #getExtraServerComponents()}.
 *
 * @return the flag whether to ignore startup failures on loading server components specified at
 *         {@link #getExtraServerComponents()}. The default value is <tt>false</tt>.
 */
public boolean getIgnoreExtraServerComponentsStartupFailures() {
    return getBoolean(IGNORE_EXTRA_SERVER_COMPONENTS_STARTUP_FAILURES, false);
}
/**
 * Set the flag whether to ignore startup failures on loading server components specified at
 * {@link #getExtraServerComponents()}.
 *
 * @param enabled flag to enable/disable ignoring startup failures on loading server components.
 * @return server configuration.
 */
public ServerConfiguration setIgnoreExtraServerComponentsStartupFailures(boolean enabled) {
    setProperty(IGNORE_EXTRA_SERVER_COMPONENTS_STARTUP_FAILURES, enabled);
    return this;
}
/**
 * Get server netty channel write buffer low water mark.
 *
 * @return netty channel write buffer low water mark; default 384 KiB.
 */
public int getServerWriteBufferLowWaterMark() {
    return getInt(SERVER_WRITEBUFFER_LOW_WATER_MARK, 384 * 1024);
}
/**
 * Set server netty channel write buffer low water mark.
 *
 * @param waterMark
 *          netty channel write buffer low water mark.
 * @return server configuration.
 */
public ServerConfiguration setServerWriteBufferLowWaterMark(int waterMark) {
    setProperty(SERVER_WRITEBUFFER_LOW_WATER_MARK, waterMark);
    return this;
}
/**
 * Get server netty channel write buffer high water mark.
 *
 * @return netty channel write buffer high water mark; default 512 KiB.
 */
public int getServerWriteBufferHighWaterMark() {
    return getInt(SERVER_WRITEBUFFER_HIGH_WATER_MARK, 512 * 1024);
}
/**
 * Set server netty channel write buffer high water mark.
 *
 * @param waterMark
 *          netty channel write buffer high water mark.
 * @return server configuration.
 */
public ServerConfiguration setServerWriteBufferHighWaterMark(int waterMark) {
    setProperty(SERVER_WRITEBUFFER_HIGH_WATER_MARK, waterMark);
    return this;
}
/**
 * Set registration manager class.
 *
 * @param regManagerClass
 *          ManagerClass
 * @deprecated since 4.7.0, in favor of using {@link #setMetadataServiceUri(String)}
 */
@Deprecated
public void setRegistrationManagerClass(
        Class<? extends RegistrationManager> regManagerClass) {
    setProperty(REGISTRATION_MANAGER_CLASS, regManagerClass);
}
/**
 * Get Registration Manager Class.
 *
 * @return registration manager class; defaults to {@link ZKRegistrationManager}.
 * @throws ConfigurationException if the configured class cannot be loaded
 * @deprecated since 4.7.0, in favor of using {@link #getMetadataServiceUri()}
 */
@Deprecated
public Class<? extends RegistrationManager> getRegistrationManagerClass()
    throws ConfigurationException {
    return ReflectionUtils.getClass(this, REGISTRATION_MANAGER_CLASS,
            ZKRegistrationManager.class, RegistrationManager.class,
            DEFAULT_LOADER);
}
/**
 * Return this concrete configuration instance, so fluent setters declared in
 * the base configuration class can return the subtype.
 */
@Override
protected ServerConfiguration getThis() {
    return this;
}
/**
 * Whether the entry-log-per-ledger feature is enabled. When enabled, there
 * is an active entry log for each ledger.
 *
 * @return true if entry log per ledger is enabled; default false.
 */
public boolean isEntryLogPerLedgerEnabled() {
    return getBoolean(ENTRY_LOG_PER_LEDGER_ENABLED, false);
}
/**
 * Enable or disable the entry-log-per-ledger feature.
 *
 * @param entryLogPerLedgerEnabled whether to enable the feature.
 * @return server configuration.
 */
public ServerConfiguration setEntryLogPerLedgerEnabled(boolean entryLogPerLedgerEnabled) {
    setProperty(ENTRY_LOG_PER_LEDGER_ENABLED, Boolean.toString(entryLogPerLedgerEnabled));
    return this;
}
/**
 * Get the number of threads used to flush the entry memtable. With multiple
 * entry logs, several threads can flush the memtable in parallel.
 *
 * @return number of memtable flush threads; default 8.
 */
public int getNumOfMemtableFlushThreads() {
    return getInt(NUMBER_OF_MEMTABLE_FLUSH_THREADS, 8);
}
/**
 * Set the number of threads used to flush the entry memtable when multiple
 * entry logs are in use.
 *
 * @param numOfMemtableFlushThreads number of flush threads.
 * @return server configuration.
 */
public ServerConfiguration setNumOfMemtableFlushThreads(int numOfMemtableFlushThreads) {
    setProperty(NUMBER_OF_MEMTABLE_FLUSH_THREADS, Integer.toString(numOfMemtableFlushThreads));
    return this;
}
/**
 * For the entry-log-per-ledger feature: the duration after an entry's last
 * access at which the entry is automatically removed from the cache.
 *
 * @return cache expiry duration in seconds; default 300.
 */
public int getEntrylogMapAccessExpiryTimeInSeconds() {
    final int defaultExpirySeconds = 5 * 60;
    return getInt(ENTRYLOGMAP_ACCESS_EXPIRYTIME_INSECONDS, defaultExpirySeconds);
}
/**
 * Set the cache-eviction expiry duration used by the entry-log-per-ledger feature.
 *
 * @param entrylogMapAccessExpiryTimeInSeconds expiry duration in seconds.
 * @return server configuration.
 */
public ServerConfiguration setEntrylogMapAccessExpiryTimeInSeconds(int entrylogMapAccessExpiryTimeInSeconds) {
    setProperty(ENTRYLOGMAP_ACCESS_EXPIRYTIME_INSECONDS,
            Integer.toString(entrylogMapAccessExpiryTimeInSeconds));
    return this;
}
/**
 * Get the maximum number of entry logs that can be active at a given point in time.
 *
 * @return maximum number of active entry logs; default 500.
 */
public int getMaximumNumberOfActiveEntryLogs() {
    return getInt(MAXIMUM_NUMBER_OF_ACTIVE_ENTRYLOGS, 500);
}
/**
 * Set the maximum number of entry logs that can be active at a given point in time.
 *
 * @param maximumNumberOfActiveEntryLogs maximum number of active entry logs.
 * @return server configuration.
 */
public ServerConfiguration setMaximumNumberOfActiveEntryLogs(int maximumNumberOfActiveEntryLogs) {
    setProperty(MAXIMUM_NUMBER_OF_ACTIVE_ENTRYLOGS,
            Integer.toString(maximumNumberOfActiveEntryLogs));
    return this;
}
/**
 * In EntryLogManagerForEntryLogPerLedger, the metrics cache size limit
 * expressed as a multiple of the entrylogMap cache size limit.
 *
 * @return the multiplication factor; default 10.
 */
public int getEntryLogPerLedgerCounterLimitsMultFactor() {
    return getInt(ENTRY_LOG_PER_LEDGER_COUNTER_LIMITS_MULT_FACTOR, 10);
}
/**
 * Set the metrics cache size limit multiplication factor used by
 * EntryLogManagerForEntryLogPerLedger.
 *
 * @param entryLogPerLedgerCounterLimitsMultFactor the multiplication factor.
 * @return server configuration.
 */
public ServerConfiguration setEntryLogPerLedgerCounterLimitsMultFactor(
        int entryLogPerLedgerCounterLimitsMultFactor) {
    setProperty(ENTRY_LOG_PER_LEDGER_COUNTER_LIMITS_MULT_FACTOR,
            Integer.toString(entryLogPerLedgerCounterLimitsMultFactor));
    return this;
}
/**
 * Whether a local consistency check should be performed on startup.
 *
 * @return true if the startup consistency check is enabled; default false.
 */
public boolean isLocalConsistencyCheckOnStartup() {
    return getBoolean(LOCAL_CONSISTENCY_CHECK_ON_STARTUP, false);
}
/**
 * Get the authorized roles.
 *
 * @return String array of configured auth roles.
 */
public String[] getAuthorizedRoles() {
    return getStringArray(AUTHORIZED_ROLES);
}
/**
 * Set authorized roles.
 *
 * @param roles comma-separated list of authorized roles.
 * @return Configuration Object with roles set
 */
public ServerConfiguration setAuthorizedRoles(String roles) {
    this.setProperty(AUTHORIZED_ROLES, roles);
    return this;
}
/**
 * Get the number of in-flight read-entry requests used by the ledger checker.
 * The default of -1 means the number of in-flight reads is unlimited.
 *
 * @return maximum number of in-flight read entries.
 */
public int getInFlightReadEntryNumInLedgerChecker() {
    return getInt(IN_FLIGHT_READ_ENTRY_NUM_IN_LEDGER_CHECKER, -1);
}
/**
 * Enabled data integrity checker.
 * The data integrity checker checks that the bookie has all the entries which
 * ledger metadata asserts it has.
 * The checker runs on startup (periodic will be added later).
 * This changes how cookies are handled. If a directory is found to be missing a cookie,
 * the check runs. The check is divided into two parts, preboot and full.
 * The preboot check ensures that it is safe to boot the bookie; the bookie will not
 * vote in any operation that contradicts a previous vote.
 * The full check ensures that any ledger that claims to have entries on the bookie,
 * truly does have data on the bookie. Any missing entries are copied from available
 * replicas.
 *
 * @param enabled whether to enable the data integrity checker.
 * @return server configuration.
 */
public ServerConfiguration setDataIntegrityCheckingEnabled(boolean enabled) {
    this.setProperty(DATA_INTEGRITY_CHECKING_ENABLED,
            Boolean.toString(enabled));
    return this;
}
/**
 * Whether the data integrity checker is enabled; default false.
 *
 * @see #setDataIntegrityCheckingEnabled
 */
public boolean isDataIntegrityCheckingEnabled() {
    return this.getBoolean(DATA_INTEGRITY_CHECKING_ENABLED, false);
}
/**
 * When set to true, and the data integrity checker is also enabled, missing cookie
 * files in the ledger directories do not prevent the bookie from booting. Missing
 * cookie files usually indicate an empty disk has been mounted, which might be after
 * a disk failure (all data lost) or a provisioning error (wrong disk mounted).
 * If there are missing cookie files then:
 * - a new cookie is stamped (written to each ledger directory and to the co-ordination service, eg: zookeeper).
 * - the data integrity checker will attempt to repair any lost data by sourcing the lost entries from other bookies
 * If any cookies do not match the master cookie, then cookie validation still fails as normal.
 *
 * @param enabled whether to stamp missing cookies on boot.
 * @return server configuration.
 */
public ServerConfiguration setDataIntegrityStampMissingCookiesEnabled(boolean enabled) {
    setProperty(DATA_INTEGRITY_COOKIE_STAMPING_ENABLED, Boolean.toString(enabled));
    return this;
}
/**
 * Whether stamping of missing cookies is enabled; default false.
 *
 * @see #setDataIntegrityStampMissingCookiesEnabled
 */
public boolean isDataIntegrityStampMissingCookiesEnabled() {
    return getBoolean(DATA_INTEGRITY_COOKIE_STAMPING_ENABLED, false);
}
/**
 * When this config is set to true, invalid records encountered while replaying
 * the journal are skipped instead of failing the replay.
 *
 * @param skipReplayJournalInvalidRecord whether to skip invalid journal records.
 * @return server configuration.
 */
public ServerConfiguration setSkipReplayJournalInvalidRecord(boolean skipReplayJournalInvalidRecord) {
    this.setProperty(SKIP_REPLAY_JOURNAL_INVALID_RECORD,
            Boolean.toString(skipReplayJournalInvalidRecord));
    return this;
}
/**
 * Whether invalid journal records are skipped during replay; default false.
 *
 * @see #setSkipReplayJournalInvalidRecord
 */
public boolean isSkipReplayJournalInvalidRecord() {
    return this.getBoolean(SKIP_REPLAY_JOURNAL_INVALID_RECORD, false);
}
/**
 * Get default rocksdb conf path. Falls back to the bundled
 * "conf/default_rocksdb.conf" classpath resource when the property is not set.
 *
 * @return configured default rocksdb conf path.
 */
public String getDefaultRocksDBConf() {
    String fallback = "conf/default_rocksdb.conf";
    URL resource = getClass().getClassLoader().getResource(fallback);
    if (resource != null) {
        fallback = resource.getPath();
    }
    return getString(DEFAULT_ROCKSDB_CONF, fallback);
}
/**
 * Set default rocksdb conf.
 *
 * @param defaultRocksdbConf path to the default rocksdb conf.
 * @return server configuration.
 */
public ServerConfiguration setDefaultRocksDBConf(String defaultRocksdbConf) {
    setProperty(DEFAULT_ROCKSDB_CONF, defaultRocksdbConf);
    return this;
}
/**
 * Get entry location rocksdb conf path. Falls back to the bundled
 * "conf/entry_location_rocksdb.conf" classpath resource when the property is not set.
 *
 * @return configured entry location rocksdb conf path.
 */
public String getEntryLocationRocksdbConf() {
    String fallback = "conf/entry_location_rocksdb.conf";
    URL resource = getClass().getClassLoader().getResource(fallback);
    if (resource != null) {
        fallback = resource.getPath();
    }
    return getString(ENTRY_LOCATION_ROCKSDB_CONF, fallback);
}
/**
 * Set entry location rocksdb conf.
 *
 * @param entryLocationRocksdbConf path to the entry location rocksdb conf.
 * @return server configuration.
 */
public ServerConfiguration setEntryLocationRocksdbConf(String entryLocationRocksdbConf) {
    setProperty(ENTRY_LOCATION_ROCKSDB_CONF, entryLocationRocksdbConf);
    return this;
}
/**
 * Get ledger metadata rocksdb conf path. Falls back to the bundled
 * "conf/ledger_metadata_rocksdb.conf" classpath resource when the property is not set.
 *
 * @return configured ledger metadata rocksdb conf path.
 */
public String getLedgerMetadataRocksdbConf() {
    String fallback = "conf/ledger_metadata_rocksdb.conf";
    URL resource = getClass().getClassLoader().getResource(fallback);
    if (resource != null) {
        fallback = resource.getPath();
    }
    return getString(LEDGER_METADATA_ROCKSDB_CONF, fallback);
}
/**
 * Set ledger metadata rocksdb conf.
 *
 * @param ledgerMetadataRocksdbConf path to the ledger metadata rocksdb conf.
 * @return server configuration.
 */
public ServerConfiguration setLedgerMetadataRocksdbConf(String ledgerMetadataRocksdbConf) {
    setProperty(LEDGER_METADATA_ROCKSDB_CONF, ledgerMetadataRocksdbConf);
    return this;
}
/**
 * Set the max operation numbers in a single rocksdb write batch.
 * The rocksdb write batch is related to the memory usage. If the batch is too large, it will cause the OOM.
 *
 * @param maxNumbersInSingleRocksDBBatch max number of operations per rocksdb write batch.
 * @return server configuration.
 */
public ServerConfiguration setOperationMaxNumbersInSingleRocksDBWriteBatch(int maxNumbersInSingleRocksDBBatch) {
    this.setProperty(MAX_OPERATION_NUMBERS_IN_SINGLE_ROCKSDB_WRITE_BATCH, maxNumbersInSingleRocksDBBatch);
    return this;
}
/**
 * Get the max operation numbers in a single rocksdb write batch.
 *
 * @return max number of operations per rocksdb write batch; default 100000.
 */
public int getMaxOperationNumbersInSingleRocksDBBatch() {
    return getInt(MAX_OPERATION_NUMBERS_IN_SINGLE_ROCKSDB_WRITE_BATCH, 100000);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.conf;
import java.util.Objects;
import org.apache.commons.configuration.ConfigurationException;
/**
 * Wraps a {@link org.apache.commons.configuration.ConfigurationException} with an unchecked exception.
 *
 * @since 4.7.0
 */
public class UncheckedConfigurationException extends RuntimeException {
    /**
     * Constructs an instance of this class.
     *
     * @param message
     *          the detail message, can be null
     * @param cause
     *          the {@code ConfigurationException}
     *
     * @throws NullPointerException
     *          if the cause is {@code null}
     */
    public UncheckedConfigurationException(String message, ConfigurationException cause) {
        super(message, Objects.requireNonNull(cause));
    }
    /**
     * Constructs an instance of this class.
     *
     * @param cause
     *          the {@code ConfigurationException}
     *
     * @throws NullPointerException
     *          if the cause is {@code null}
     */
    public UncheckedConfigurationException(ConfigurationException cause) {
        super(Objects.requireNonNull(cause));
    }
    /**
     * Returns the cause of this exception.
     *
     * @return the {@code ConfigurationException} which is the cause of this exception.
     */
    @Override
    public ConfigurationException getCause() {
        return (ConfigurationException) super.getCause();
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.conf;
import static org.apache.bookkeeper.conf.ClientConfiguration.CLIENT_AUTH_PROVIDER_FACTORY_CLASS;
import io.netty.buffer.PooledByteBufAllocator;
import java.net.URL;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import javax.net.ssl.SSLEngine;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.common.allocator.LeakDetectionPolicy;
import org.apache.bookkeeper.common.allocator.OutOfMemoryPolicy;
import org.apache.bookkeeper.common.allocator.PoolingPolicy;
import org.apache.bookkeeper.common.util.JsonUtil;
import org.apache.bookkeeper.common.util.JsonUtil.ParseJsonException;
import org.apache.bookkeeper.common.util.ReflectionUtils;
import org.apache.bookkeeper.feature.Feature;
import org.apache.bookkeeper.meta.AbstractZkLedgerManagerFactory;
import org.apache.bookkeeper.meta.HierarchicalLedgerManagerFactory;
import org.apache.bookkeeper.meta.LedgerManagerFactory;
import org.apache.bookkeeper.meta.LongHierarchicalLedgerManagerFactory;
import org.apache.bookkeeper.util.EntryFormatter;
import org.apache.bookkeeper.util.LedgerIdFormatter;
import org.apache.bookkeeper.util.StringEntryFormatter;
import org.apache.commons.configuration.CompositeConfiguration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.configuration.SystemConfiguration;
import org.apache.commons.lang.StringUtils;
/**
* Abstract configuration.
*/
@Slf4j
public abstract class AbstractConfiguration<T extends AbstractConfiguration>
extends CompositeConfiguration {
public static final String READ_SYSTEM_PROPERTIES_PROPERTY = "org.apache.bookkeeper.conf.readsystemproperties";
/**
* Enable the use of System Properties, which was the default behaviour till 4.4.0.
*/
private static final boolean READ_SYSTEM_PROPERTIES = Boolean.getBoolean(READ_SYSTEM_PROPERTIES_PROPERTY);
protected static final ClassLoader DEFAULT_LOADER;
static {
ClassLoader loader = Thread.currentThread().getContextClassLoader();
if (null == loader) {
loader = AbstractConfiguration.class.getClassLoader();
}
DEFAULT_LOADER = loader;
}
// Zookeeper Parameters
protected static final String ZK_TIMEOUT = "zkTimeout";
protected static final String ZK_SERVERS = "zkServers";
protected static final String ZK_RETRY_BACKOFF_MAX_RETRIES = "zkRetryBackoffMaxRetries";
// Ledger Manager
protected static final String LEDGER_MANAGER_TYPE = "ledgerManagerType";
protected static final String LEDGER_MANAGER_FACTORY_CLASS = "ledgerManagerFactoryClass";
protected static final String LEDGER_METADATA_FORMAT_VERSION = "ledgerMetadataVersion";
protected static final String ALLOW_SHADED_LEDGER_MANAGER_FACTORY_CLASS = "allowShadedLedgerManagerFactoryClass";
protected static final String SHADED_LEDGER_MANAGER_FACTORY_CLASS_PREFIX = "shadedLedgerManagerFactoryClassPrefix";
protected static final String METADATA_SERVICE_URI = "metadataServiceUri";
protected static final String ZK_LEDGERS_ROOT_PATH = "zkLedgersRootPath";
protected static final String ZK_REQUEST_RATE_LIMIT = "zkRequestRateLimit";
protected static final String AVAILABLE_NODE = "available";
protected static final String REREPLICATION_ENTRY_BATCH_SIZE = "rereplicationEntryBatchSize";
protected static final String STORE_SYSTEMTIME_AS_LEDGER_UNDERREPLICATED_MARK_TIME =
"storeSystemTimeAsLedgerUnderreplicatedMarkTime";
protected static final String STORE_SYSTEMTIME_AS_LEDGER_CREATION_TIME = "storeSystemTimeAsLedgerCreationTime";
protected static final String ENABLE_BUSY_WAIT = "enableBusyWait";
protected static final String ENABLE_HEALTH_CHECK = "enableHealthCheck";
// Metastore settings, only being used when LEDGER_MANAGER_FACTORY_CLASS is MSLedgerManagerFactory
protected static final String METASTORE_IMPL_CLASS = "metastoreImplClass";
protected static final String METASTORE_MAX_ENTRIES_PER_SCAN = "metastoreMaxEntriesPerScan";
// Common TLS configuration
// TLS Provider (JDK or OpenSSL)
protected static final String TLS_PROVIDER = "tlsProvider";
// TLS provider factory class name
protected static final String TLS_PROVIDER_FACTORY_CLASS = "tlsProviderFactoryClass";
protected static final String LEDGERID_FORMATTER_CLASS = "ledgerIdFormatterClass";
protected static final String ENTRY_FORMATTER_CLASS = "entryFormatterClass";
// Enable authentication of the other connection end point (mutual authentication)
protected static final String TLS_CLIENT_AUTHENTICATION = "tlsClientAuthentication";
// Preserve MDC or not for tasks in executor
protected static final String PRESERVE_MDC_FOR_TASK_EXECUTION = "preserveMdcForTaskExecution";
// Default formatter classes
protected static final Class<? extends EntryFormatter> DEFAULT_ENTRY_FORMATTER = StringEntryFormatter.class;
protected static final Class<? extends LedgerIdFormatter> DEFAULT_LEDGERID_FORMATTER =
LedgerIdFormatter.LongLedgerIdFormatter.class;
protected static final String TLS_CERT_FILES_REFRESH_DURATION_SECONDS = "tlsCertFilesRefreshDurationSeconds";
/**
* This list will be passed to {@link SSLEngine#setEnabledCipherSuites(java.lang.String[]) }.
* Please refer to official JDK JavaDocs
*/
protected static final String TLS_ENABLED_CIPHER_SUITES = "tlsEnabledCipherSuites";
/**
* This list will be passed to {@link SSLEngine#setEnabledProtocols(java.lang.String[]) }.
* Please refer to official JDK JavaDocs
*/
protected static final String TLS_ENABLED_PROTOCOLS = "tlsEnabledProtocols";
/**
* TLS KeyStore, TrustStore, Password files and Certificate Paths.
*/
protected static final String TLS_KEYSTORE_TYPE = "tlsKeyStoreType";
protected static final String TLS_KEYSTORE = "tlsKeyStore";
protected static final String TLS_KEYSTORE_PASSWORD_PATH = "tlsKeyStorePasswordPath";
protected static final String TLS_TRUSTSTORE_TYPE = "tlsTrustStoreType";
protected static final String TLS_TRUSTSTORE = "tlsTrustStore";
protected static final String TLS_TRUSTSTORE_PASSWORD_PATH = "tlsTrustStorePasswordPath";
protected static final String TLS_CERTIFICATE_PATH = "tlsCertificatePath";
//Netty configuration
protected static final String NETTY_MAX_FRAME_SIZE = "nettyMaxFrameSizeBytes";
protected static final int DEFAULT_NETTY_MAX_FRAME_SIZE = 5 * 1024 * 1024; // 5MB
// Zookeeper ACL settings
protected static final String ZK_ENABLE_SECURITY = "zkEnableSecurity";
// Kluge for compatibility testing. Never set this outside tests.
public static final String LEDGER_MANAGER_FACTORY_DISABLE_CLASS_CHECK = "ledgerManagerFactoryDisableClassCheck";
// Validate bookie process user
public static final String PERMITTED_STARTUP_USERS = "permittedStartupUsers";
// minimum number of racks per write quorum
public static final String MIN_NUM_RACKS_PER_WRITE_QUORUM = "minNumRacksPerWriteQuorum";
// enforce minimum number of racks per write quorum
public static final String ENFORCE_MIN_NUM_RACKS_PER_WRITE_QUORUM = "enforceMinNumRacksPerWriteQuorum";
// enforce minimum number of fault domains for write
public static final String ENFORCE_MIN_NUM_FAULT_DOMAINS_FOR_WRITE = "enforceMinNumFaultDomainsForWrite";
// ignore usage of local node in the internal logic of placement policy
public static final String IGNORE_LOCAL_NODE_IN_PLACEMENT_POLICY = "ignoreLocalNodeInPlacementPolicy";
// minimum number of zones per write quorum in ZoneAwarePlacementPolicy
public static final String MIN_NUM_ZONES_PER_WRITE_QUORUM = "minNumZonesPerWriteQuorum";
// desired number of zones per write quorum in ZoneAwarePlacementPolicy
public static final String DESIRED_NUM_ZONES_PER_WRITE_QUORUM = "desiredNumZonesPerWriteQuorum";
// in ZoneawareEnsemblePlacementPolicy if strict placement is enabled then
// minZones/desiredZones in writeQuorum would be maintained otherwise it
// will pick nodes randomly.
public static final String ENFORCE_STRICT_ZONEAWARE_PLACEMENT = "enforceStrictZoneawarePlacement";
// Allocator configuration
protected static final String ALLOCATOR_POOLING_POLICY = "allocatorPoolingPolicy";
protected static final String ALLOCATOR_POOLING_CONCURRENCY = "allocatorPoolingConcurrency";
protected static final String ALLOCATOR_OOM_POLICY = "allocatorOutOfMemoryPolicy";
protected static final String ALLOCATOR_LEAK_DETECTION_POLICY = "allocatorLeakDetectionPolicy";
// option to limit stats logging
public static final String LIMIT_STATS_LOGGING = "limitStatsLogging";
protected static final String REPLICATION_RATE_BY_BYTES = "replicationRateByBytes";
/**
 * Create an empty configuration. When the
 * {@value #READ_SYSTEM_PROPERTIES_PROPERTY} system property is set, JVM
 * system properties are added as a configuration source (this was the
 * default behaviour until 4.4.0).
 */
protected AbstractConfiguration() {
    super();
    if (READ_SYSTEM_PROPERTIES) {
        // add configuration for system properties
        addConfiguration(new SystemConfiguration());
    }
}
/**
 * Limit who can start the application to prevent future permission errors.
 *
 * @param s comma-separated list of user names permitted to start the process.
 */
public void setPermittedStartupUsers(String s) {
    setProperty(PERMITTED_STARTUP_USERS, s);
}
/**
 * Get array of users specified in this property.
 *
 * @return the permitted startup user names, empty when unset.
 */
public String[] getPermittedStartupUsers() {
    return getStringArray(PERMITTED_STARTUP_USERS);
}
/**
 * You can load configurations in precedence order. The first one takes
 * precedence over any loaded later.
 *
 * @param confURL
 *          Configuration URL
 */
@SuppressWarnings("unchecked")
public void loadConf(URL confURL) throws ConfigurationException {
    PropertiesConfiguration loadedConf = new PropertiesConfiguration(confURL);
    Iterator<String> keys = loadedConf.getKeys();
    while (keys.hasNext()) {
        String key = keys.next();
        setProperty(key, loadedConf.getProperty(key));
    }
}
/**
 * You can load configuration from other configuration.
 *
 * @param baseConf
 *          Other Configuration
 */
@SuppressWarnings("unchecked")
public void loadConf(CompositeConfiguration baseConf) {
    Iterator<String> keys = baseConf.getKeys();
    while (keys.hasNext()) {
        String key = keys.next();
        setProperty(key, baseConf.getProperty(key));
    }
}
/**
 * Get metadata service uri.
 *
 * <p><b>Warning:</b> this method silently converts checked exceptions to unchecked exceptions.
 * It is useful to use this method in lambda expressions. However it should not be used with places
 * which have logics to handle checked exceptions. In such cases use {@link #getMetadataServiceUri()} instead.
 *
 * @return metadata service uri
 * @throws UncheckedConfigurationException if the metadata service uri is invalid.
 */
public String getMetadataServiceUriUnchecked() throws UncheckedConfigurationException {
    try {
        return getMetadataServiceUri();
    } catch (ConfigurationException cause) {
        // rewrap for use in lambda expressions
        throw new UncheckedConfigurationException(cause);
    }
}
/**
 * Get metadata service uri.
 *
 * @return metadata service uri.
 * @throws ConfigurationException if the metadata service uri is invalid.
 */
public String getMetadataServiceUri() throws ConfigurationException {
    String serviceUri = getString(METADATA_SERVICE_URI);
    if (StringUtils.isBlank(serviceUri)) {
        // no service uri is defined: fall back to the legacy zk settings.
        // NOTE: the layout lookup runs first so a bad factory class still
        // surfaces as a ConfigurationException even without zk servers.
        String ledgerManagerType = getLedgerManagerLayoutStringFromFactoryClass();
        String zkServers = getZkServers();
        if (zkServers != null) {
            // URI doesn't accept ',' — join hosts with ';'
            serviceUri = String.format(
                    "zk+%s://%s%s",
                    ledgerManagerType,
                    zkServers.replace(",", ";"),
                    getZkLedgersRootPath());
        }
    }
    return serviceUri;
}
/**
 * Set the metadata service uri.
 *
 * @param serviceUri the metadata service uri, e.g. {@code zk+hierarchical://host/ledgers}.
 * @return the configuration object.
 */
public T setMetadataServiceUri(String serviceUri) {
    setProperty(METADATA_SERVICE_URI, serviceUri);
    return getThis();
}
/**
 * Get zookeeper servers to connect.
 *
 * <p>`zkServers` is deprecating, in favor of using `metadataServiceUri`
 *
 * @return comma-separated zookeeper servers, or null when none are configured
 * @deprecated since 4.7.0
 */
@Deprecated
public String getZkServers() {
    // use a wildcard-typed list instead of a raw type, and isEmpty() instead
    // of a size comparison
    List<?> servers = getList(ZK_SERVERS, null);
    if (null == servers || servers.isEmpty()) {
        return null;
    }
    return StringUtils.join(servers, ",");
}
/**
 * Set zookeeper servers to connect.
 *
 * <p>`zkServers` is deprecating, in favor of using `metadataServiceUri`
 *
 * @param zkServers
 *          ZooKeeper servers to connect
 * @return the configuration object.
 */
@Deprecated
public T setZkServers(String zkServers) {
    setProperty(ZK_SERVERS, zkServers);
    return getThis();
}
/**
 * Get zookeeper timeout.
 *
 * @return zookeeper server timeout; default 10000.
 */
public int getZkTimeout() {
    final int defaultTimeout = 10000;
    return getInt(ZK_TIMEOUT, defaultTimeout);
}
/**
 * Set zookeeper timeout.
 *
 * @param zkTimeout
 *          ZooKeeper server timeout
 * @return server configuration
 */
public T setZkTimeout(int zkTimeout) {
    setProperty(ZK_TIMEOUT, Integer.toString(zkTimeout));
    return getThis();
}
/**
 * Get zookeeper client backoff max retry times.
 *
 * @return zk backoff max retry times; unlimited by default.
 */
public int getZkRetryBackoffMaxRetries() {
    // Integer.MAX_VALUE effectively means "retry forever"
    return getInt(ZK_RETRY_BACKOFF_MAX_RETRIES, Integer.MAX_VALUE);
}
/**
 * Set zookeeper client backoff max retry times.
 *
 * @param maxRetries
 *          backoff max retry times
 * @return server configuration.
 */
public T setZkRetryBackoffMaxRetries(int maxRetries) {
    setProperty(ZK_RETRY_BACKOFF_MAX_RETRIES, Integer.toString(maxRetries));
    return getThis();
}
/**
 * Set Ledger Manager Type.
 *
 * @param lmType
 *          Ledger Manager Type
 * @deprecated replaced by {@link #setLedgerManagerFactoryClass}
 */
@Deprecated
public void setLedgerManagerType(String lmType) {
    setProperty(LEDGER_MANAGER_TYPE, lmType);
}
/**
 * Get Ledger Manager Type.
 *
 * @return ledger manager type, or null when unconfigured
 *
 * @deprecated replaced by {@link #getLedgerManagerFactoryClass()}
 */
@Deprecated
public String getLedgerManagerType() {
    return getString(LEDGER_MANAGER_TYPE);
}
/**
 * Set the flag to allow using shaded ledger manager factory class for
 * instantiating a ledger manager factory.
 *
 * @param allowed
 *          the flag to allow/disallow using shaded ledger manager factory class
 * @return configuration instance.
 */
public T setAllowShadedLedgerManagerFactoryClass(boolean allowed) {
    setProperty(ALLOW_SHADED_LEDGER_MANAGER_FACTORY_CLASS, allowed);
    return getThis();
}
/**
 * Is shaded ledger manager factory class name allowed to be used for
 * instantiating ledger manager factory.
 *
 * @return true if a shaded class name is allowed; default false.
 */
public boolean isShadedLedgerManagerFactoryClassAllowed() {
    return getBoolean(ALLOW_SHADED_LEDGER_MANAGER_FACTORY_CLASS, false);
}
/**
 * Set the class prefix of the shaded ledger manager factory class for
 * instantiating a ledger manager factory.
 *
 * <p>This setting only takes effects when {@link #isShadedLedgerManagerFactoryClassAllowed()}
 * returns true.
 *
 * @param classPrefix
 *          the class prefix of shaded ledger manager factory class
 * @return configuration instance.
 */
public T setShadedLedgerManagerFactoryClassPrefix(String classPrefix) {
    setProperty(SHADED_LEDGER_MANAGER_FACTORY_CLASS_PREFIX, classPrefix);
    return getThis();
}
/**
 * Get the class prefix of the shaded ledger manager factory class name allowed to be used for
 * instantiating ledger manager factory.
 *
 * <p>This setting only takes effects when {@link #isShadedLedgerManagerFactoryClassAllowed()}
 * returns true
 *
 * @return the shaded class prefix; default "dlshade.".
 * @see #isShadedLedgerManagerFactoryClassAllowed()
 */
public String getShadedLedgerManagerFactoryClassPrefix() {
    return getString(SHADED_LEDGER_MANAGER_FACTORY_CLASS_PREFIX, "dlshade.");
}
/**
 * Set Ledger Manager Factory Class Name.
 *
 * @param factoryClassName
 *          Ledger Manager Factory Class Name
 */
public void setLedgerManagerFactoryClassName(String factoryClassName) {
    setProperty(LEDGER_MANAGER_FACTORY_CLASS, factoryClassName);
}
/**
 * Get Ledger Manager Factory Class Name.
 *
 * @return ledger manager factory class name, or null when unconfigured.
 */
public String getLedgerManagerFactoryClassName() {
    return getString(LEDGER_MANAGER_FACTORY_CLASS);
}
/**
 * Set Ledger metadata format version.
 *
 * @param metadataFormatVersion
 *          Ledger metadata format version. pass -1 to use default version
 */
public void setLedgerMetadataFormatVersion(int metadataFormatVersion) {
    setProperty(LEDGER_METADATA_FORMAT_VERSION, metadataFormatVersion);
}
/**
 * Get Ledger metadata format version.
 *
 * @return ledger metadata format version; -1 means the default version.
 */
public int getLedgerMetadataFormatVersion() {
    return getInt(LEDGER_METADATA_FORMAT_VERSION, -1);
}
/**
 * Get layout string ("null" if unconfigured).
 *
 * @return null, hierarchical, longhierarchical, or flat based on LEDGER_MANAGER_FACTORY_CLASS
 * @throws ConfigurationException if the configured factory class is not a zookeeper-based
 *         implementation, or cannot be loaded
 */
@SuppressWarnings("deprecation")
public String getLedgerManagerLayoutStringFromFactoryClass() throws ConfigurationException {
    String ledgerManagerType;
    Class<? extends LedgerManagerFactory> factoryClass = getLedgerManagerFactoryClass();
    if (factoryClass == null) {
        // set the ledger manager type to "null", so the driver implementation knows that the type is not set.
        ledgerManagerType = "null";
    } else {
        if (!AbstractZkLedgerManagerFactory.class.isAssignableFrom(factoryClass)) {
            // this is a non-zk implementation
            throw new ConfigurationException("metadata service uri is not supported for " + factoryClass);
        }
        // map each known zk-based factory class to its layout name
        if (factoryClass == HierarchicalLedgerManagerFactory.class) {
            ledgerManagerType = HierarchicalLedgerManagerFactory.NAME;
        } else if (factoryClass == org.apache.bookkeeper.meta.FlatLedgerManagerFactory.class) {
            ledgerManagerType = org.apache.bookkeeper.meta.FlatLedgerManagerFactory.NAME;
        } else if (factoryClass == LongHierarchicalLedgerManagerFactory.class) {
            ledgerManagerType = LongHierarchicalLedgerManagerFactory.NAME;
        } else if (factoryClass == org.apache.bookkeeper.meta.MSLedgerManagerFactory.class) {
            ledgerManagerType = org.apache.bookkeeper.meta.MSLedgerManagerFactory.NAME;
        } else {
            throw new IllegalArgumentException("Unknown zookeeper based ledger manager factory : "
                    + factoryClass);
        }
    }
    return ledgerManagerType;
}
/**
 * Set Ledger Manager Factory Class.
 *
 * @param factoryClass
 *          Ledger Manager Factory Class
 */
public void setLedgerManagerFactoryClass(Class<? extends LedgerManagerFactory> factoryClass) {
    setProperty(LEDGER_MANAGER_FACTORY_CLASS, factoryClass.getName());
}
/**
 * Get ledger manager factory class.
 *
 * @return ledger manager factory class, or null when unconfigured
 * @throws ConfigurationException if the configured class cannot be loaded
 */
public Class<? extends LedgerManagerFactory> getLedgerManagerFactoryClass()
    throws ConfigurationException {
    return ReflectionUtils.getClass(this, LEDGER_MANAGER_FACTORY_CLASS,
            null, LedgerManagerFactory.class,
            DEFAULT_LOADER);
}
/**
* Set Zk Ledgers Root Path.
*
* @param zkLedgersPath zk ledgers root path
*/
@Deprecated
public void setZkLedgersRootPath(String zkLedgersPath) {
setProperty(ZK_LEDGERS_ROOT_PATH, zkLedgersPath);
}
/**
* Get Zk Ledgers Root Path.
*
* @return zk ledgers root path
*/
@Deprecated
public String getZkLedgersRootPath() {
return getString(ZK_LEDGERS_ROOT_PATH, "/ledgers");
}
/**
* Get zookeeper access request rate limit.
*
* @return zookeeper access request rate limit.
*/
public double getZkRequestRateLimit() {
return getDouble(ZK_REQUEST_RATE_LIMIT, 0);
}
/**
* Set zookeeper access request rate limit.
*
* @param rateLimit
* zookeeper access request rate limit.
*/
public void setZkRequestRateLimit(double rateLimit) {
setProperty(ZK_REQUEST_RATE_LIMIT, rateLimit);
}
/**
* Are z-node created with strict ACLs.
*
* @return usage of secure ZooKeeper ACLs
*/
public boolean isZkEnableSecurity() {
return getBoolean(ZK_ENABLE_SECURITY, false);
}
/**
* Set the usage of ACLs of new z-nodes.
*
* @param zkEnableSecurity
*/
public void setZkEnableSecurity(boolean zkEnableSecurity) {
setProperty(ZK_ENABLE_SECURITY, zkEnableSecurity);
}
/**
* Get the node under which available bookies are stored.
*
* @return Node under which available bookies are stored.
*/
@Deprecated
public String getZkAvailableBookiesPath() {
return getZkLedgersRootPath() + "/" + AVAILABLE_NODE;
}
/**
* Set the max entries to keep in fragment for re-replication. If fragment
* has more entries than this count, then the original fragment will be
* split into multiple small logical fragments by keeping max entries count
* to rereplicationEntryBatchSize. So, re-replication will happen in batches
* wise.
*/
public void setRereplicationEntryBatchSize(long rereplicationEntryBatchSize) {
setProperty(REREPLICATION_ENTRY_BATCH_SIZE, rereplicationEntryBatchSize);
}
/**
* Get the re-replication entry batch size.
*/
public long getRereplicationEntryBatchSize() {
return getLong(REREPLICATION_ENTRY_BATCH_SIZE, 10);
}
/**
* Get metastore implementation class.
*
* @return metastore implementation class name.
*/
public String getMetastoreImplClass() {
return getString(METASTORE_IMPL_CLASS);
}
/**
* Set metastore implementation class.
*
* @param metastoreImplClass
* Metastore implementation Class name.
*/
public void setMetastoreImplClass(String metastoreImplClass) {
setProperty(METASTORE_IMPL_CLASS, metastoreImplClass);
}
/**
* Get max entries per scan in metastore.
*
* @return max entries per scan in metastore.
*/
public int getMetastoreMaxEntriesPerScan() {
return getInt(METASTORE_MAX_ENTRIES_PER_SCAN, 50);
}
/**
* Set max entries per scan in metastore.
*
* @param maxEntries
* Max entries per scan in metastore.
*/
public void setMetastoreMaxEntriesPerScan(int maxEntries) {
setProperty(METASTORE_MAX_ENTRIES_PER_SCAN, maxEntries);
}
    /**
     * Store a {@link Feature} instance under the given configuration property key.
     *
     * @param configProperty
     *          configuration key under which the feature is stored
     * @param feature
     *          feature instance to store
     */
    public void setFeature(String configProperty, Feature feature) {
        setProperty(configProperty, feature);
    }
public Feature getFeature(String configProperty, Feature defaultValue) {
if (null == getProperty(configProperty)) {
return defaultValue;
} else {
return (Feature) getProperty(configProperty);
}
}
/**
* Set Ledger id formatter Class.
*
* @param formatterClass
* LedgerIdFormatter Class
*/
public void setLedgerIdFormatterClass(Class<? extends LedgerIdFormatter> formatterClass) {
setProperty(LEDGERID_FORMATTER_CLASS, formatterClass.getName());
}
/**
* Get ledger id formatter class.
*
* @return LedgerIdFormatter class
*/
public Class<? extends LedgerIdFormatter> getLedgerIdFormatterClass()
throws ConfigurationException {
return ReflectionUtils.getClass(this, LEDGERID_FORMATTER_CLASS, DEFAULT_LEDGERID_FORMATTER,
LedgerIdFormatter.class, DEFAULT_LOADER);
}
/**
* Set entry formatter Class.
*
* @param formatterClass
* EntryFormatter Class
*/
public void setEntryFormatterClass(Class<? extends EntryFormatter> formatterClass) {
setProperty(ENTRY_FORMATTER_CLASS, formatterClass.getName());
}
/**
* Get entry formatter class.
*
* @return EntryFormatter class
*/
public Class<? extends EntryFormatter> getEntryFormatterClass()
throws ConfigurationException {
return ReflectionUtils.getClass(this, ENTRY_FORMATTER_CLASS, DEFAULT_ENTRY_FORMATTER, EntryFormatter.class,
DEFAULT_LOADER);
}
/**
* Set the client authentication provider factory class name.
* If this is not set, no authentication will be used
*
* @param factoryClass
* the client authentication provider factory class name
* @return client configuration
*/
public T setClientAuthProviderFactoryClass(
String factoryClass) {
setProperty(CLIENT_AUTH_PROVIDER_FACTORY_CLASS, factoryClass);
return getThis();
}
/**
* Get the client authentication provider factory class name.
* If this returns null, no authentication will take place.
*
* @return the client authentication provider factory class name or null.
*/
public String getClientAuthProviderFactoryClass() {
return getString(CLIENT_AUTH_PROVIDER_FACTORY_CLASS, null);
}
/**
* Get the maximum netty frame size in bytes. Any message received larger
* that this will be rejected.
*
* @return the maximum netty frame size in bytes.
*/
public int getNettyMaxFrameSizeBytes() {
return getInt(NETTY_MAX_FRAME_SIZE, DEFAULT_NETTY_MAX_FRAME_SIZE);
}
/**
* Set the max number of bytes a single message can be that is read by the bookie.
* Any message larger than that size will be rejected.
*
* @param maxSize
* the max size in bytes
* @return server configuration
*/
public T setNettyMaxFrameSizeBytes(int maxSize) {
setProperty(NETTY_MAX_FRAME_SIZE, String.valueOf(maxSize));
return getThis();
}
/**
* Get the security provider factory class name. If this returns null, no security will be enforced on the channel.
*
* @return the security provider factory class name or null.
*/
public String getTLSProviderFactoryClass() {
return getString(TLS_PROVIDER_FACTORY_CLASS, null);
}
/**
* Set the client security provider factory class name. If this is not set, no security will be used on the channel.
*
* @param factoryClass
* the client security provider factory class name
* @return client configuration
*/
public T setTLSProviderFactoryClass(String factoryClass) {
setProperty(TLS_PROVIDER_FACTORY_CLASS, factoryClass);
return getThis();
}
/**
* Get TLS Provider (JDK or OpenSSL).
*
* @return the TLS provider to use in creating TLS Context
*/
public String getTLSProvider() {
return getString(TLS_PROVIDER, "OpenSSL");
}
/**
* Set TLS Provider (JDK or OpenSSL).
*
* @param provider
* TLS Provider type
* @return Client Configuration
*/
public T setTLSProvider(String provider) {
setProperty(TLS_PROVIDER, provider);
return getThis();
}
/**
* Whether the client will send an TLS certificate on TLS-handshake.
*
* @see #setTLSClientAuthentication(boolean)
* @return whether TLS is enabled on the bookie or not.
*/
public boolean getTLSClientAuthentication() {
return getBoolean(TLS_CLIENT_AUTHENTICATION, false);
}
/**
* Specify whether the client will send an TLS certificate on TLS-handshake.
*
* @param enabled
* Whether to send a certificate or not
* @return client configuration
*/
public T setTLSClientAuthentication(boolean enabled) {
setProperty(TLS_CLIENT_AUTHENTICATION, enabled);
return getThis();
}
/**
* Set tls certificate files refresh duration in seconds.
*
* @param certFilesRefreshSec
* tls certificate files refresh duration in seconds (set 0 to
* disable auto refresh)
* @return current configuration
*/
public T setTLSCertFilesRefreshDurationSeconds(long certFilesRefreshSec) {
setProperty(TLS_CERT_FILES_REFRESH_DURATION_SECONDS, certFilesRefreshSec);
return getThis();
}
/**
* Get tls certificate files refresh duration in seconds.
*
* @return tls certificate files refresh duration in seconds. Default 0
* to disable auto refresh.
*
*/
public long getTLSCertFilesRefreshDurationSeconds() {
return getLong(TLS_CERT_FILES_REFRESH_DURATION_SECONDS, 0);
}
/**
* Set the list of enabled TLS cipher suites. Leave null not to override default JDK list. This list will be passed
* to {@link SSLEngine#setEnabledCipherSuites(java.lang.String[]) }. Please refer to official JDK JavaDocs
*
* @param list
* comma separated list of enabled TLS cipher suites
* @return current configuration
*/
public T setTLSEnabledCipherSuites(
String list) {
setProperty(TLS_ENABLED_CIPHER_SUITES, list);
return getThis();
}
/**
* Get the list of enabled TLS cipher suites.
*
* @return this list of enabled TLS cipher suites
*
* @see #setTLSEnabledCipherSuites(java.lang.String)
*/
public String getTLSEnabledCipherSuites() {
return getString(TLS_ENABLED_CIPHER_SUITES, null);
}
/**
* Set the list of enabled TLS protocols. Leave null not to override default JDK list. This list will be passed to
* {@link SSLEngine#setEnabledProtocols(java.lang.String[]) }. Please refer to official JDK JavaDocs
*
* @param list
* comma separated list of enabled TLS cipher suites
* @return current configuration
*/
public T setTLSEnabledProtocols(
String list) {
setProperty(TLS_ENABLED_PROTOCOLS, list);
return getThis();
}
/**
* Get the list of enabled TLS protocols.
*
* @return the list of enabled TLS protocols.
*
* @see #setTLSEnabledProtocols(java.lang.String)
*/
public String getTLSEnabledProtocols() {
return getString(TLS_ENABLED_PROTOCOLS, null);
}
/**
* Set the minimum number of racks per write quorum.
*/
public void setMinNumRacksPerWriteQuorum(int minNumRacksPerWriteQuorum) {
setProperty(MIN_NUM_RACKS_PER_WRITE_QUORUM, minNumRacksPerWriteQuorum);
}
/**
* Get the minimum number of racks per write quorum.
*/
public int getMinNumRacksPerWriteQuorum() {
return getInteger(MIN_NUM_RACKS_PER_WRITE_QUORUM, 2);
}
/**
* Set the minimum number of zones per write quorum in
* ZoneAwarePlacementPolicy.
*/
public void setMinNumZonesPerWriteQuorum(int minNumZonesPerWriteQuorum) {
setProperty(MIN_NUM_ZONES_PER_WRITE_QUORUM, minNumZonesPerWriteQuorum);
}
/**
* Get the minimum number of zones per write quorum in
* ZoneAwarePlacementPolicy.
*/
public int getMinNumZonesPerWriteQuorum() {
return getInteger(MIN_NUM_ZONES_PER_WRITE_QUORUM, 2);
}
/**
* Set the desired number of zones per write quorum in
* ZoneAwarePlacementPolicy.
*/
public void setDesiredNumZonesPerWriteQuorum(int desiredNumZonesPerWriteQuorum) {
setProperty(DESIRED_NUM_ZONES_PER_WRITE_QUORUM, desiredNumZonesPerWriteQuorum);
}
/**
* Get the desired number of zones per write quorum in
* ZoneAwarePlacementPolicy.
*/
public int getDesiredNumZonesPerWriteQuorum() {
return getInteger(DESIRED_NUM_ZONES_PER_WRITE_QUORUM, 3);
}
/**
* Set the flag to enforce strict zoneaware placement.
*
* <p>in ZoneawareEnsemblePlacementPolicy if strict placement is enabled then
* minZones/desiredZones in writeQuorum would be maintained otherwise it
* will pick nodes randomly.
*/
public void setEnforceStrictZoneawarePlacement(boolean enforceStrictZoneawarePlacement) {
setProperty(ENFORCE_STRICT_ZONEAWARE_PLACEMENT, enforceStrictZoneawarePlacement);
}
/**
* Get the flag to enforce strict zoneaware placement.
*
* <p>in ZoneawareEnsemblePlacementPolicy if strict placement is enabled then
* minZones/desiredZones in writeQuorum would be maintained otherwise it
* will pick nodes randomly.
*/
public boolean getEnforceStrictZoneawarePlacement() {
return getBoolean(ENFORCE_STRICT_ZONEAWARE_PLACEMENT, true);
}
/**
* Set the flag to enforce minimum number of racks per write quorum.
*/
public void setEnforceMinNumRacksPerWriteQuorum(boolean enforceMinNumRacksPerWriteQuorum) {
setProperty(ENFORCE_MIN_NUM_RACKS_PER_WRITE_QUORUM, enforceMinNumRacksPerWriteQuorum);
}
/**
* Get the flag which enforces the minimum number of racks per write quorum.
*/
public boolean getEnforceMinNumRacksPerWriteQuorum() {
return getBoolean(ENFORCE_MIN_NUM_RACKS_PER_WRITE_QUORUM, false);
}
/**
* Set the flag to enforce minimum number of fault domains for write.
*/
public void setEnforceMinNumFaultDomainsForWrite(boolean enforceMinNumFaultDomainsForWrite) {
setProperty(ENFORCE_MIN_NUM_FAULT_DOMAINS_FOR_WRITE, enforceMinNumFaultDomainsForWrite);
}
/**
* Get the flag to enforce minimum number of fault domains for write.
*/
public boolean getEnforceMinNumFaultDomainsForWrite() {
return getBoolean(ENFORCE_MIN_NUM_FAULT_DOMAINS_FOR_WRITE, false);
}
/**
* Sets the flag to ignore usage of localnode in placement policy.
*/
public void setIgnoreLocalNodeInPlacementPolicy(boolean ignoreLocalNodeInPlacementPolicy) {
setProperty(IGNORE_LOCAL_NODE_IN_PLACEMENT_POLICY, ignoreLocalNodeInPlacementPolicy);
}
/**
* Whether to ignore localnode in placementpolicy.
*/
public boolean getIgnoreLocalNodeInPlacementPolicy() {
return getBoolean(IGNORE_LOCAL_NODE_IN_PLACEMENT_POLICY, false);
}
/**
* Enable the Auditor to use system time as underreplicated ledger mark
* time.
*
* <p>If this is enabled, Auditor will write a ctime field into the
* underreplicated ledger znode.
*
* @param enabled
* flag to enable/disable Auditor using system time as
* underreplicated ledger mark time.
*/
public T setStoreSystemTimeAsLedgerUnderreplicatedMarkTime(boolean enabled) {
setProperty(STORE_SYSTEMTIME_AS_LEDGER_UNDERREPLICATED_MARK_TIME, enabled);
return getThis();
}
/**
* Return the flag that indicates whether auditor is using system time as
* underreplicated ledger mark time.
*
* @return the flag that indicates whether auditor is using system time as
* underreplicated ledger mark time.
*/
public boolean getStoreSystemTimeAsLedgerUnderreplicatedMarkTime() {
return getBoolean(STORE_SYSTEMTIME_AS_LEDGER_UNDERREPLICATED_MARK_TIME, true);
}
/**
* Whether to preserve MDC for tasks in Executor.
*
* @return flag to enable/disable MDC preservation in Executor.
*/
public boolean getPreserveMdcForTaskExecution() {
return getBoolean(PRESERVE_MDC_FOR_TASK_EXECUTION, false);
}
/**
* Whether to preserve MDC for tasks in Executor.
*
* @param enabled
* flag to enable/disable MDC preservation in Executor.
* @return configuration.
*/
public T setPreserveMdcForTaskExecution(boolean enabled) {
setProperty(PRESERVE_MDC_FOR_TASK_EXECUTION, enabled);
return getThis();
}
/**
* @return the configured pooling policy for the allocator.
*/
public PoolingPolicy getAllocatorPoolingPolicy() {
return PoolingPolicy.valueOf(this.getString(ALLOCATOR_POOLING_POLICY, PoolingPolicy.PooledDirect.toString()));
}
/**
* Define the memory pooling policy.
*
* <p>Default is {@link PoolingPolicy#PooledDirect}
*
* @param poolingPolicy
* the memory pooling policy
* @return configuration object.
*/
public T setAllocatorPoolingPolicy(PoolingPolicy poolingPolicy) {
this.setProperty(ALLOCATOR_POOLING_POLICY, poolingPolicy.toString());
return getThis();
}
/**
* @return the configured pooling concurrency for the allocator.
*/
public int getAllocatorPoolingConcurrency() {
return this.getInteger(ALLOCATOR_POOLING_CONCURRENCY, PooledByteBufAllocator.defaultNumDirectArena());
}
/**
* Controls the amount of concurrency for the memory pool.
*
* <p>Default is to have a number of allocator arenas equals to 2 * CPUS.
*
* <p>Decreasing this number will reduce the amount of memory overhead, at the
* expense of increased allocation contention.
*
* @param concurrency
* the concurrency level to use for the allocator pool
* @return configuration object.
*/
public T setAllocatorPoolingConcurrenncy(int concurrency) {
this.setProperty(ALLOCATOR_POOLING_POLICY, concurrency);
return getThis();
}
/**
* @return the configured ouf of memory policy for the allocator.
*/
public OutOfMemoryPolicy getAllocatorOutOfMemoryPolicy() {
return OutOfMemoryPolicy
.valueOf(this.getString(ALLOCATOR_OOM_POLICY, OutOfMemoryPolicy.FallbackToHeap.toString()));
}
/**
* Define the memory allocator out of memory policy.
*
* <p>Default is {@link OutOfMemoryPolicy#FallbackToHeap}
*
* @param oomPolicy
* the "out-of-memory" policy for the memory allocator
* @return configuration object.
*/
public T setAllocatorOutOfMemoryPolicy(OutOfMemoryPolicy oomPolicy) {
this.setProperty(ALLOCATOR_OOM_POLICY, oomPolicy.toString());
return getThis();
}
/**
* Return the configured leak detection policy for the allocator.
*/
public LeakDetectionPolicy getAllocatorLeakDetectionPolicy() {
//see: https://lists.apache.org/thread/d3zw8bxhlg0wxfhocyjglq0nbxrww3sg
String nettyLevelStr = System.getProperty("io.netty.leakDetectionLevel", LeakDetectionPolicy.Disabled.name());
nettyLevelStr = System.getProperty("io.netty.leakDetection.level", nettyLevelStr);
String bkLevelStr = getString(ALLOCATOR_LEAK_DETECTION_POLICY, LeakDetectionPolicy.Disabled.name());
LeakDetectionPolicy nettyLevel = LeakDetectionPolicy.parseLevel(nettyLevelStr);
LeakDetectionPolicy bkLevel = LeakDetectionPolicy.parseLevel(bkLevelStr);
if (nettyLevel.ordinal() >= bkLevel.ordinal()) {
return nettyLevel;
} else {
return bkLevel;
}
}
/**
* Enable the leak detection for the allocator.
*
* <p>Default is {@link LeakDetectionPolicy#Disabled}
*
* @param leakDetectionPolicy
* the leak detection policy for the memory allocator
* @return configuration object.
*/
public T setAllocatorLeakDetectionPolicy(LeakDetectionPolicy leakDetectionPolicy) {
this.setProperty(ALLOCATOR_LEAK_DETECTION_POLICY, leakDetectionPolicy.toString());
return getThis();
}
/**
* Return whether the busy-wait is enabled for BookKeeper and Netty IO threads.
*
* <p>Default is false
*
* @return the value of the option
*/
public boolean isBusyWaitEnabled() {
return getBoolean(ENABLE_BUSY_WAIT, false);
}
/**
* Option to enable busy-wait settings.
*
* <p>Default is false.
*
* <p>WARNING: This option will enable spin-waiting on executors and IO threads
* in order to reduce latency during context switches. The spinning will
* consume 100% CPU even when bookie is not doing any work. It is
* recommended to reduce the number of threads in the main workers pool
* ({@link ClientConfiguration#setNumWorkerThreads(int)}) and Netty event
* loop {@link ClientConfiguration#setNumIOThreads(int)} to only have few
* CPU cores busy.
* </p>
*
* @param busyWaitEanbled
* if enabled, use spin-waiting strategy to reduce latency in
* context switches
*
* @see #isBusyWaitEnabled()
*/
public T setBusyWaitEnabled(boolean busyWaitEanbled) {
setProperty(ENABLE_BUSY_WAIT, busyWaitEanbled);
return getThis();
}
/**
* Return the flag indicating whether to limit stats logging.
*
* @return
* the boolean flag indicating whether to limit stats logging
*/
public boolean getLimitStatsLogging() {
return getBoolean(LIMIT_STATS_LOGGING, true);
}
/**
* Sets flag to limit the stats logging.
*
* @param limitStatsLogging
* flag to limit the stats logging.
* @return configuration.
*/
public T setLimitStatsLogging(boolean limitStatsLogging) {
setProperty(LIMIT_STATS_LOGGING, limitStatsLogging);
return getThis();
}
/**
* Get the bytes rate of re-replication.
* Default value is -1 which it means entries will replicated without any throttling activity.
*
* @return bytes rate of re-replication.
*/
public int getReplicationRateByBytes() {
return getInt(REPLICATION_RATE_BY_BYTES, -1);
}
/**
* Set the bytes rate of re-replication.
*
* @param rate bytes rate of re-replication.
*
* @return ClientConfiguration
*/
public T setReplicationRateByBytes(int rate) {
this.setProperty(REPLICATION_RATE_BY_BYTES, rate);
return getThis();
}
/**
* Trickery to allow inheritance with fluent style.
*/
protected abstract T getThis();
/**
* returns the string representation of json format of this config.
*
* @return
* @throws ParseJsonException
*/
public String asJson() throws ParseJsonException {
return JsonUtil.toJson(toMap());
}
private Map<String, Object> toMap() {
Map<String, Object> configMap = new HashMap<>();
Iterator<String> iterator = this.getKeys();
while (iterator.hasNext()) {
String key = iterator.next();
Object property = this.getProperty(key);
if (property != null) {
configMap.put(key, property.toString());
}
}
return configMap;
}
}
| 313 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Configurations used by bookkeeper.
*/
package org.apache.bookkeeper.conf; | 314 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragmentReplicator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import static org.apache.bookkeeper.client.LedgerHandle.INVALID_ENTRY_ID;
import static org.apache.bookkeeper.replication.ReplicationStats.NUM_BYTES_READ;
import static org.apache.bookkeeper.replication.ReplicationStats.NUM_BYTES_WRITTEN;
import static org.apache.bookkeeper.replication.ReplicationStats.NUM_ENTRIES_READ;
import static org.apache.bookkeeper.replication.ReplicationStats.NUM_ENTRIES_WRITTEN;
import static org.apache.bookkeeper.replication.ReplicationStats.READ_DATA_LATENCY;
import static org.apache.bookkeeper.replication.ReplicationStats.REPLICATION_WORKER_SCOPE;
import static org.apache.bookkeeper.replication.ReplicationStats.WRITE_DATA_LATENCY;
import com.google.common.util.concurrent.RateLimiter;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.ReferenceCounted;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
import java.util.stream.Collectors;
import org.apache.bookkeeper.client.AsyncCallback.ReadCallback;
import org.apache.bookkeeper.client.api.WriteFlag;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieProtocol;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.MultiCallback;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
import org.apache.bookkeeper.util.ByteBufList;
import org.apache.bookkeeper.util.MathUtils;
import org.apache.zookeeper.AsyncCallback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This is the helper class for replicating the fragments from one bookie to
* another.
*/
@StatsDoc(
name = REPLICATION_WORKER_SCOPE,
help = "Ledger fragment replicator related stats"
)
public class LedgerFragmentReplicator {
// BookKeeper instance
private BookKeeper bkc;
private StatsLogger statsLogger;
@StatsDoc(
name = NUM_ENTRIES_READ,
help = "Number of entries read by the replicator"
)
private final Counter numEntriesRead;
@StatsDoc(
name = NUM_BYTES_READ,
help = "The distribution of size of entries read by the replicator"
)
private final OpStatsLogger numBytesRead;
@StatsDoc(
name = NUM_ENTRIES_WRITTEN,
help = "Number of entries written by the replicator"
)
private final Counter numEntriesWritten;
@StatsDoc(
name = NUM_BYTES_WRITTEN,
help = "The distribution of size of entries written by the replicator"
)
private final OpStatsLogger numBytesWritten;
@StatsDoc(
name = READ_DATA_LATENCY,
help = "The distribution of latency of read entries by the replicator"
)
private final OpStatsLogger readDataLatency;
@StatsDoc(
name = WRITE_DATA_LATENCY,
help = "The distribution of latency of write entries by the replicator"
)
private final OpStatsLogger writeDataLatency;
protected Throttler replicationThrottle = null;
private AtomicInteger averageEntrySize;
private static final int INITIAL_AVERAGE_ENTRY_SIZE = 1024;
private static final double AVERAGE_ENTRY_SIZE_RATIO = 0.8;
private ClientConfiguration conf;
    /**
     * Create a replicator backed by the given BookKeeper client.
     *
     * @param bkc
     *          BookKeeper client used to read entries and write them to new bookies
     * @param statsLogger
     *          stats logger under which replication metrics are registered
     * @param conf
     *          client configuration; a positive {@code getReplicationRateByBytes()}
     *          enables byte-rate throttling of replication
     */
    public LedgerFragmentReplicator(BookKeeper bkc, StatsLogger statsLogger, ClientConfiguration conf) {
        this.bkc = bkc;
        this.statsLogger = statsLogger;
        numEntriesRead = this.statsLogger.getCounter(NUM_ENTRIES_READ);
        numBytesRead = this.statsLogger.getOpStatsLogger(NUM_BYTES_READ);
        numEntriesWritten = this.statsLogger.getCounter(NUM_ENTRIES_WRITTEN);
        numBytesWritten = this.statsLogger.getOpStatsLogger(NUM_BYTES_WRITTEN);
        readDataLatency = this.statsLogger.getOpStatsLogger(READ_DATA_LATENCY);
        writeDataLatency = this.statsLogger.getOpStatsLogger(WRITE_DATA_LATENCY);
        // Throttling is only active when a positive byte rate is configured;
        // otherwise replicationThrottle stays null and no limiting happens.
        if (conf.getReplicationRateByBytes() > 0) {
            this.replicationThrottle = new Throttler(conf.getReplicationRateByBytes());
        }
        averageEntrySize = new AtomicInteger(INITIAL_AVERAGE_ENTRY_SIZE);
        this.conf = conf;
    }
    /**
     * Create a replicator with stats disabled (uses {@link NullStatsLogger}).
     *
     * @param bkc
     *          BookKeeper client used to read entries and write them to new bookies
     * @param conf
     *          client configuration
     */
    public LedgerFragmentReplicator(BookKeeper bkc, ClientConfiguration conf) {
        this(bkc, NullStatsLogger.INSTANCE, conf);
    }
private static final Logger LOG = LoggerFactory
.getLogger(LedgerFragmentReplicator.class);
    /**
     * Replicate a single closed ledger fragment to {@code newBookies}.
     *
     * <p>Every stored entry in the fragment is recovered individually;
     * {@code ledgerFragmentMcb} fires once all per-entry recoveries complete
     * (or immediately with an error for an unclosed/empty fragment).
     *
     * @param lh LedgerHandle for the ledger being replicated
     * @param lf the fragment to replicate; must be closed
     * @param ledgerFragmentMcb callback invoked when the whole fragment is done
     * @param newBookies target bookies that should receive the replicated entries
     * @param onReadEntryFailureCallback invoked as (ledgerId, entryId) when reading
     *            an entry fails
     * @throws InterruptedException if interrupted while scheduling entry recovery
     */
    private void replicateFragmentInternal(final LedgerHandle lh,
            final LedgerFragment lf,
            final AsyncCallback.VoidCallback ledgerFragmentMcb,
            final Set<BookieId> newBookies,
            final BiConsumer<Long, Long> onReadEntryFailureCallback) throws InterruptedException {
        // An unclosed fragment may still be receiving writes; copying it
        // would replicate a stale view, so refuse outright.
        if (!lf.isClosed()) {
            LOG.error("Trying to replicate an unclosed fragment;"
                    + " This is not safe {}", lf);
            ledgerFragmentMcb.processResult(BKException.Code.UnclosedFragmentException,
                    null, null);
            return;
        }
        Long startEntryId = lf.getFirstStoredEntryId();
        Long endEntryId = lf.getLastStoredEntryId();
        /*
         * if startEntryId is INVALID_ENTRY_ID then endEntryId should be
         * INVALID_ENTRY_ID and viceversa.
         */
        if (startEntryId == INVALID_ENTRY_ID ^ endEntryId == INVALID_ENTRY_ID) {
            LOG.error("For LedgerFragment: {}, seeing inconsistent firstStoredEntryId: {} and lastStoredEntryId: {}",
                    lf, startEntryId, endEntryId);
            assert false;
        }
        if (startEntryId > endEntryId || endEntryId <= INVALID_ENTRY_ID) {
            // for open ledger which there is no entry, the start entry id is 0,
            // the end entry id is -1.
            // we can return immediately to trigger forward read
            ledgerFragmentMcb.processResult(BKException.Code.OK, null, null);
            return;
        }
        /*
         * Add all the entries to entriesToReplicate list from
         * firstStoredEntryId to lastStoredEntryID.
         */
        List<Long> entriesToReplicate = new LinkedList<Long>();
        long lastStoredEntryId = lf.getLastStoredEntryId();
        for (long i = lf.getFirstStoredEntryId(); i <= lastStoredEntryId; i++) {
            entriesToReplicate.add(i);
        }
        /*
         * Now asynchronously replicate all of the entries for the ledger
         * fragment that were on the dead bookie.
         */
        // MultiCallback aggregates the per-entry results and invokes
        // ledgerFragmentMcb once, after entriesToReplicate.size() completions.
        MultiCallback ledgerFragmentEntryMcb = new MultiCallback(
                entriesToReplicate.size(), ledgerFragmentMcb, null, BKException.Code.OK,
                BKException.Code.LedgerRecoveryException);
        // Pick up any runtime change to the configured byte rate before
        // starting this batch of reads.
        if (this.replicationThrottle != null) {
            this.replicationThrottle.resetRate(this.conf.getReplicationRateByBytes());
        }
        for (final Long entryId : entriesToReplicate) {
            recoverLedgerFragmentEntry(entryId, lh, ledgerFragmentEntryMcb,
                    newBookies, onReadEntryFailureCallback);
        }
    }
    /**
     * This method replicate a ledger fragment which is a contiguous portion of
     * a ledger that was stored in an ensemble that included the failed bookie.
     * It will Splits the fragment into multiple sub fragments by keeping the
     * max entries up to the configured value of rereplicationEntryBatchSize and
     * then it re-replicates that batched entry fragments one by one. After
     * re-replication of all batched entry fragments, it will update the
     * ensemble info with new Bookie once
     *
     * @param lh
     *            LedgerHandle for the ledger
     * @param lf
     *            LedgerFragment to replicate
     * @param ledgerFragmentMcb
     *            MultiCallback to invoke once we've recovered the current
     *            ledger fragment.
     * @param targetBookieAddresses
     *            New bookies we want to use to recover and replicate the ledger
     *            entries that were stored on the failed bookie.
     * @param onReadEntryFailureCallback
     *            invoked as (ledgerId, entryId) whenever reading an entry fails
     *            during re-replication.
     * @throws InterruptedException
     *            if interrupted while replicating the first sub fragment.
     */
    void replicate(final LedgerHandle lh, final LedgerFragment lf,
            final AsyncCallback.VoidCallback ledgerFragmentMcb,
            final Set<BookieId> targetBookieAddresses,
            final BiConsumer<Long, Long> onReadEntryFailureCallback)
            throws InterruptedException {
        Set<LedgerFragment> partionedFragments = splitIntoSubFragments(lh, lf,
                bkc.getConf().getRereplicationEntryBatchSize());
        LOG.info("Replicating fragment {} in {} sub fragments.",
                lf, partionedFragments.size());
        // Sub fragments are replicated sequentially, one batch at a time.
        replicateNextBatch(lh, partionedFragments.iterator(),
                ledgerFragmentMcb, targetBookieAddresses, onReadEntryFailureCallback);
    }
    /**
     * Replicate the batched entry fragments one after other.
     *
     * <p>Recursively consumes {@code fragments}: each sub fragment is
     * replicated, and its completion callback triggers the next one. The
     * chain terminates with OK when the iterator is exhausted, or with the
     * first non-OK result.
     *
     * @param lh LedgerHandle for the ledger
     * @param fragments iterator over the remaining sub fragments
     * @param ledgerFragmentMcb callback fired once all sub fragments are done
     *            (or on the first failure)
     * @param targetBookieAddresses new bookies receiving the replicated entries
     * @param onReadEntryFailureCallback invoked as (ledgerId, entryId) when an
     *            entry read fails
     */
    private void replicateNextBatch(final LedgerHandle lh,
            final Iterator<LedgerFragment> fragments,
            final AsyncCallback.VoidCallback ledgerFragmentMcb,
            final Set<BookieId> targetBookieAddresses,
            final BiConsumer<Long, Long> onReadEntryFailureCallback) {
        if (fragments.hasNext()) {
            try {
                replicateFragmentInternal(lh, fragments.next(),
                        new AsyncCallback.VoidCallback() {
                            @Override
                            public void processResult(int rc, String v, Object ctx) {
                                if (rc != BKException.Code.OK) {
                                    // Abort the chain on the first failure.
                                    ledgerFragmentMcb.processResult(rc, null,
                                            null);
                                } else {
                                    replicateNextBatch(lh, fragments,
                                            ledgerFragmentMcb,
                                            targetBookieAddresses,
                                            onReadEntryFailureCallback);
                                }
                            }
                        }, targetBookieAddresses, onReadEntryFailureCallback);
            } catch (InterruptedException e) {
                ledgerFragmentMcb.processResult(
                        BKException.Code.InterruptedException, null, null);
                // Restore the interrupt flag for callers further up the stack.
                Thread.currentThread().interrupt();
            }
        } else {
            // All sub fragments replicated successfully.
            ledgerFragmentMcb.processResult(BKException.Code.OK, null, null);
        }
    }
/**
 * Split the full fragment into batched entry fragments by keeping
 * rereplicationEntryBatchSize of entries in each one and can treat them as
 * sub fragments.
 *
 * @param lh ledger handle the fragment belongs to
 * @param ledgerFragment fragment to split
 * @param rereplicationEntryBatchSize maximum number of entries per sub
 *        fragment; a non-positive value disables splitting
 * @return set of sub fragments, or a singleton set holding the original
 *         fragment when no split is needed
 */
static Set<LedgerFragment> splitIntoSubFragments(LedgerHandle lh,
        LedgerFragment ledgerFragment, long rereplicationEntryBatchSize) {
    Set<LedgerFragment> fragments = new HashSet<LedgerFragment>();
    if (rereplicationEntryBatchSize <= 0) {
        // rereplicationEntryBatchSize can not be 0 or less than 0,
        // returning with the current fragment
        fragments.add(ledgerFragment);
        return fragments;
    }

    long firstEntryId = ledgerFragment.getFirstStoredEntryId();
    long lastEntryId = ledgerFragment.getLastStoredEntryId();

    /*
     * if firstEntryId is INVALID_ENTRY_ID then lastEntryId should be
     * INVALID_ENTRY_ID and viceversa.
     */
    if (firstEntryId == INVALID_ENTRY_ID ^ lastEntryId == INVALID_ENTRY_ID) {
        LOG.error("For LedgerFragment: {}, seeing inconsistent firstStoredEntryId: {} and lastStoredEntryId: {}",
                ledgerFragment, firstEntryId, lastEntryId);
        assert false;
    }

    long numberOfEntriesToReplicate = firstEntryId == INVALID_ENTRY_ID ? 0 : (lastEntryId - firstEntryId) + 1;
    long splitsWithFullEntries = numberOfEntriesToReplicate
            / rereplicationEntryBatchSize;

    if (splitsWithFullEntries == 0) {// only one fragment
        fragments.add(ledgerFragment);
        return fragments;
    }

    long fragmentSplitLastEntry = 0;
    // NOTE: the counter must be a long; an int counter compared against a
    // long bound would overflow (and loop forever) for fragments with more
    // than Integer.MAX_VALUE full batches.
    for (long i = 0; i < splitsWithFullEntries; i++) {
        fragmentSplitLastEntry = (firstEntryId + rereplicationEntryBatchSize) - 1;
        fragments.add(new LedgerFragment(lh, firstEntryId,
                fragmentSplitLastEntry, ledgerFragment.getBookiesIndexes()));
        firstEntryId = fragmentSplitLastEntry + 1;
    }

    // the remainder, if the entry count is not a multiple of the batch size
    long lastSplitWithPartialEntries = numberOfEntriesToReplicate
            % rereplicationEntryBatchSize;
    if (lastSplitWithPartialEntries > 0) {
        fragments.add(new LedgerFragment(lh, firstEntryId, firstEntryId
                + lastSplitWithPartialEntries - 1, ledgerFragment
                .getBookiesIndexes()));
    }
    return fragments;
}
/**
 * This method asynchronously recovers a specific ledger entry by reading
 * the values via the BookKeeper Client (which would read it from the other
 * replicas) and then writing it to the chosen new bookie.
 *
 * @param entryId
 *            Ledger Entry ID to recover.
 * @param lh
 *            LedgerHandle for the ledger
 * @param ledgerFragmentEntryMcb
 *            MultiCallback to invoke once we've recovered the current
 *            ledger entry.
 * @param newBookies
 *            New bookies we want to use to recover and replicate the ledger
 *            entries that were stored on the failed bookie.
 * @param onReadEntryFailureCallback
 *            invoked with (ledgerId, entryId) when the read of the entry
 *            fails.
 * @throws InterruptedException
 *             if interrupted while acquiring the replication throttle.
 */
private void recoverLedgerFragmentEntry(final Long entryId,
        final LedgerHandle lh,
        final AsyncCallback.VoidCallback ledgerFragmentEntryMcb,
        final Set<BookieId> newBookies,
        final BiConsumer<Long, Long> onReadEntryFailureCallback) throws InterruptedException {
    final long ledgerId = lh.getId();
    // number of bookies that have acked the write so far; the entry is only
    // done once every bookie in newBookies has acked
    final AtomicInteger numCompleted = new AtomicInteger(0);
    // ensures ledgerFragmentEntryMcb is invoked at most once
    final AtomicBoolean completed = new AtomicBoolean(false);
    if (replicationThrottle != null) {
        // throttle using the running estimate of entry size, before reading
        replicationThrottle.acquire(averageEntrySize.get());
    }
    // shared write callback for all target bookies of this entry
    final WriteCallback multiWriteCallback = new WriteCallback() {
        @Override
        public void writeComplete(int rc, long ledgerId, long entryId, BookieId addr, Object ctx) {
            if (rc != BKException.Code.OK) {
                LOG.error("BK error writing entry for ledgerId: {}, entryId: {}, bookie: {}",
                        ledgerId, entryId, addr, BKException.create(rc));
                // first failure reports the error; later callbacks are ignored
                if (completed.compareAndSet(false, true)) {
                    ledgerFragmentEntryMcb.processResult(rc, null, null);
                }
            } else {
                numEntriesWritten.inc();
                // ctx carries the entry's data length (see addEntry below)
                if (ctx instanceof Long) {
                    numBytesWritten.registerSuccessfulValue((Long) ctx);
                }
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Success writing ledger id {}, entry id {} to a new bookie {}!",
                            ledgerId, entryId, addr);
                }
                // complete successfully only after all target bookies acked
                if (numCompleted.incrementAndGet() == newBookies.size() && completed.compareAndSet(false, true)) {
                    ledgerFragmentEntryMcb.processResult(rc, null, null);
                }
            }
        }
    };
    long startReadEntryTime = MathUtils.nowInNano();
    /*
     * Read the ledger entry using the LedgerHandle. This will allow us to
     * read the entry from one of the other replicated bookies other than
     * the dead one.
     */
    lh.asyncReadEntries(entryId, entryId, new ReadCallback() {
        @Override
        public void readComplete(int rc, LedgerHandle lh,
                Enumeration<LedgerEntry> seq, Object ctx) {
            if (rc != BKException.Code.OK) {
                LOG.error("BK error reading ledger entry: " + entryId,
                        BKException.create(rc));
                onReadEntryFailureCallback.accept(ledgerId, entryId);
                ledgerFragmentEntryMcb.processResult(rc, null, null);
                return;
            }
            readDataLatency.registerSuccessfulEvent(MathUtils.elapsedNanos(startReadEntryTime),
                    TimeUnit.NANOSECONDS);
            /*
             * Now that we've read the ledger entry, write it to the new
             * bookie we've selected.
             */
            LedgerEntry entry = seq.nextElement();
            byte[] data = entry.getEntry();
            final long dataLength = data.length;
            numEntriesRead.inc();
            numBytesRead.registerSuccessfulValue(dataLength);
            // re-package the entry with digest for sending to the bookies
            ReferenceCounted toSend = lh.getDigestManager()
                    .computeDigestAndPackageForSending(entryId,
                            lh.getLastAddConfirmed(), entry.getLength(),
                            Unpooled.wrappedBuffer(data, 0, data.length),
                            lh.getLedgerKey(),
                            0
                    );
            if (replicationThrottle != null) {
                // refresh the moving-average entry size used for throttling
                if (toSend instanceof ByteBuf) {
                    updateAverageEntrySize(((ByteBuf) toSend).readableBytes());
                } else if (toSend instanceof ByteBufList) {
                    updateAverageEntrySize(((ByteBufList) toSend).readableBytes());
                }
            }
            // fan out the write to every target bookie; dataLength is passed
            // as ctx so the write callback can record bytes written
            for (BookieId newBookie : newBookies) {
                long startWriteEntryTime = MathUtils.nowInNano();
                bkc.getBookieClient().addEntry(newBookie, lh.getId(),
                        lh.getLedgerKey(), entryId, toSend,
                        multiWriteCallback, dataLength, BookieProtocol.FLAG_RECOVERY_ADD,
                        false, WriteFlag.NONE);
                writeDataLatency.registerSuccessfulEvent(
                        MathUtils.elapsedNanos(startWriteEntryTime), TimeUnit.NANOSECONDS);
            }
            // release our reference; addEntry retains what it needs
            toSend.release();
        }
    }, null);
}
/**
 * Fold the latest entry size into the exponential moving average used by the
 * replication throttle: keep {@code AVERAGE_ENTRY_SIZE_RATIO} of the previous
 * estimate and blend in the remainder from {@code toSendSize}.
 */
private void updateAverageEntrySize(int toSendSize) {
    averageEntrySize.updateAndGet(current ->
            (int) (AVERAGE_ENTRY_SIZE_RATIO * current
                    + (1 - AVERAGE_ENTRY_SIZE_RATIO) * toSendSize));
}
/**
 * Callback for the recovery of a single ledger fragment. Once every entry of
 * the fragment has been replicated, the ledger's ensemble is updated in the
 * metadata store; the result is then propagated to {@code ledgerFragmentsMcb},
 * which is expected to be a multi-callback covering all fragments of the
 * ledger.
 */
static class SingleFragmentCallback implements AsyncCallback.VoidCallback {
    final AsyncCallback.VoidCallback ledgerFragmentsMcb;
    final LedgerHandle lh;
    final LedgerManager ledgerManager;
    final long fragmentStartId;
    final Map<BookieId, BookieId> oldBookie2NewBookie;

    SingleFragmentCallback(AsyncCallback.VoidCallback ledgerFragmentsMcb,
                           LedgerHandle lh, LedgerManager ledgerManager, long fragmentStartId,
                           Map<BookieId, BookieId> oldBookie2NewBookie) {
        this.ledgerFragmentsMcb = ledgerFragmentsMcb;
        this.lh = lh;
        this.ledgerManager = ledgerManager;
        this.fragmentStartId = fragmentStartId;
        this.oldBookie2NewBookie = oldBookie2NewBookie;
    }

    @Override
    public void processResult(int rc, String path, Object ctx) {
        if (rc == BKException.Code.OK) {
            // replication succeeded: swap old bookies for new ones in the
            // ensemble recorded in the metadata store
            updateEnsembleInfo(ledgerManager, ledgerFragmentsMcb, fragmentStartId, lh, oldBookie2NewBookie);
            return;
        }
        LOG.error("BK error replicating ledger fragments for ledger: "
                + lh.getId(), BKException.create(rc));
        ledgerFragmentsMcb.processResult(rc, null, null);
    }
}
/**
 * Updates the ensemble with newBookie and notify the ensembleUpdatedCb.
 *
 * <p>Runs a compare-and-swap style metadata update loop: the update is retried
 * as long as the predicate still sees one of the old bookies in the ensemble
 * at {@code fragmentStartId}, so concurrent metadata changes are tolerated.
 */
private static void updateEnsembleInfo(
        LedgerManager ledgerManager, AsyncCallback.VoidCallback ensembleUpdatedCb, long fragmentStartId,
        LedgerHandle lh, Map<BookieId, BookieId> oldBookie2NewBookie) {
    MetadataUpdateLoop updateLoop = new MetadataUpdateLoop(
            ledgerManager,
            lh.getId(),
            lh::getVersionedLedgerMetadata,
            (metadata) -> {
                // returns true if any of old bookies exist in ensemble
                List<BookieId> ensemble = metadata.getAllEnsembles().get(fragmentStartId);
                return oldBookie2NewBookie.keySet().stream().anyMatch(ensemble::contains);
            },
            (currentMetadata) -> {
                // replace all old bookies with new bookies in ensemble
                List<BookieId> newEnsemble = currentMetadata.getAllEnsembles().get(fragmentStartId)
                    .stream().map((bookie) -> oldBookie2NewBookie.getOrDefault(bookie, bookie))
                    .collect(Collectors.toList());
                return LedgerMetadataBuilder.from(currentMetadata)
                    .replaceEnsembleEntry(fragmentStartId, newEnsemble).build();
            },
            lh::setLedgerMetadata);

    updateLoop.run().whenComplete((result, ex) -> {
            // translate the future's outcome into the callback convention
            if (ex == null) {
                LOG.info("Updated ZK to point ledger fragments"
                         + " from old bookies to new bookies: {}", oldBookie2NewBookie);

                ensembleUpdatedCb.processResult(BKException.Code.OK, null, null);
            } else {
                LOG.error("Error updating ledger config metadata for ledgerId {}", lh.getId(), ex);

                ensembleUpdatedCb.processResult(
                        BKException.getExceptionCode(ex, BKException.Code.UnexpectedConditionException),
                        null, null);
            }
        });
}
/**
 * Byte/entry-rate throttle for replication, backed by a Guava
 * {@code RateLimiter}.
 */
static class Throttler {
    private final RateLimiter limiter;

    Throttler(int throttleBytes) {
        limiter = RateLimiter.create(throttleBytes);
    }

    /** Reset the limiter's rate before compacting an entry log file. */
    void resetRate(int throttleBytes) {
        limiter.setRate(throttleBytes);
    }

    /** Current rate of the limiter; exposed for unit tests. */
    double getRate() {
        return limiter.getRate();
    }

    /**
     * Block until the permits are available. When throttling by bytes pass
     * the entry's byte count; when throttling by entries pass 1.
     */
    void acquire(int permits) {
        limiter.acquire(permits);
    }
}
}
| 315 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BKException.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import java.util.function.Function;
/**
 * Class the enumerates all the possible error conditions.
 *
 * <P>This class is going to be deprecated soon, please use the new class
 * {@link org.apache.bookkeeper.client.api.BKException}
 */
@SuppressWarnings("serial")
public abstract class BKException extends org.apache.bookkeeper.client.api.BKException {

    /**
     * Converts any throwable into a {@code BKException}: passes BKExceptions
     * through unchanged, wraps everything else (including null-safety for a
     * null cause) in a {@link BKUnexpectedConditionException}.
     */
    public static final Function<Throwable, BKException> HANDLER = cause -> {
        if (cause == null) {
            return null;
        }
        if (cause instanceof BKException) {
            return (BKException) cause;
        } else {
            BKException ex = new BKUnexpectedConditionException();
            ex.initCause(cause);
            return ex;
        }
    };

    BKException(int code) {
        super(code);
    }

    BKException(int code, Throwable cause) {
        super(code, cause);
    }

    /**
     * Create an exception from an error code.
     *
     * <p>Unknown codes map to {@link BKUnexpectedConditionException}.
     *
     * @param code return error code
     * @return corresponding exception
     */
    public static BKException create(int code) {
        switch (code) {
        case Code.ReadException:
            return new BKReadException();
        case Code.QuorumException:
            return new BKQuorumException();
        case Code.NoBookieAvailableException:
            return new BKBookieException();
        case Code.DigestNotInitializedException:
            return new BKDigestNotInitializedException();
        case Code.DigestMatchException:
            return new BKDigestMatchException();
        case Code.NotEnoughBookiesException:
            return new BKNotEnoughBookiesException();
        case Code.NoSuchLedgerExistsException:
            return new BKNoSuchLedgerExistsException();
        case Code.NoSuchLedgerExistsOnMetadataServerException:
            return new BKNoSuchLedgerExistsOnMetadataServerException();
        case Code.BookieHandleNotAvailableException:
            return new BKBookieHandleNotAvailableException();
        case Code.ZKException:
            return new ZKException();
        case Code.MetaStoreException:
            return new MetaStoreException();
        case Code.LedgerRecoveryException:
            return new BKLedgerRecoveryException();
        case Code.LedgerClosedException:
            return new BKLedgerClosedException();
        case Code.WriteException:
            return new BKWriteException();
        case Code.NoSuchEntryException:
            return new BKNoSuchEntryException();
        case Code.IncorrectParameterException:
            return new BKIncorrectParameterException();
        case Code.InterruptedException:
            return new BKInterruptedException();
        case Code.ProtocolVersionException:
            return new BKProtocolVersionException();
        case Code.MetadataVersionException:
            return new BKMetadataVersionException();
        case Code.LedgerFencedException:
            return new BKLedgerFencedException();
        case Code.UnauthorizedAccessException:
            return new BKUnauthorizedAccessException();
        case Code.UnclosedFragmentException:
            return new BKUnclosedFragmentException();
        case Code.WriteOnReadOnlyBookieException:
            return new BKWriteOnReadOnlyBookieException();
        case Code.TooManyRequestsException:
            return new BKTooManyRequestsException();
        case Code.ReplicationException:
            return new BKReplicationException();
        case Code.ClientClosedException:
            return new BKClientClosedException();
        case Code.LedgerExistException:
            return new BKLedgerExistException();
        case Code.IllegalOpException:
            return new BKIllegalOpException();
        case Code.AddEntryQuorumTimeoutException:
            return new BKAddEntryQuorumTimeoutException();
        case Code.DuplicateEntryIdException:
            return new BKDuplicateEntryIdException();
        case Code.TimeoutException:
            return new BKTimeoutException();
        case Code.LedgerIdOverflowException:
            return new BKLedgerIdOverflowException();
        case Code.SecurityException:
            return new BKSecurityException();
        case Code.MetadataSerializationException:
            return new BKMetadataSerializationException();
        case Code.DataUnknownException:
            // was previously missing: without this case DataUnknownException
            // codes were collapsed into BKUnexpectedConditionException
            return new BKDataUnknownException();
        default:
            return new BKUnexpectedConditionException();
        }
    }

    /**
     * Legacy interface which holds constants for BookKeeper error codes.
     * The list has been moved to {@link BKException}
     */
    public interface Code extends org.apache.bookkeeper.client.api.BKException.Code {
    }

    /**
     * Bookkeeper security exception.
     */
    public static class BKSecurityException extends BKException {
        public BKSecurityException() {
            super(BKException.Code.SecurityException);
        }
    }

    /**
     * Bookkeeper read exception.
     */
    public static class BKReadException extends BKException {
        public BKReadException() {
            super(BKException.Code.ReadException);
        }
    }

    /**
     * Bookkeeper no such entry exception.
     */
    public static class BKNoSuchEntryException extends BKException {
        public BKNoSuchEntryException() {
            super(BKException.Code.NoSuchEntryException);
        }
    }

    /**
     * Bookkeeper quorum exception.
     */
    public static class BKQuorumException extends BKException {
        public BKQuorumException() {
            super(BKException.Code.QuorumException);
        }
    }

    /**
     * Bookkeeper bookie exception.
     */
    public static class BKBookieException extends BKException {
        public BKBookieException() {
            super(BKException.Code.NoBookieAvailableException);
        }
    }

    /**
     * Bookkeeper digest not initialized exception.
     */
    public static class BKDigestNotInitializedException extends BKException {
        public BKDigestNotInitializedException() {
            super(BKException.Code.DigestNotInitializedException);
        }
    }

    /**
     * Bookkeeper digest match exception.
     */
    public static class BKDigestMatchException extends BKException {
        public BKDigestMatchException() {
            super(BKException.Code.DigestMatchException);
        }
    }

    /**
     * Bookkeeper illegal operation exception.
     */
    public static class BKIllegalOpException extends BKException {
        public BKIllegalOpException() {
            super(BKException.Code.IllegalOpException);
        }
    }

    /**
     * Bookkeeper add entry quorum timeout exception.
     */
    public static class BKAddEntryQuorumTimeoutException extends BKException {
        public BKAddEntryQuorumTimeoutException() {
            super(BKException.Code.AddEntryQuorumTimeoutException);
        }
    }

    /**
     * Bookkeeper duplicate entry id exception.
     */
    public static class BKDuplicateEntryIdException extends BKException {
        public BKDuplicateEntryIdException() {
            super(BKException.Code.DuplicateEntryIdException);
        }
    }

    /**
     * Bookkeeper unexpected condition exception.
     */
    public static class BKUnexpectedConditionException extends BKException {
        public BKUnexpectedConditionException() {
            super(BKException.Code.UnexpectedConditionException);
        }
    }

    /**
     * Bookkeeper not enough bookies exception.
     */
    public static class BKNotEnoughBookiesException extends BKException {
        public BKNotEnoughBookiesException() {
            super(BKException.Code.NotEnoughBookiesException);
        }
        public BKNotEnoughBookiesException(Throwable cause) {
            super(BKException.Code.NotEnoughBookiesException, cause);
        }
    }

    /**
     * Bookkeeper write exception.
     */
    public static class BKWriteException extends BKException {
        public BKWriteException() {
            super(BKException.Code.WriteException);
        }
    }

    /**
     * Bookkeeper protocol version exception.
     */
    public static class BKProtocolVersionException extends BKException {
        public BKProtocolVersionException() {
            super(BKException.Code.ProtocolVersionException);
        }
    }

    /**
     * Bookkeeper metadata version exception.
     */
    public static class BKMetadataVersionException extends BKException {
        public BKMetadataVersionException() {
            super(BKException.Code.MetadataVersionException);
        }
    }

    /**
     * Bookkeeper no such ledger exists exception.
     */
    public static class BKNoSuchLedgerExistsException extends BKException {
        public BKNoSuchLedgerExistsException() {
            super(BKException.Code.NoSuchLedgerExistsException);
        }
    }

    /**
     * Bookkeeper no such ledger exists on metadata server exception.
     */
    public static class BKNoSuchLedgerExistsOnMetadataServerException extends BKException {
        public BKNoSuchLedgerExistsOnMetadataServerException() {
            super(BKException.Code.NoSuchLedgerExistsOnMetadataServerException);
        }
    }

    /**
     * Bookkeeper bookie handle not available exception.
     */
    public static class BKBookieHandleNotAvailableException extends BKException {
        public BKBookieHandleNotAvailableException() {
            super(BKException.Code.BookieHandleNotAvailableException);
        }
    }

    /**
     * Zookeeper exception.
     */
    public static class ZKException extends BKException {
        public ZKException() {
            super(BKException.Code.ZKException);
        }
        public ZKException(Throwable cause) {
            super(BKException.Code.ZKException, cause);
        }
    }

    /**
     * Metastore exception.
     */
    public static class MetaStoreException extends BKException {
        public MetaStoreException() {
            super(BKException.Code.MetaStoreException);
        }

        public MetaStoreException(Throwable cause) {
            super(BKException.Code.MetaStoreException, cause);
        }
    }

    /**
     * Bookkeeper ledger exist exception.
     */
    public static class BKLedgerExistException extends BKException {
        public BKLedgerExistException() {
            super(BKException.Code.LedgerExistException);
        }
    }

    /**
     * Bookkeeper ledger recovery exception.
     */
    public static class BKLedgerRecoveryException extends BKException {
        public BKLedgerRecoveryException() {
            super(BKException.Code.LedgerRecoveryException);
        }
    }

    /**
     * Bookkeeper ledger closed exception.
     */
    public static class BKLedgerClosedException extends BKException {
        public BKLedgerClosedException() {
            super(BKException.Code.LedgerClosedException);
        }
    }

    /**
     * Bookkeeper incorrect parameter exception.
     */
    public static class BKIncorrectParameterException extends BKException {
        public BKIncorrectParameterException() {
            super(BKException.Code.IncorrectParameterException);
        }
    }

    /**
     * Bookkeeper interrupted exception.
     */
    public static class BKInterruptedException extends BKException {
        public BKInterruptedException() {
            super(BKException.Code.InterruptedException);
        }
    }

    /**
     * Bookkeeper ledger fenced exception.
     */
    public static class BKLedgerFencedException extends BKException {
        public BKLedgerFencedException() {
            super(BKException.Code.LedgerFencedException);
        }
    }

    /**
     * Bookkeeper unauthorized access exception.
     */
    public static class BKUnauthorizedAccessException extends BKException {
        public BKUnauthorizedAccessException() {
            super(BKException.Code.UnauthorizedAccessException);
        }
    }

    /**
     * Bookkeeper unclosed fragment exception.
     */
    public static class BKUnclosedFragmentException extends BKException {
        public BKUnclosedFragmentException() {
            super(BKException.Code.UnclosedFragmentException);
        }
    }

    /**
     * Bookkeeper write on readonly bookie exception.
     */
    public static class BKWriteOnReadOnlyBookieException extends BKException {
        public BKWriteOnReadOnlyBookieException() {
            super(BKException.Code.WriteOnReadOnlyBookieException);
        }
    }

    /**
     * Bookkeeper too many requests exception.
     */
    public static class BKTooManyRequestsException extends BKException {
        public BKTooManyRequestsException() {
            super(BKException.Code.TooManyRequestsException);
        }
    }

    /**
     * Bookkeeper replication exception.
     */
    public static class BKReplicationException extends BKException {
        public BKReplicationException() {
            super(BKException.Code.ReplicationException);
        }
    }

    /**
     * Bookkeeper client closed exception.
     */
    public static class BKClientClosedException extends BKException {
        public BKClientClosedException() {
            super(BKException.Code.ClientClosedException);
        }
    }

    /**
     * Bookkeeper timeout exception.
     */
    public static class BKTimeoutException extends BKException {
        public BKTimeoutException() {
            super(BKException.Code.TimeoutException);
        }
    }

    /**
     * Bookkeeper ledger id overflow exception.
     */
    public static class BKLedgerIdOverflowException extends BKException {
        public BKLedgerIdOverflowException() {
            super(BKException.Code.LedgerIdOverflowException);
        }
    }

    /**
     * Bookkeeper metadata serialization exception.
     */
    public static class BKMetadataSerializationException extends BKException {
        public BKMetadataSerializationException() {
            super(BKException.Code.MetadataSerializationException);
        }

        public BKMetadataSerializationException(Throwable cause) {
            super(BKException.Code.MetadataSerializationException, cause);
        }
    }

    /**
     * Bookkeeper ledger in limbo and data may or may not exist.
     */
    public static class BKDataUnknownException extends BKException {
        public BKDataUnknownException() {
            super(BKException.Code.DataUnknownException);
        }
    }

    /**
     * Extract an exception code from an BKException, or use a default if it's another type.
     * If the throwable is null, assume that no exception took place and return
     * {@link BKException.Code#OK}.
     */
    public static int getExceptionCode(Throwable t, int defaultCode) {
        if (t == null) {
            return BKException.Code.OK;
        } else if (t instanceof BKException) {
            return ((BKException) t).getCode();
        } else if (t.getCause() != null) {
            // walk the cause chain looking for a BKException
            return getExceptionCode(t.getCause(), defaultCode);
        } else {
            return defaultCode;
        }
    }

    /**
     * Extract an exception code from an BKException, or default to unexpected exception if throwable
     * is not a BKException.
     *
     * @see #getExceptionCode(Throwable,int)
     */
    public static int getExceptionCode(Throwable t) {
        return getExceptionCode(t, Code.UnexpectedConditionException);
    }
}
| 316 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/ReadOnlyLedgerHandle.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkState;
import com.google.common.annotations.VisibleForTesting;
import java.security.GeneralSecurityException;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.RejectedExecutionException;
import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
import org.apache.bookkeeper.client.AsyncCallback.CloseCallback;
import org.apache.bookkeeper.client.AsyncCallback.ReadCallback;
import org.apache.bookkeeper.client.AsyncCallback.ReadLastConfirmedCallback;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.client.api.WriteFlag;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.LedgerMetadataListener;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryListener;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.TimedGenericCallback;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Versioned;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Read only ledger handle. This ledger handle allows you to
* read from a ledger but not to write to it. It overrides all
* the public write operations from LedgerHandle.
* It should be returned for BookKeeper#openLedger operations.
*/
class ReadOnlyLedgerHandle extends LedgerHandle implements LedgerMetadataListener {
private static final Logger LOG = LoggerFactory.getLogger(ReadOnlyLedgerHandle.class);
private Object metadataLock = new Object();
private final NavigableMap<Long, List<BookieId>> newEnsemblesFromRecovery = new TreeMap<>();
/**
 * Task that applies a newer versioned metadata snapshot to this handle.
 * Retries the local compare-and-set until either the snapshot is applied or
 * the handle's metadata is no longer older than the snapshot.
 */
class MetadataUpdater implements Runnable {

    final Versioned<LedgerMetadata> newMetadata;

    MetadataUpdater(Versioned<LedgerMetadata> metadata) {
        this.newMetadata = metadata;
    }

    @Override
    public void run() {
        for (;;) {
            Versioned<LedgerMetadata> current = getVersionedLedgerMetadata();
            // only apply if the local copy is strictly older than the update
            if (Version.Occurred.BEFORE != current.getVersion().compare(newMetadata.getVersion())) {
                return;
            }
            synchronized (ReadOnlyLedgerHandle.this) {
                if (setLedgerMetadata(current, newMetadata)) {
                    LOG.info("Updated ledger metadata for ledger {} to {}, version {}.",
                             ledgerId, newMetadata.getValue().toSafeString(), newMetadata.getVersion());
                    return;
                }
            }
            // CAS lost to a concurrent update; re-read and try again
        }
    }

    @Override
    public String toString() {
        return String.format("MetadataUpdater(%d)", ledgerId);
    }
}
/**
 * Create a read only handle for an existing ledger.
 *
 * @param clientCtx client context (ledger manager, worker pools, stats)
 * @param ledgerId id of the ledger to open
 * @param metadata current versioned metadata of the ledger
 * @param digestType digest type used to verify entries
 * @param password ledger password
 * @param watch if true, register for metadata updates so the local copy
 *        stays in sync with the metadata store
 * @throws GeneralSecurityException if the digest manager cannot be set up
 */
ReadOnlyLedgerHandle(ClientContext clientCtx,
                     long ledgerId, Versioned<LedgerMetadata> metadata,
                     BookKeeper.DigestType digestType, byte[] password,
                     boolean watch)
        throws GeneralSecurityException, NumberFormatException {
    super(clientCtx, ledgerId, metadata, digestType, password, WriteFlag.NONE);
    if (watch) {
        clientCtx.getLedgerManager().registerLedgerMetadataListener(ledgerId, this);
    }
}
/**
 * Close the read only handle: only unregisters the metadata listener; there
 * is no write state to flush and no metadata to persist.
 */
@Override
public void close()
        throws InterruptedException, BKException {
    clientCtx.getLedgerManager().unregisterLedgerMetadataListener(ledgerId, this);
}
/**
 * Asynchronous close: unregister the metadata listener and complete the
 * callback immediately with OK (nothing needs to be written out).
 */
@Override
public void asyncClose(CloseCallback cb, Object ctx) {
    clientCtx.getLedgerManager().unregisterLedgerMetadataListener(ledgerId, this);
    cb.closeComplete(BKException.Code.OK, this, ctx);
}
/**
 * Delegates to the 3-arg overload, which always rejects the write since this
 * is a read only handle.
 */
@Override
public long addEntry(byte[] data) throws InterruptedException, BKException {
    return addEntry(data, 0, data.length);
}
/**
 * Writes are not allowed on a read only handle.
 *
 * @throws BKException always, with {@code IllegalOpException}
 */
@Override
public long addEntry(byte[] data, int offset, int length)
        throws InterruptedException, BKException {
    LOG.error("Tried to add entry on a Read-Only ledger handle, ledgerid=" + ledgerId);
    throw BKException.create(BKException.Code.IllegalOpException);
}
/**
 * Delegates to the 5-arg overload, which always fails the callback since this
 * is a read only handle.
 */
@Override
public void asyncAddEntry(final byte[] data, final AddCallback cb,
                          final Object ctx) {
    asyncAddEntry(data, 0, data.length, cb, ctx);
}
/**
 * Writes are not allowed on a read only handle: the callback is completed
 * immediately with {@code IllegalOpException} and an invalid entry id.
 */
@Override
public void asyncAddEntry(final byte[] data, final int offset, final int length,
                          final AddCallback cb, final Object ctx) {
    LOG.error("Tried to add entry on a Read-Only ledger handle, ledgerid=" + ledgerId);
    cb.addComplete(BKException.Code.IllegalOpException, this,
                   LedgerHandle.INVALID_ENTRY_ID, ctx);
}
/**
 * Metadata-listener hook. When a strictly newer metadata version arrives for
 * this ledger, schedule a {@link MetadataUpdater} on the ordered executor so
 * updates for the same ledger are applied sequentially.
 */
@Override
public void onChanged(long lid, Versioned<LedgerMetadata> newMetadata) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Received ledger metadata update on {} : {}", lid, newMetadata);
    }
    // ignore notifications for other ledgers or empty payloads
    if (lid != this.ledgerId || null == newMetadata) {
        return;
    }
    Versioned<LedgerMetadata> currentMetadata = getVersionedLedgerMetadata();
    Version.Occurred occurred = currentMetadata.getVersion().compare(newMetadata.getVersion());
    if (LOG.isDebugEnabled()) {
        LOG.debug("Try to update metadata from {} to {} : {}",
                currentMetadata, newMetadata, occurred);
    }
    if (Version.Occurred.BEFORE != occurred) {
        // local copy is already as new (or newer); nothing to do
        return;
    }
    try {
        clientCtx.getMainWorkerPool().executeOrdered(ledgerId, new MetadataUpdater(newMetadata));
    } catch (RejectedExecutionException ree) {
        LOG.error("Failed on submitting updater to update ledger metadata on ledger {} : {}",
                ledgerId, newMetadata);
    }
}
/** Identify the handle by ledger id and object identity hash. */
@Override
public String toString() {
    return String.format("ReadOnlyLedgerHandle(lid = %d, id = %d)", ledgerId, super.hashCode());
}
/**
 * Skip write-handle initialization for read only handles; only install the
 * no-op explicit-LAC flush policy.
 */
@Override
protected void initializeWriteHandleState() {
    // Essentially a noop, we don't want to set up write handle state here for a ReadOnlyLedgerHandle
    explicitLacFlushPolicy = ExplicitLacFlushPolicy.VOID_EXPLICITLAC_FLUSH_POLICY;
}
/**
 * Read the last confirmed entry of the ledger: first fetch the LAC, then read
 * that single entry. Completes the callback with {@code NoSuchEntryException}
 * if the ledger is empty (LAC &lt; 0), or with the read error otherwise.
 */
@Override
public void asyncReadLastEntry(ReadCallback cb, Object ctx) {
    asyncReadLastConfirmed(new ReadLastConfirmedCallback() {
        @Override
        public void readLastConfirmedComplete(int rc, long lastConfirmed, Object ctx) {
            if (rc == BKException.Code.OK) {
                if (lastConfirmed < 0) {
                    // Ledger was empty, so there is no last entry to read
                    cb.readComplete(BKException.Code.NoSuchEntryException, ReadOnlyLedgerHandle.this, null, ctx);
                } else {
                    asyncReadEntriesInternal(lastConfirmed, lastConfirmed, cb, ctx, false);
                }
            } else {
                // fix: arguments were previously swapped relative to the
                // placeholders (lastConfirmed was logged as the ledger id)
                LOG.error("ReadException in asyncReadLastEntry, ledgerId: {}, lac: {}, rc:{}",
                        ledgerId, lastConfirmed, rc);
                cb.readComplete(rc, ReadOnlyLedgerHandle.this, null, ctx);
            }
        }
    }, ctx);
}
/**
 * For a read only ledger handle, this method will only ever be called during recovery,
 * when we are reading forward from LAC and writing back those entries. As such,
 * unlike with LedgerHandle, we do not want to persist changes to the metadata as they occur,
 * but rather, we want to defer the persistence until recovery has completed, and do it all
 * on the close.
 *
 * @param failedBookies map from ensemble index to the bookie that failed at
 *        that index
 */
@Override
void handleBookieFailure(final Map<Integer, BookieId> failedBookies) {
    // handleBookieFailure should always run in the ordered executor thread for this
    // ledger, so this synchronized should be unnecessary, but putting it here now
    // just in case (can be removed when we validate threads)
    synchronized (metadataLock) {
        String logContext = String.format("[RecoveryEnsembleChange(ledger:%d)]", ledgerId);

        long lac = getLastAddConfirmed();
        LedgerMetadata metadata = getLedgerMetadata();
        List<BookieId> currentEnsemble = getCurrentEnsemble();
        try {
            // pick replacement bookies for the failed ones
            List<BookieId> newEnsemble = EnsembleUtils.replaceBookiesInEnsemble(
                    clientCtx.getBookieWatcher(), metadata, currentEnsemble, failedBookies, logContext);

            Set<Integer> replaced = EnsembleUtils.diffEnsemble(currentEnsemble, newEnsemble);
            if (!replaced.isEmpty()) {
                // remember the change locally; it is persisted on close
                newEnsemblesFromRecovery.put(lac + 1, newEnsemble);
                unsetSuccessAndSendWriteRequest(newEnsemble, replaced);
            }
        } catch (BKException.BKNotEnoughBookiesException e) {
            LOG.error("Could not get additional bookie to remake ensemble, closing ledger: {}", ledgerId);

            handleUnrecoverableErrorDuringAdd(e.getCode());
            return;
        }
    }
}
/**
 * On an unrecoverable error during recovery writes, fail all pending adds
 * with the given result code (no metadata state to clean up here).
 */
@Override
void handleUnrecoverableErrorDuringAdd(int rc) {
    errorOutPendingAdds(rc);
}
/** Recover the ledger with no read listener and without forcing recovery. */
void recover(GenericCallback<Void> finalCb) {
    recover(finalCb, null, false);
}
/**
 * Recover the ledger.
 *
 * @param finalCb
 *          callback after recovery is done.
 * @param listener
 *          read entry listener on recovery reads.
 * @param forceRecovery
 *          force the recovery procedure even the ledger metadata shows the ledger is closed.
 */
void recover(GenericCallback<Void> finalCb,
             final @VisibleForTesting ReadEntryListener listener,
             final boolean forceRecovery) {
    // wrap the callback so recovery latency is recorded in client stats
    final GenericCallback<Void> cb = new TimedGenericCallback<Void>(
        finalCb,
        BKException.Code.OK,
        clientCtx.getClientStats().getRecoverOpLogger());

    MetadataUpdateLoop.NeedsUpdatePredicate needsUpdate =
        (metadata) -> metadata.getState() == LedgerMetadata.State.OPEN;
    if (forceRecovery) {
        // in the force recovery case, we want to update the metadata
        // to IN_RECOVERY, even if the ledger is already closed
        needsUpdate = (metadata) -> metadata.getState() != LedgerMetadata.State.IN_RECOVERY;
    }
    // 1) mark the ledger IN_RECOVERY in the metadata store,
    // 2) run the recovery op unless the ledger turned out to be closed,
    // 3) close with the recovered state, then report via cb
    new MetadataUpdateLoop(
            clientCtx.getLedgerManager(), getId(),
            this::getVersionedLedgerMetadata,
            needsUpdate,
            (metadata) -> LedgerMetadataBuilder.from(metadata).withInRecoveryState().build(),
            this::setLedgerMetadata)
        .run()
        .thenCompose((metadata) -> {
                if (metadata.getValue().isClosed()) {
                    return CompletableFuture.completedFuture(ReadOnlyLedgerHandle.this);
                } else {
                    return new LedgerRecoveryOp(ReadOnlyLedgerHandle.this, clientCtx)
                        .setEntryListener(listener)
                        .initiate();
                }
            })
        .thenCompose((ignore) -> closeRecovered())
        .whenComplete((ignore, ex) -> {
                if (ex != null) {
                    cb.operationComplete(
                            BKException.getExceptionCode(ex, BKException.Code.UnexpectedConditionException), null);
                } else {
                    cb.operationComplete(BKException.Code.OK, null);
                }
            });
}
/**
 * Writes the final closed metadata for a recovered ledger: merges any
 * ensembles staged by {@code handleBookieFailure} during recovery and stamps
 * the recovered last-add-confirmed and length.
 *
 * @return future holding the versioned metadata that was written.
 */
CompletableFuture<Versioned<LedgerMetadata>> closeRecovered() {
    long lac, len;
    // Snapshot LAC and length under the handle lock so they are consistent.
    synchronized (this) {
        lac = lastAddConfirmed;
        len = length;
    }
    LOG.info("Closing recovered ledger {} at entry {}", getId(), lac);
    CompletableFuture<Versioned<LedgerMetadata>> f = new MetadataUpdateLoop(
            clientCtx.getLedgerManager(), getId(),
            this::getVersionedLedgerMetadata,
            // Only a ledger still marked IN_RECOVERY needs this update.
            (metadata) -> metadata.getState() == LedgerMetadata.State.IN_RECOVERY,
            (metadata) -> {
                LedgerMetadataBuilder builder = LedgerMetadataBuilder.from(metadata);
                Long lastEnsembleKey = LedgerMetadataUtils.getLastEnsembleKey(metadata);
                synchronized (metadataLock) {
                    newEnsemblesFromRecovery.entrySet().forEach(
                            (e) -> {
                                checkState(e.getKey() >= lastEnsembleKey,
                                        "Once a ledger is in recovery, noone can add ensembles without closing");
                                // Occurs when a bookie need to be replaced at very start of recovery
                                if (lastEnsembleKey.equals(e.getKey())) {
                                    builder.replaceEnsembleEntry(e.getKey(), e.getValue());
                                } else {
                                    builder.newEnsembleEntry(e.getKey(), e.getValue());
                                }
                            });
                }
                return builder.withClosedState().withLastEntryId(lac).withLength(len).build();
            },
            this::setLedgerMetadata).run();
    f.whenComplete((result, exception) -> {
        // Staged ensembles are only meaningful while recovery is in flight;
        // clear them whether or not the close succeeded.
        synchronized (metadataLock) {
            newEnsemblesFromRecovery.clear();
        }
        if (exception != null) {
            LOG.error("When closeRecovered,failed on clearing newEnsemblesFromRecovery.", exception);
        }
    });
    return f;
}
@Override
List<BookieId> getCurrentEnsemble() {
    synchronized (metadataLock) {
        // While recovery is in flight, the most recently staged replacement
        // ensemble (if any) takes precedence over the metadata's view.
        if (newEnsemblesFromRecovery.isEmpty()) {
            return super.getCurrentEnsemble();
        }
        return newEnsemblesFromRecovery.lastEntry().getValue();
    }
}
}
| 317 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/DefaultBookieAddressResolver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.discover.BookieServiceInfo;
import org.apache.bookkeeper.discover.RegistrationClient;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.BookieSocketAddress;
import org.apache.bookkeeper.proto.BookieAddressResolver;
/**
 * Resolve BookieIDs to Network addresses.
 *
 * <p>Resolution is backed by the {@link RegistrationClient}: the bookie's
 * published {@code bookie-rpc} endpoint is looked up and converted into a
 * {@link BookieSocketAddress}.
 */
@Slf4j
public class DefaultBookieAddressResolver implements BookieAddressResolver {

    private final RegistrationClient registrationClient;

    public DefaultBookieAddressResolver(RegistrationClient registrationClient) {
        this.registrationClient = registrationClient;
    }

    public RegistrationClient getRegistrationClient() {
        return registrationClient;
    }

    @Override
    public BookieSocketAddress resolve(BookieId bookieId) {
        try {
            BookieServiceInfo serviceInfo =
                    FutureUtils.result(registrationClient.getBookieServiceInfo(bookieId)).getValue();
            // Find the endpoint that advertises the binary bookie protocol.
            BookieServiceInfo.Endpoint rpcEndpoint = null;
            for (BookieServiceInfo.Endpoint candidate : serviceInfo.getEndpoints()) {
                if (candidate.getProtocol().equals("bookie-rpc")) {
                    rpcEndpoint = candidate;
                    break;
                }
            }
            if (rpcEndpoint == null) {
                throw new Exception("bookie " + bookieId + " does not publish a bookie-rpc endpoint");
            }
            BookieSocketAddress address = new BookieSocketAddress(rpcEndpoint.getHost(), rpcEndpoint.getPort());
            if (bookieId.toString().equals(address.toString())) {
                if (log.isDebugEnabled()) {
                    log.debug("Resolved {} as {}", bookieId, address);
                }
            } else {
                // only print if the information is useful
                log.info("Resolved {} as {}", bookieId, address);
            }
            return address;
        } catch (BKException.BKBookieHandleNotAvailableException ex) {
            // Unknown to the registration service; legacy "hostname:port" ids
            // can still be resolved directly.
            if (BookieSocketAddress.isDummyBookieIdForHostname(bookieId)) {
                if (log.isDebugEnabled()) {
                    log.debug("Resolving dummy bookie Id {} using legacy bookie resolver", bookieId);
                }
                return BookieSocketAddress.resolveLegacyBookieId(bookieId);
            }
            log.info("Cannot resolve {}, bookie is unknown {}", bookieId, ex.toString());
            throw new BookieIdNotResolvedException(bookieId, ex);
        } catch (Exception ex) {
            if (ex instanceof InterruptedException) {
                Thread.currentThread().interrupt();
            }
            throw new BookieIdNotResolvedException(bookieId, ex);
        }
    }
}
| 318 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/SpeculativeRequestExecutor.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import com.google.common.util.concurrent.ListenableFuture;
/**
 * Define an executor for issuing speculative requests.
 *
 * <p>If the implementation can issue a speculative read, it should return true
 * to indicate a speculative request should be issued. Otherwise, return false.
 *
 * @since 4.5
 */
public interface SpeculativeRequestExecutor {

    /**
     * Issues a speculative request and indicates if more speculative
     * requests should be issued.
     *
     * @return a future holding {@code true} if more speculative requests
     *         should be issued, {@code false} otherwise.
     */
    ListenableFuture<Boolean> issueSpeculativeRequest();
}
| 319 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/WeightedRandomSelectionImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Weighted random selection backed by a cumulative probability distribution.
 *
 * <p>{@link #updateMap(Map)} converts the weights into per-item selection
 * probabilities (zero-weight items get the minimum non-zero probability, and
 * probabilities may be capped at {@code maxProbabilityMultiplier} times the
 * median) and publishes them as a {@link TreeMap} keyed by the start of each
 * item's cumulative-probability interval. {@link #getNextRandom()} then picks
 * an item by drawing a uniform random number and looking up its interval.
 *
 * <p>Reads and updates of the distribution are guarded by a fair
 * read-write lock, so selection is safe concurrently with updates.
 */
class WeightedRandomSelectionImpl<T> implements WeightedRandomSelection<T> {
    static final Logger LOG = LoggerFactory.getLogger(WeightedRandomSelectionImpl.class);

    // Upper bound (exclusive) of the cumulative key space; set by updateMap().
    // Remains null until updateMap() has been called at least once.
    Double randomMax;
    // Cap on any item's probability, as a multiple of the median probability;
    // non-positive disables capping.
    int maxProbabilityMultiplier;
    Map<T, WeightedObject> map;
    // Start of each item's cumulative-probability interval -> item.
    TreeMap<Double, T> cummulativeMap = new TreeMap<Double, T>();
    ReadWriteLock rwLock = new ReentrantReadWriteLock(true);

    WeightedRandomSelectionImpl() {
        maxProbabilityMultiplier = -1;
    }

    WeightedRandomSelectionImpl(int maxMultiplier) {
        this.maxProbabilityMultiplier = maxMultiplier;
    }

    /**
     * Recomputes the cumulative probability distribution from the given weights.
     *
     * @param map item-to-weight mapping; retained as-is for later reference.
     */
    @Override
    public void updateMap(Map<T, WeightedObject> map) {
        // get the sum total of all the values; this will be used to
        // calculate the weighted probability later on.
        // Fix: accumulate into primitive longs — the previous version used boxed
        // Long locals, autoboxing on every loop iteration.
        long totalWeight = 0L;
        long min = Long.MAX_VALUE;
        List<WeightedObject> values = new ArrayList<WeightedObject>(map.values());
        // Fix: Long.compare is overflow-safe, unlike subtracting two long weights.
        Collections.sort(values, (o1, o2) -> Long.compare(o1.getWeight(), o2.getWeight()));
        for (int i = 0; i < values.size(); i++) {
            totalWeight += values.get(i).getWeight();
            if (values.get(i).getWeight() != 0 && min > values.get(i).getWeight()) {
                min = values.get(i).getWeight();
            }
        }
        double median = 0;
        if (totalWeight == 0) {
            // all the values are zeros; assign a value of 1 to all and the totalWeight equal
            // to the size of the values
            min = 1L;
            median = 1;
            totalWeight = (long) values.size();
        } else {
            int mid = values.size() / 2;
            if ((values.size() % 2) == 1) {
                median = values.get(mid).getWeight();
            } else {
                median = (double) (values.get(mid - 1).getWeight() + values.get(mid).getWeight()) / 2;
            }
        }
        double medianWeight, minWeight;
        medianWeight = median / (double) totalWeight;
        minWeight = (double) min / totalWeight;
        if (LOG.isDebugEnabled()) {
            LOG.debug("Updating weights map. MediaWeight: {} MinWeight: {}", medianWeight, minWeight);
        }
        double maxWeight = maxProbabilityMultiplier * medianWeight;
        Map<T, Double> weightMap = new HashMap<T, Double>();
        for (Map.Entry<T, WeightedObject> e : map.entrySet()) {
            double weightedProbability;
            if (e.getValue().getWeight() > 0) {
                weightedProbability = (double) e.getValue().getWeight() / (double) totalWeight;
            } else {
                // Zero-weight items are still selectable with minimum probability.
                weightedProbability = minWeight;
            }
            if (maxWeight > 0 && weightedProbability > maxWeight) {
                weightedProbability = maxWeight;
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Capping the probability to {} for {} Value: {}",
                            weightedProbability, e.getKey(), e.getValue());
                }
            }
            weightMap.put(e.getKey(), weightedProbability);
        }
        // The probability of picking a bookie randomly is defaultPickProbability
        // but we change that priority by looking at the weight that each bookie
        // carries.
        TreeMap<Double, T> tmpCummulativeMap = new TreeMap<Double, T>();
        double key = 0.0;
        for (Map.Entry<T, Double> e : weightMap.entrySet()) {
            tmpCummulativeMap.put(key, e.getKey());
            if (LOG.isDebugEnabled()) {
                LOG.debug("Key: {} Value: {} AssignedKey: {} AssignedWeight: {}",
                        e.getKey(), e.getValue(), key, e.getValue());
            }
            key += e.getValue();
        }
        // Publish the new distribution atomically w.r.t. getNextRandom().
        rwLock.writeLock().lock();
        try {
            this.map = map;
            cummulativeMap = tmpCummulativeMap;
            randomMax = key;
        } finally {
            rwLock.writeLock().unlock();
        }
    }

    /**
     * Picks an item at random, weighted by the distribution computed in
     * {@link #updateMap(Map)}. {@code updateMap} must have been called first
     * (otherwise {@code randomMax} is null and unboxing throws NPE, as before).
     */
    @Override
    public T getNextRandom() {
        rwLock.readLock().lock();
        try {
            // pick a random number between 0 and randMax
            double randomNum = randomMax * Math.random();
            // find the nearest key in the map corresponding to the randomNum
            Double key = cummulativeMap.floorKey(randomNum);
            return cummulativeMap.get(key);
        } finally {
            rwLock.readLock().unlock();
        }
    }

    @Override
    public void setMaxProbabilityMultiplier(int max) {
        this.maxProbabilityMultiplier = max;
    }

    @Override
    public T getNextRandom(Collection<T> selectedNodes) {
        throw new UnsupportedOperationException("getNextRandom is not implemented for WeightedRandomSelectionImpl");
    }
}
| 320 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/ZoneawareEnsemblePlacementPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import io.netty.util.HashedWheelTimer;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.feature.FeatureProvider;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.BookieNode;
import org.apache.bookkeeper.net.DNSToSwitchMapping;
import org.apache.bookkeeper.proto.BookieAddressResolver;
import org.apache.bookkeeper.stats.StatsLogger;
/**
 * A placement policy implementation use zone information for placing ensembles.
 *
 * <p>When a positive network-topology stabilize period is configured, two
 * copies of the underlying zone-aware policy are maintained: this instance
 * (initialized with the stabilize period forced to 0) and a {@code slave}
 * instance (initialized with the configured period). Ensemble creation and
 * bookie replacement try this instance first and fall back to the slave when
 * not enough bookies are available; cluster-change events are forwarded to
 * both instances so their topology views stay in sync.
 *
 * @see EnsemblePlacementPolicy
 */
public class ZoneawareEnsemblePlacementPolicy extends ZoneawareEnsemblePlacementPolicyImpl
        implements ITopologyAwareEnsemblePlacementPolicy<BookieNode> {

    // Fallback policy; non-null only when a stabilize period is configured.
    ZoneawareEnsemblePlacementPolicyImpl slave = null;

    public ZoneawareEnsemblePlacementPolicy() {
        super();
    }

    @Override
    public EnsemblePlacementPolicy initialize(ClientConfiguration conf,
            Optional<DNSToSwitchMapping> optionalDnsResolver, HashedWheelTimer timer,
            FeatureProvider featureProvider,
            StatsLogger statsLogger, BookieAddressResolver bookieAddressResolver) {
        if (conf.getNetworkTopologyStabilizePeriodSeconds() > 0) {
            // This instance runs with the stabilize period disabled...
            ClientConfiguration confClone = new ClientConfiguration(conf);
            confClone.setNetworkTopologyStabilizePeriodSeconds(0);
            super.initialize(confClone, optionalDnsResolver, timer, featureProvider,
                    statsLogger, bookieAddressResolver);
            // ...while the fallback keeps the configured period.
            slave = new ZoneawareEnsemblePlacementPolicyImpl();
            slave.initialize(conf, optionalDnsResolver, timer, featureProvider, statsLogger, bookieAddressResolver);
        } else {
            super.initialize(conf, optionalDnsResolver, timer, featureProvider, statsLogger, bookieAddressResolver);
            slave = null;
        }
        return this;
    }

    @Override
    public void uninitalize() {
        super.uninitalize();
        if (null != slave) {
            slave.uninitalize();
        }
    }

    @Override
    public Set<BookieId> onClusterChanged(Set<BookieId> writableBookies,
            Set<BookieId> readOnlyBookies) {
        // Forward to both instances; when the fallback exists, its computed set
        // of dead bookies is the one returned.
        Set<BookieId> deadBookies = super.onClusterChanged(writableBookies, readOnlyBookies);
        if (null != slave) {
            deadBookies = slave.onClusterChanged(writableBookies, readOnlyBookies);
        }
        return deadBookies;
    }

    @Override
    public PlacementResult<List<BookieId>> newEnsemble(int ensembleSize, int writeQuorumSize,
            int ackQuorumSize, Map<String, byte[]> customMetadata, Set<BookieId> excludeBookies)
            throws BKException.BKNotEnoughBookiesException {
        try {
            return super.newEnsemble(ensembleSize, writeQuorumSize, ackQuorumSize, customMetadata, excludeBookies);
        } catch (BKException.BKNotEnoughBookiesException bnebe) {
            // Retry against the fallback instance before giving up.
            if (slave == null) {
                throw bnebe;
            } else {
                return slave.newEnsemble(ensembleSize, writeQuorumSize, ackQuorumSize, customMetadata, excludeBookies);
            }
        }
    }

    @Override
    public PlacementResult<BookieId> replaceBookie(int ensembleSize, int writeQuorumSize, int ackQuorumSize,
            Map<String, byte[]> customMetadata, List<BookieId> currentEnsemble,
            BookieId bookieToReplace, Set<BookieId> excludeBookies)
            throws BKException.BKNotEnoughBookiesException {
        try {
            return super.replaceBookie(ensembleSize, writeQuorumSize, ackQuorumSize, customMetadata,
                    currentEnsemble, bookieToReplace, excludeBookies);
        } catch (BKException.BKNotEnoughBookiesException bnebe) {
            // Retry against the fallback instance before giving up.
            if (slave == null) {
                throw bnebe;
            } else {
                return slave.replaceBookie(ensembleSize, writeQuorumSize, ackQuorumSize, customMetadata,
                        currentEnsemble, bookieToReplace, excludeBookies);
            }
        }
    }

    @Override
    public void handleBookiesThatLeft(Set<BookieId> leftBookies) {
        super.handleBookiesThatLeft(leftBookies);
        if (null != slave) {
            slave.handleBookiesThatLeft(leftBookies);
        }
    }

    @Override
    public void handleBookiesThatJoined(Set<BookieId> joinedBookies) {
        super.handleBookiesThatJoined(joinedBookies);
        if (null != slave) {
            slave.handleBookiesThatJoined(joinedBookies);
        }
    }
}
| 321 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/RackChangeNotifier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import org.apache.bookkeeper.net.BookieNode;
/**
 * Notifier used by the RackawareEnsemblePlacementPolicy to get notified if a
 * rack changes for a bookie.
 */
public interface RackChangeNotifier {

    /**
     * Register a listener for the rack-aware placement policy.
     *
     * @param rackawarePolicy the placement policy to notify when a bookie's
     *                        rack assignment changes.
     */
    void registerRackChangeListener(
            ITopologyAwareEnsemblePlacementPolicy<BookieNode> rackawarePolicy);
}
| 322 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/PendingReadOp.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListenableFuture;
import io.netty.buffer.ByteBuf;
import java.util.BitSet;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.bookkeeper.client.BKException.BKDigestMatchException;
import org.apache.bookkeeper.client.api.LedgerEntries;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.client.impl.LedgerEntriesImpl;
import org.apache.bookkeeper.client.impl.LedgerEntryImpl;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieProtocol;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryCallback;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryCallbackCtx;
import org.apache.bookkeeper.proto.checksum.DigestManager;
import org.apache.bookkeeper.util.MathUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Sequence of entries of a ledger that represents a pending read operation.
* When all the data read has come back, the application callback is called.
* This class could be improved because we could start pushing data to the
* application as soon as it arrives rather than waiting for the whole thing.
*
*/
class PendingReadOp implements ReadEntryCallback, Runnable {
private static final Logger LOG = LoggerFactory.getLogger(PendingReadOp.class);
private ScheduledFuture<?> speculativeTask = null;
protected final LinkedList<LedgerEntryRequest> seq;
private final CompletableFuture<LedgerEntries> future;
private final Set<BookieId> heardFromHosts;
private final BitSet heardFromHostsBitSet;
private final Set<BookieId> sentToHosts = new HashSet<BookieId>();
LedgerHandle lh;
final ClientContext clientCtx;
long numPendingEntries;
final long startEntryId;
final long endEntryId;
long requestTimeNanos;
final int requiredBookiesMissingEntryForRecovery;
final boolean isRecoveryRead;
boolean parallelRead = false;
final AtomicBoolean complete = new AtomicBoolean(false);
boolean allowFailFast = false;
/**
 * State of the read of a single entry: which replicas have been tried, the
 * first error observed, and whether a verified copy has been received.
 * Subclasses decide the fan-out strategy (parallel vs. sequential).
 */
abstract class LedgerEntryRequest implements SpeculativeRequestExecutor, AutoCloseable {

    // Set once, either on first verified response, on failure, or on close().
    final AtomicBoolean complete = new AtomicBoolean(false);

    // Result code for this entry; meaningful once 'complete' is set.
    int rc = BKException.Code.OK;
    // First "interesting" error observed; see logErrorAndReattemptRead for the
    // precedence rules between error codes.
    int firstError = BKException.Code.OK;
    // How many bookies reported NoSuchEntry/NoSuchLedger for this entry.
    int numBookiesMissingEntry = 0;

    final List<BookieId> ensemble;
    final DistributionSchedule.WriteSet writeSet;
    final LedgerEntryImpl entryImpl;
    final long eId;

    LedgerEntryRequest(List<BookieId> ensemble, long lId, long eId) {
        this.entryImpl = LedgerEntryImpl.create(lId, eId);
        this.ensemble = ensemble;
        this.eId = eId;
        // Optionally reorder the replicas to try based on bookie health info.
        if (clientCtx.getConf().enableReorderReadSequence) {
            writeSet = clientCtx.getPlacementPolicy()
                    .reorderReadSequence(
                            ensemble,
                            lh.getBookiesHealthInfo(),
                            lh.getWriteSetForReadOperation(eId));
        } else {
            writeSet = lh.getWriteSetForReadOperation(eId);
        }
    }

    @Override
    public void close() {
        // this request has succeeded before, can't recycle writeSet again
        if (complete.compareAndSet(false, true)) {
            rc = BKException.Code.UnexpectedConditionException;
            writeSet.recycle();
        }
        entryImpl.close();
    }

    /**
     * Execute the read request.
     */
    abstract void read();

    /**
     * Complete the read request from <i>host</i>.
     *
     * @param bookieIndex
     *          bookie index
     * @param host
     *          host that respond the read
     * @param buffer
     *          the data buffer
     * @return return true if we managed to complete the entry;
     *         otherwise return false if the read entry is not complete or it is already completed before
     */
    boolean complete(int bookieIndex, BookieId host, final ByteBuf buffer) {
        ByteBuf content;
        if (isComplete()) {
            return false;
        }
        try {
            // Verify the entry digest before accepting the payload.
            content = lh.macManager.verifyDigestAndReturnData(eId, buffer);
        } catch (BKDigestMatchException e) {
            clientCtx.getClientStats().getReadOpDmCounter().inc();
            // Treat a digest mismatch like a read error and keep trying replicas.
            logErrorAndReattemptRead(bookieIndex, host, "Mac mismatch", BKException.Code.DigestMatchException);
            return false;
        }
        if (!complete.getAndSet(true)) {
            rc = BKException.Code.OK;
            /*
             * The length is a long and it is the last field of the metadata of an entry.
             * Consequently, we have to subtract 8 from METADATA_LENGTH to get the length.
             */
            entryImpl.setLength(buffer.getLong(DigestManager.METADATA_LENGTH - 8));
            entryImpl.setEntryBuf(content);
            writeSet.recycle();
            return true;
        } else {
            return false;
        }
    }

    /**
     * Fail the request with given result code <i>rc</i>.
     *
     * @param rc
     *          result code to fail the request.
     * @return true if we managed to fail the entry; otherwise return false if it already failed or completed.
     */
    boolean fail(int rc) {
        if (complete.compareAndSet(false, true)) {
            this.rc = rc;
            submitCallback(rc);
            return true;
        } else {
            return false;
        }
    }

    /**
     * Log error <i>errMsg</i> and reattempt read from <i>host</i>.
     *
     * @param bookieIndex
     *          bookie index
     * @param host
     *          host that just respond
     * @param errMsg
     *          error msg to log
     * @param rc
     *          read result code
     */
    synchronized void logErrorAndReattemptRead(int bookieIndex, BookieId host, String errMsg, int rc) {
        if (BKException.Code.OK == firstError
                || BKException.Code.NoSuchEntryException == firstError
                || BKException.Code.NoSuchLedgerExistsException == firstError) {
            firstError = rc;
        } else if (BKException.Code.BookieHandleNotAvailableException == firstError
                && BKException.Code.NoSuchEntryException != rc
                && BKException.Code.NoSuchLedgerExistsException != rc) {
            // if other exception rather than NoSuchEntryException or NoSuchLedgerExistsException is
            // returned we need to update firstError to indicate that it might be a valid read but just
            // failed.
            firstError = rc;
        }
        if (BKException.Code.NoSuchEntryException == rc
                || BKException.Code.NoSuchLedgerExistsException == rc) {
            ++numBookiesMissingEntry;
            if (LOG.isDebugEnabled()) {
                LOG.debug("No such entry found on bookie. L{} E{} bookie: {}",
                        lh.ledgerId, eId, host);
            }
        } else {
            if (LOG.isInfoEnabled()) {
                LOG.info("{} while reading L{} E{} from bookie: {}",
                        errMsg, lh.ledgerId, eId, host);
            }
        }
        lh.recordReadErrorOnBookie(bookieIndex);
    }

    /**
     * Send to next replica speculatively, if required and possible.
     * This returns the host we may have sent to for unit testing.
     *
     * @param heardFromHostsBitSet
     *          the set of hosts that we already received responses.
     * @return host we sent to if we sent. null otherwise.
     */
    abstract BookieId maybeSendSpeculativeRead(BitSet heardFromHostsBitSet);

    /**
     * Whether the read request completed.
     *
     * @return true if the read request is completed.
     */
    boolean isComplete() {
        return complete.get();
    }

    /**
     * Get result code of this entry.
     *
     * @return result code.
     */
    int getRc() {
        return rc;
    }

    @Override
    public String toString() {
        return String.format("L%d-E%d", lh.getId(), eId);
    }

    /**
     * Issues a speculative request and indicates if more speculative
     * requests should be issued.
     *
     * @return whether more speculative requests should be issued
     */
    @Override
    public ListenableFuture<Boolean> issueSpeculativeRequest() {
        // Run on the ordered executor for this ledger so request state is only
        // touched from that thread.
        return clientCtx.getMainWorkerPool().submitOrdered(lh.getId(), new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                if (!isComplete() && null != maybeSendSpeculativeRead(heardFromHostsBitSet)) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Send speculative read for {}. Hosts sent are {}, "
                                        + " Hosts heard are {}, ensemble is {}.",
                                this, sentToHosts, heardFromHostsBitSet, ensemble);
                    }
                    return true;
                }
                return false;
            }
        });
    }
}
/**
 * Reads one entry by sending requests to every replica in the write set at
 * once; the first verified response wins.
 */
class ParallelReadRequest extends LedgerEntryRequest {

    // Number of replicas that have not yet responded.
    int numPendings;

    ParallelReadRequest(List<BookieId> ensemble, long lId, long eId) {
        super(ensemble, lId, eId);
        numPendings = writeSet.size();
    }

    @Override
    void read() {
        // Fan the read out to every replica in the write set.
        for (int i = 0; i < writeSet.size(); i++) {
            BookieId to = ensemble.get(writeSet.get(i));
            try {
                sendReadTo(writeSet.get(i), to, this);
            } catch (InterruptedException ie) {
                LOG.error("Interrupted reading entry {} : ", this, ie);
                Thread.currentThread().interrupt();
                fail(BKException.Code.InterruptedException);
                return;
            }
        }
    }

    @Override
    synchronized void logErrorAndReattemptRead(int bookieIndex, BookieId host, String errMsg, int rc) {
        super.logErrorAndReattemptRead(bookieIndex, host, errMsg, rc);
        // if received all responses or this entry doesn't meet quorum write, complete the request.
        --numPendings;
        if (isRecoveryRead && numBookiesMissingEntry >= requiredBookiesMissingEntryForRecovery) {
            /* For recovery, report NoSuchEntry as soon as wQ-aQ+1 bookies report that they do not
             * have the entry */
            fail(BKException.Code.NoSuchEntryException);
        } else if (numPendings == 0) {
            // if received all responses, complete the request.
            fail(firstError);
        }
    }

    @Override
    BookieId maybeSendSpeculativeRead(BitSet heardFromHostsBitSet) {
        // no speculative read: all requests were already sent up front.
        return null;
    }
}
/**
 * Reads one entry by trying replicas one at a time, in write-set order,
 * advancing to the next replica on error or via speculative reads.
 */
class SequenceReadRequest extends LedgerEntryRequest {

    static final int NOT_FOUND = -1;

    // Index into the write set of the next replica to send a request to.
    int nextReplicaIndexToReadFrom = 0;

    // Replica indexes we have sent a request to / that responded with an error.
    final BitSet sentReplicas;
    final BitSet erroredReplicas;

    SequenceReadRequest(List<BookieId> ensemble, long lId, long eId) {
        super(ensemble, lId, eId);
        this.sentReplicas = new BitSet(lh.getLedgerMetadata().getWriteQuorumSize());
        this.erroredReplicas = new BitSet(lh.getLedgerMetadata().getWriteQuorumSize());
    }

    private synchronized int getNextReplicaIndexToReadFrom() {
        return nextReplicaIndexToReadFrom;
    }

    // Translate the replica-indexed "sent" set into ensemble (bookie) indexes.
    private BitSet getSentToBitSet() {
        BitSet b = new BitSet(ensemble.size());
        for (int i = 0; i < sentReplicas.length(); i++) {
            if (sentReplicas.get(i)) {
                b.set(writeSet.get(i));
            }
        }
        return b;
    }

    // True while at least one sent request has neither succeeded nor errored.
    private boolean readsOutstanding() {
        return (sentReplicas.cardinality() - erroredReplicas.cardinality()) > 0;
    }

    /**
     * Send to next replica speculatively, if required and possible.
     * This returns the host we may have sent to for unit testing.
     * @return host we sent to if we sent. null otherwise.
     */
    @Override
    synchronized BookieId maybeSendSpeculativeRead(BitSet heardFrom) {
        if (nextReplicaIndexToReadFrom >= getLedgerMetadata().getWriteQuorumSize()) {
            return null;
        }
        BitSet sentTo = getSentToBitSet();
        sentTo.and(heardFrom);
        // only send another read if we have had no successful response at all
        // (even for other entries) from any of the other bookies we have sent the
        // request to
        if (sentTo.cardinality() == 0) {
            clientCtx.getClientStats().getSpeculativeReadCounter().inc();
            return sendNextRead();
        } else {
            return null;
        }
    }

    @Override
    void read() {
        sendNextRead();
    }

    synchronized BookieId sendNextRead() {
        if (nextReplicaIndexToReadFrom >= getLedgerMetadata().getWriteQuorumSize()) {
            // we are done, the read has failed from all replicas, just fail the
            // read
            fail(firstError);
            return null;
        }
        // ToDo: pick replica with writable PCBC. ISSUE #1239
        // https://github.com/apache/bookkeeper/issues/1239
        int replica = nextReplicaIndexToReadFrom;
        int bookieIndex = writeSet.get(nextReplicaIndexToReadFrom);
        nextReplicaIndexToReadFrom++;
        try {
            BookieId to = ensemble.get(bookieIndex);
            sendReadTo(bookieIndex, to, this);
            sentToHosts.add(to);
            sentReplicas.set(replica);
            return to;
        } catch (InterruptedException ie) {
            LOG.error("Interrupted reading entry " + this, ie);
            Thread.currentThread().interrupt();
            fail(BKException.Code.InterruptedException);
            return null;
        }
    }

    @Override
    synchronized void logErrorAndReattemptRead(int bookieIndex, BookieId host, String errMsg, int rc) {
        super.logErrorAndReattemptRead(bookieIndex, host, errMsg, rc);
        int replica = writeSet.indexOf(bookieIndex);
        if (replica == NOT_FOUND) {
            LOG.error("Received error from a host which is not in the ensemble {} {}.", host, ensemble);
            return;
        }
        erroredReplicas.set(replica);
        if (isRecoveryRead && (numBookiesMissingEntry >= requiredBookiesMissingEntryForRecovery)) {
            /* For recovery, report NoSuchEntry as soon as wQ-aQ+1 bookies report that they do not
             * have the entry */
            fail(BKException.Code.NoSuchEntryException);
            return;
        }
        // If every request sent so far has errored, move on to the next replica.
        if (!readsOutstanding()) {
            sendNextRead();
        }
    }

    @Override
    boolean complete(int bookieIndex, BookieId host, ByteBuf buffer) {
        boolean completed = super.complete(bookieIndex, host, buffer);
        if (completed) {
            int numReplicasTried = getNextReplicaIndexToReadFrom();
            // Check if any speculative reads were issued and mark any slow bookies before
            // the first successful speculative read as "slow"
            for (int i = 0; i < numReplicasTried - 1; i++) {
                int slowBookieIndex = writeSet.get(i);
                BookieId slowBookieSocketAddress = ensemble.get(slowBookieIndex);
                clientCtx.getPlacementPolicy().registerSlowBookie(slowBookieSocketAddress, eId);
            }
        }
        return completed;
    }
}
    /**
     * Creates a read operation covering the inclusive entry range
     * [startEntryId, endEntryId] of the given ledger.
     *
     * @param lh the ledger handle issuing the read
     * @param clientCtx shared client context (bookie client, stats, conf)
     * @param startEntryId first entry to read (inclusive)
     * @param endEntryId last entry to read (inclusive)
     * @param isRecoveryRead whether this read is part of ledger recovery
     *        (fences bookies and does not advance LAC)
     */
    PendingReadOp(LedgerHandle lh,
                  ClientContext clientCtx,
                  long startEntryId,
                  long endEntryId,
                  boolean isRecoveryRead) {
        this.seq = new LinkedList<>();
        this.future = new CompletableFuture<>();
        this.lh = lh;
        this.clientCtx = clientCtx;
        this.startEntryId = startEntryId;
        this.endEntryId = endEntryId;
        this.isRecoveryRead = isRecoveryRead;
        this.allowFailFast = false;
        numPendingEntries = endEntryId - startEntryId + 1;
        // wQ - aQ + 1 "no such entry" responses prove the entry was never
        // acknowledged, so recovery can stop at that point.
        requiredBookiesMissingEntryForRecovery = getLedgerMetadata().getWriteQuorumSize()
                - getLedgerMetadata().getAckQuorumSize() + 1;
        heardFromHosts = new HashSet<>();
        heardFromHostsBitSet = new BitSet(getLedgerMetadata().getEnsembleSize());
    }
    // Future completed with the read entries on success, or exceptionally with
    // a BKException on failure (see submitCallback).
    CompletableFuture<LedgerEntries> future() {
        return future;
    }
    // Always re-reads from the handle: metadata can change (ensemble changes)
    // while the operation is in flight.
    protected LedgerMetadata getLedgerMetadata() {
        return lh.getLedgerMetadata();
    }
    // Cancels the pending speculative-read timer, if any.
    // NOTE(review): speculativeTask is read/written without synchronization;
    // presumably all accesses happen on the ledger's ordered executor — confirm.
    protected void cancelSpeculativeTask(boolean mayInterruptIfRunning) {
        if (speculativeTask != null) {
            speculativeTask.cancel(mayInterruptIfRunning);
            speculativeTask = null;
        }
    }
    // Exposes the current speculative-read timer (may be null).
    public ScheduledFuture<?> getSpeculativeTask() {
        return speculativeTask;
    }
    /**
     * Enables or disables parallel reads (read all replicas at once instead of
     * sequentially with speculative fallback). Returns {@code this} for chaining.
     */
    PendingReadOp parallelRead(boolean enabled) {
        this.parallelRead = enabled;
        return this;
    }
    // Allows the read to fail immediately when a bookie channel is unwritable
    // instead of waiting for a timeout.
    void allowFailFastOnUnwritableChannel() {
        allowFailFast = true;
    }
    // Schedules this operation on the worker pool, keyed by ledger id so all
    // work for one ledger runs in order.
    public void submit() {
        clientCtx.getMainWorkerPool().executeOrdered(lh.ledgerId, this);
    }
    /**
     * Builds one request per entry in [startEntryId, endEntryId], resolving the
     * correct ensemble at each ensemble-change boundary, then starts all reads.
     */
    void initiate() {
        long nextEnsembleChange = startEntryId, i = startEntryId;
        this.requestTimeNanos = MathUtils.nowInNano();
        List<BookieId> ensemble = null;
        do {
            // Only look up the ensemble when crossing an ensemble boundary;
            // entries between boundaries share the same ensemble.
            if (i == nextEnsembleChange) {
                ensemble = getLedgerMetadata().getEnsembleAt(i);
                nextEnsembleChange = LedgerMetadataUtils.getNextEnsembleChange(getLedgerMetadata(), i);
            }
            LedgerEntryRequest entry;
            if (parallelRead) {
                entry = new ParallelReadRequest(ensemble, lh.ledgerId, i);
            } else {
                entry = new SequenceReadRequest(ensemble, lh.ledgerId, i);
            }
            seq.add(entry);
            i++;
        } while (i <= endEntryId);
        // read the entries.
        for (LedgerEntryRequest entry : seq) {
            entry.read();
            // NOTE(review): speculativeTask is reassigned on every iteration, so
            // only the last entry's speculative timer is tracked (and cancelled
            // by cancelSpeculativeTask) — confirm this is intended.
            if (!parallelRead && clientCtx.getConf().readSpeculativeRequestPolicy.isPresent()) {
                speculativeTask = clientCtx.getConf().readSpeculativeRequestPolicy.get()
                        .initiateSpeculativeRequest(clientCtx.getScheduler(), entry);
            }
        }
    }
    // Runnable entry point invoked by the ordered worker pool (see submit()).
    @Override
    public void run() {
        initiate();
    }
    /**
     * Callback context for a single read request: remembers which bookie the
     * request went to (both ensemble index and id), the owning entry request,
     * and the last-add-confirmed value piggybacked on the response.
     */
    private static class ReadContext implements ReadEntryCallbackCtx {
        final int bookieIndex;
        final BookieId to;
        final LedgerEntryRequest entry;
        // LAC reported by the bookie; INVALID_ENTRY_ID until set by the response.
        long lac = LedgerHandle.INVALID_ENTRY_ID;
        ReadContext(int bookieIndex, BookieId to, LedgerEntryRequest entry) {
            this.bookieIndex = bookieIndex;
            this.to = to;
            this.entry = entry;
        }
        @Override
        public void setLastAddConfirmed(long lac) {
            this.lac = lac;
        }
        @Override
        public long getLastAddConfirmed() {
            return lac;
        }
    }
    // Factory mirroring the ReadContext constructor.
    // NOTE(review): not referenced anywhere in this view — possibly dead code
    // or used by code outside this chunk; verify before removing.
    private static ReadContext createReadContext(int bookieIndex, BookieId to, LedgerEntryRequest entry) {
        return new ReadContext(bookieIndex, to, entry);
    }
    /**
     * Sends a read for {@code entry} to the given bookie, applying the client
     * throttler first if one is configured. Recovery reads are sent with
     * high priority and fencing flags; normal reads with no flags.
     *
     * @throws InterruptedException if interrupted while acquiring the throttler
     */
    void sendReadTo(int bookieIndex, BookieId to, LedgerEntryRequest entry) throws InterruptedException {
        if (lh.throttler != null) {
            lh.throttler.acquire();
        }
        if (isRecoveryRead) {
            // Fence the bookie so no further writes are accepted on this ledger
            // while recovery is in progress.
            int flags = BookieProtocol.FLAG_HIGH_PRIORITY | BookieProtocol.FLAG_DO_FENCING;
            clientCtx.getBookieClient().readEntry(to, lh.ledgerId, entry.eId,
                    this, new ReadContext(bookieIndex, to, entry), flags, lh.ledgerKey);
        } else {
            clientCtx.getBookieClient().readEntry(to, lh.ledgerId, entry.eId,
                    this, new ReadContext(bookieIndex, to, entry), BookieProtocol.FLAG_NONE);
        }
    }
    /**
     * Per-request read callback. On error, delegates to the entry's retry
     * logic; on success, records which host answered, completes the entry
     * (first response wins), updates LAC for non-recovery reads and notifies
     * the operation-level callback.
     */
    @Override
    public void readEntryComplete(int rc, long ledgerId, final long entryId, final ByteBuf buffer, Object ctx) {
        final ReadContext rctx = (ReadContext) ctx;
        final LedgerEntryRequest entry = rctx.entry;
        if (rc != BKException.Code.OK) {
            entry.logErrorAndReattemptRead(rctx.bookieIndex, rctx.to, "Error: " + BKException.getMessage(rc), rc);
            return;
        }
        heardFromHosts.add(rctx.to);
        heardFromHostsBitSet.set(rctx.bookieIndex, true);
        // Take a reference for the entry; released below if this response lost
        // the race to complete the entry.
        buffer.retain();
        // if entry has completed don't handle twice
        if (entry.complete(rctx.bookieIndex, rctx.to, buffer)) {
            if (!isRecoveryRead) {
                // do not advance LastAddConfirmed for recovery reads
                lh.updateLastConfirmed(rctx.getLastAddConfirmed(), 0L);
            }
            submitCallback(BKException.Code.OK);
        } else {
            buffer.release();
        }
        if (numPendingEntries < 0) {
            LOG.error("Read too many values for ledger {} : [{}, {}].",
                    ledgerId, startEntryId, endEntryId);
        }
    }
    /**
     * Records one entry completion (on OK) and completes the operation's future
     * once all entries are done or on the first failure. Guarded by a CAS so
     * the future is completed at most once.
     *
     * NOTE(review): numPendingEntries is decremented without synchronization;
     * presumably all callbacks run on the ledger's ordered executor — confirm.
     *
     * @param code a BKException.Code; OK for a successful entry completion
     */
    protected void submitCallback(int code) {
        if (BKException.Code.OK == code) {
            numPendingEntries--;
            if (numPendingEntries != 0) {
                return;
            }
        }
        // ensure callback once
        if (!complete.compareAndSet(false, true)) {
            return;
        }
        cancelSpeculativeTask(true);
        long latencyNanos = MathUtils.elapsedNanos(requestTimeNanos);
        if (code != BKException.Code.OK) {
            // Find the first entry that never completed, for diagnostics.
            long firstUnread = LedgerHandle.INVALID_ENTRY_ID;
            Integer firstRc = null;
            for (LedgerEntryRequest req : seq) {
                if (!req.isComplete()) {
                    firstUnread = req.eId;
                    firstRc = req.rc;
                    break;
                }
            }
            LOG.error(
                    "Read of ledger entry failed: L{} E{}-E{}, Sent to {}, "
                            + "Heard from {} : bitset = {}, Error = '{}'. First unread entry is ({}, rc = {})",
                    lh.getId(), startEntryId, endEntryId, sentToHosts, heardFromHosts, heardFromHostsBitSet,
                    BKException.getMessage(code), firstUnread, firstRc);
            clientCtx.getClientStats().getReadOpLogger().registerFailedEvent(latencyNanos, TimeUnit.NANOSECONDS);
            // release the entries
            seq.forEach(LedgerEntryRequest::close);
            future.completeExceptionally(BKException.create(code));
        } else {
            clientCtx.getClientStats().getReadOpLogger().registerSuccessfulEvent(latencyNanos, TimeUnit.NANOSECONDS);
            future.complete(LedgerEntriesImpl.create(Lists.transform(seq, input -> input.entryImpl)));
        }
    }
}
| 323 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/RegionAwareEnsemblePlacementPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import io.netty.util.HashedWheelTimer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.feature.Feature;
import org.apache.bookkeeper.feature.FeatureProvider;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.BookieNode;
import org.apache.bookkeeper.net.DNSToSwitchMapping;
import org.apache.bookkeeper.net.NetworkTopology;
import org.apache.bookkeeper.net.NetworkTopologyImpl;
import org.apache.bookkeeper.net.Node;
import org.apache.bookkeeper.net.NodeBase;
import org.apache.bookkeeper.proto.BookieAddressResolver;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.BookKeeperConstants;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A placement policy use region information in the network topology for placing ensembles.
*
* @see EnsemblePlacementPolicy
*/
public class RegionAwareEnsemblePlacementPolicy extends RackawareEnsemblePlacementPolicy {
static final Logger LOG = LoggerFactory.getLogger(RegionAwareEnsemblePlacementPolicy.class);
public static final String REPP_REGIONS_TO_WRITE = "reppRegionsToWrite";
public static final String REPP_MINIMUM_REGIONS_FOR_DURABILITY = "reppMinimumRegionsForDurability";
public static final String REPP_ENABLE_DURABILITY_ENFORCEMENT_IN_REPLACE =
"reppEnableDurabilityEnforcementInReplace";
public static final String REPP_DISABLE_DURABILITY_FEATURE_NAME = "reppDisableDurabilityFeatureName";
public static final String REPP_DISALLOW_BOOKIE_PLACEMENT_IN_REGION_FEATURE_NAME =
"reppDisallowBookiePlacementInRegionFeatureName";
public static final String REPP_DISABLE_DURABILITY_ENFORCEMENT_FEATURE = "reppDisableDurabilityEnforcementFeature";
public static final String REPP_ENABLE_VALIDATION = "reppEnableValidation";
public static final String REGION_AWARE_ANOMALOUS_ENSEMBLE = "region_aware_anomalous_ensemble";
static final int MINIMUM_REGIONS_FOR_DURABILITY_DEFAULT = 2;
static final int REGIONID_DISTANCE_FROM_LEAVES = 2;
static final String UNKNOWN_REGION = "UnknownRegion";
static final int REMOTE_NODE_IN_REORDER_SEQUENCE = 2;
protected final Map<String, TopologyAwareEnsemblePlacementPolicy> perRegionPlacement;
protected final ConcurrentMap<BookieId, String> address2Region;
protected FeatureProvider featureProvider;
protected String disallowBookiePlacementInRegionFeatureName;
protected String myRegion = null;
protected int minRegionsForDurability = 0;
protected boolean enableValidation = true;
protected boolean enforceDurabilityInReplace = false;
protected Feature disableDurabilityFeature;
private int lastRegionIndex = 0;
RegionAwareEnsemblePlacementPolicy() {
super();
perRegionPlacement = new HashMap<String, TopologyAwareEnsemblePlacementPolicy>();
address2Region = new ConcurrentHashMap<BookieId, String>();
}
    /**
     * Returns the region of the given node, or {@link #UNKNOWN_REGION} when the
     * node or its address is unavailable.
     */
    protected String getLocalRegion(BookieNode node) {
        if (null == node || null == node.getAddr()) {
            return UNKNOWN_REGION;
        }
        return getRegion(node.getAddr());
    }
protected String getRegion(BookieId addr) {
String region = address2Region.get(addr);
if (null == region) {
region = parseBookieRegion(addr);
address2Region.putIfAbsent(addr, region);
}
return region;
}
    /**
     * Derives the region from a bookie's resolved network location. Locations
     * look like {@code /region/rack/...}; the first path component is taken as
     * the region. Returns {@link #UNKNOWN_REGION} for the default location or
     * when no region component is present.
     */
    protected String parseBookieRegion(BookieId addr) {
        String networkLocation = resolveNetworkLocation(addr);
        if (NetworkTopology.DEFAULT_REGION_AND_RACK.equals(networkLocation)) {
            return UNKNOWN_REGION;
        } else {
            // split on "/" yields ["", region, rack, ...]; index 1 is the region.
            String[] parts = networkLocation.split(NodeBase.PATH_SEPARATOR_STR);
            if (parts.length <= 1) {
                return UNKNOWN_REGION;
            } else {
                return parts[1];
            }
        }
    }
@Override
public void handleBookiesThatLeft(Set<BookieId> leftBookies) {
super.handleBookiesThatLeft(leftBookies);
for (TopologyAwareEnsemblePlacementPolicy policy: perRegionPlacement.values()) {
policy.handleBookiesThatLeft(leftBookies);
}
}
@Override
public void handleBookiesThatJoined(Set<BookieId> joinedBookies) {
Map<String, Set<BookieId>> perRegionClusterChange = new HashMap<String, Set<BookieId>>();
// node joined
for (BookieId addr : joinedBookies) {
BookieNode node = createBookieNode(addr);
topology.add(node);
knownBookies.put(addr, node);
historyBookies.put(addr, node);
String region = getLocalRegion(node);
if (null == perRegionPlacement.get(region)) {
perRegionPlacement.put(region, new RackawareEnsemblePlacementPolicy()
.initialize(dnsResolver, timer, this.reorderReadsRandom, this.stabilizePeriodSeconds,
this.reorderThresholdPendingRequests, this.isWeighted, this.maxWeightMultiple,
this.minNumRacksPerWriteQuorum, this.enforceMinNumRacksPerWriteQuorum,
this.ignoreLocalNodeInPlacementPolicy,
this.useHostnameResolveLocalNodePlacementPolicy, statsLogger, bookieAddressResolver)
.withDefaultRack(NetworkTopology.DEFAULT_REGION_AND_RACK));
}
Set<BookieId> regionSet = perRegionClusterChange.get(region);
if (null == regionSet) {
regionSet = new HashSet<BookieId>();
regionSet.add(addr);
perRegionClusterChange.put(region, regionSet);
} else {
regionSet.add(addr);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Cluster changed : bookie {} joined the cluster.", addr);
}
}
for (Map.Entry<String, TopologyAwareEnsemblePlacementPolicy> regionEntry : perRegionPlacement.entrySet()) {
Set<BookieId> regionSet = perRegionClusterChange.get(regionEntry.getKey());
if (null == regionSet) {
regionSet = new HashSet<BookieId>();
}
regionEntry.getValue().handleBookiesThatJoined(regionSet);
}
}
    /**
     * Refreshes topology information for bookies whose rack mapping changed.
     * Under the write lock: updates the global topology when the network
     * location changed, then either forwards the change to the bookie's
     * existing region policy (same region) or migrates the bookie between
     * region policies (region changed), creating the destination policy if it
     * does not exist yet.
     */
    @Override
    public void onBookieRackChange(List<BookieId> bookieAddressList) {
        rwLock.writeLock().lock();
        try {
            bookieAddressList.forEach(bookieAddress -> {
                try {
                    BookieNode node = knownBookies.get(bookieAddress);
                    if (node != null) {
                        // refresh the rack info if its a known bookie
                        BookieNode newNode = createBookieNode(bookieAddress);
                        if (!newNode.getNetworkLocation().equals(node.getNetworkLocation())) {
                            topology.remove(node);
                            topology.add(newNode);
                            knownBookies.put(bookieAddress, newNode);
                            historyBookies.put(bookieAddress, newNode);
                        }
                        //Handle per region placement policy.
                        String oldRegion = getRegion(bookieAddress);
                        String newRegion = parseBookieRegion(newNode.getAddr());
                        if (oldRegion.equals(newRegion)) {
                            TopologyAwareEnsemblePlacementPolicy regionPlacement = perRegionPlacement.get(oldRegion);
                            regionPlacement.onBookieRackChange(Collections.singletonList(bookieAddress));
                        } else {
                            // Region changed: update the cache, remove from the
                            // old region's policy and add to the new region's,
                            // creating that policy on first use.
                            address2Region.put(bookieAddress, newRegion);
                            TopologyAwareEnsemblePlacementPolicy oldRegionPlacement = perRegionPlacement.get(oldRegion);
                            oldRegionPlacement.handleBookiesThatLeft(Collections.singleton(bookieAddress));
                            TopologyAwareEnsemblePlacementPolicy newRegionPlacement = perRegionPlacement.get(
                                    newRegion);
                            if (newRegionPlacement == null) {
                                newRegionPlacement = new RackawareEnsemblePlacementPolicy()
                                        .initialize(dnsResolver, timer, this.reorderReadsRandom,
                                                this.stabilizePeriodSeconds, this.reorderThresholdPendingRequests,
                                                this.isWeighted, this.maxWeightMultiple,
                                                this.minNumRacksPerWriteQuorum, this.enforceMinNumRacksPerWriteQuorum,
                                                this.ignoreLocalNodeInPlacementPolicy,
                                                this.useHostnameResolveLocalNodePlacementPolicy, statsLogger,
                                                bookieAddressResolver)
                                        .withDefaultRack(NetworkTopology.DEFAULT_REGION_AND_RACK);
                                perRegionPlacement.put(newRegion, newRegionPlacement);
                            }
                            newRegionPlacement.handleBookiesThatJoined(Collections.singleton(bookieAddress));
                        }
                    }
                } catch (IllegalArgumentException | NetworkTopologyImpl.InvalidTopologyException e) {
                    LOG.error("Failed to update bookie rack info: {} ", bookieAddress, e);
                }
            });
        } finally {
            rwLock.writeLock().unlock();
        }
    }
@Override
public RegionAwareEnsemblePlacementPolicy initialize(ClientConfiguration conf,
Optional<DNSToSwitchMapping> optionalDnsResolver,
HashedWheelTimer timer,
FeatureProvider featureProvider,
StatsLogger statsLogger,
BookieAddressResolver bookieAddressResolver) {
super.initialize(conf, optionalDnsResolver, timer, featureProvider, statsLogger, bookieAddressResolver)
.withDefaultRack(NetworkTopology.DEFAULT_REGION_AND_RACK);
myRegion = getLocalRegion(localNode);
enableValidation = conf.getBoolean(REPP_ENABLE_VALIDATION, true);
// We have to statically provide regions we want the writes to go through and how many regions
// are required for durability. This decision cannot be driven by the active bookies as the
// current topology will not be indicative of constraints that must be enforced for durability
String regionsString = conf.getString(REPP_REGIONS_TO_WRITE, null);
if (null != regionsString) {
// Regions are specified as
// R1;R2;...
String[] regions = regionsString.split(";");
for (String region : regions) {
perRegionPlacement.put(region, new RackawareEnsemblePlacementPolicy(true)
.initialize(dnsResolver, timer, this.reorderReadsRandom, this.stabilizePeriodSeconds,
this.reorderThresholdPendingRequests, this.isWeighted, this.maxWeightMultiple,
this.minNumRacksPerWriteQuorum, this.enforceMinNumRacksPerWriteQuorum,
this.ignoreLocalNodeInPlacementPolicy, this.ignoreLocalNodeInPlacementPolicy,
statsLogger, bookieAddressResolver)
.withDefaultRack(NetworkTopology.DEFAULT_REGION_AND_RACK));
}
minRegionsForDurability = conf.getInt(REPP_MINIMUM_REGIONS_FOR_DURABILITY,
MINIMUM_REGIONS_FOR_DURABILITY_DEFAULT);
if (minRegionsForDurability > 0) {
enforceDurability = true;
enforceDurabilityInReplace = conf.getBoolean(REPP_ENABLE_DURABILITY_ENFORCEMENT_IN_REPLACE, true);
}
if (regions.length < minRegionsForDurability) {
throw new IllegalArgumentException(
"Regions provided are insufficient to meet the durability constraints");
}
}
this.featureProvider = featureProvider;
this.disallowBookiePlacementInRegionFeatureName =
conf.getString(REPP_DISALLOW_BOOKIE_PLACEMENT_IN_REGION_FEATURE_NAME);
this.disableDurabilityFeature = conf.getFeature(REPP_DISABLE_DURABILITY_ENFORCEMENT_FEATURE, null);
if (null == disableDurabilityFeature) {
this.disableDurabilityFeature =
featureProvider.getFeature(
conf.getString(REPP_DISABLE_DURABILITY_FEATURE_NAME,
BookKeeperConstants.FEATURE_REPP_DISABLE_DURABILITY_ENFORCEMENT));
}
return this;
}
protected List<BookieNode> selectRandomFromRegions(Set<String> availableRegions,
int numBookies,
Set<Node> excludeBookies,
Predicate<BookieNode> predicate,
Ensemble<BookieNode> ensemble)
throws BKException.BKNotEnoughBookiesException {
List<BookieNode> availableBookies = new ArrayList<BookieNode>();
for (BookieNode bookieNode: knownBookies.values()) {
if (availableRegions.contains(getLocalRegion(bookieNode))) {
availableBookies.add(bookieNode);
}
}
return selectRandomInternal(availableBookies, numBookies, excludeBookies, predicate, ensemble);
}
    /**
     * Builds a new ensemble spread across the available regions.
     *
     * <p>After validating the quorum configuration against the durability
     * constraints, the method: falls back to fully random selection when no
     * region information is usable; delegates to the single region's
     * rack-aware policy when only one region is available; otherwise
     * distributes the ensemble as evenly as possible across regions,
     * round-robining the starting region ({@code lastRegionIndex}) and
     * shrinking a region's share when it cannot supply its target count.
     *
     * @throws BKException.BKNotEnoughBookiesException if the ensemble cannot be
     *         filled or fails validation
     * @throws IllegalArgumentException if the quorum configuration violates the
     *         durability constraints
     */
    @Override
    public PlacementResult<List<BookieId>> newEnsemble(int ensembleSize, int writeQuorumSize,
            int ackQuorumSize, Map<String, byte[]> customMetadata, Set<BookieId> excludedBookies)
            throws BKException.BKNotEnoughBookiesException {
        int effectiveMinRegionsForDurability = disableDurabilityFeature.isAvailable() ? 1 : minRegionsForDurability;
        // All of these conditions indicate bad configuration
        if (ackQuorumSize < effectiveMinRegionsForDurability) {
            throw new IllegalArgumentException(
                    "Ack Quorum size provided are insufficient to meet the durability constraints");
        } else if (ensembleSize < writeQuorumSize) {
            throw new IllegalArgumentException(
                    "write quorum (" + writeQuorumSize + ") cannot exceed ensemble size (" + ensembleSize + ")");
        } else if (writeQuorumSize < ackQuorumSize) {
            throw new IllegalArgumentException(
                    "ack quorum (" + ackQuorumSize + ") cannot exceed write quorum size (" + writeQuorumSize + ")");
        } else if (effectiveMinRegionsForDurability > 0) {
            // We must survive the failure of numRegions - effectiveMinRegionsForDurability. When these
            // regions have failed we would spread the replicas over the remaining
            // effectiveMinRegionsForDurability regions; we have to make sure that the ack quorum is large
            // enough such that there is a configuration for spreading the replicas across
            // effectiveMinRegionsForDurability - 1 regions
            if (ackQuorumSize <= (writeQuorumSize - (writeQuorumSize / effectiveMinRegionsForDurability))) {
                throw new IllegalArgumentException("ack quorum (" + ackQuorumSize + ") "
                        + "violates the requirement to satisfy durability constraints when running in degraded mode");
            }
        }
        rwLock.readLock().lock();
        try {
            Set<BookieId> comprehensiveExclusionBookiesSet = addDefaultRackBookiesIfMinNumRacksIsEnforced(
                    excludedBookies);
            Set<Node> excludeNodes = convertBookiesToNodes(comprehensiveExclusionBookiesSet);
            // A region is usable when it has known bookies and placement there
            // is not disallowed by the per-region feature flag.
            List<String> availableRegions = new ArrayList<>();
            for (Map.Entry<String, TopologyAwareEnsemblePlacementPolicy> entry : perRegionPlacement.entrySet()) {
                String region = entry.getKey();
                if (!entry.getValue().knownBookies.isEmpty() && (null == disallowBookiePlacementInRegionFeatureName
                        || !featureProvider.scope(region).getFeature(disallowBookiePlacementInRegionFeatureName)
                        .isAvailable())) {
                    availableRegions.add(region);
                }
            }
            int numRegionsAvailable = availableRegions.size();
            // If we were unable to get region information or all regions are disallowed which is
            // an invalid configuration; default to random selection from the set of nodes
            if (numRegionsAvailable < 1) {
                // We cant disallow all regions; if we did, raise an alert to draw attention
                if (perRegionPlacement.keySet().size() >= 1) {
                    LOG.error("No regions available, invalid configuration");
                }
                List<BookieNode> bns = selectRandom(ensembleSize, excludeNodes, TruePredicate.INSTANCE,
                        EnsembleForReplacementWithNoConstraints.INSTANCE);
                ArrayList<BookieId> addrs = new ArrayList<BookieId>(ensembleSize);
                for (BookieNode bn : bns) {
                    addrs.add(bn.getAddr());
                }
                return PlacementResult.of(addrs,
                        isEnsembleAdheringToPlacementPolicy(
                                addrs, writeQuorumSize, ackQuorumSize));
            }
            // Single region, fall back to RackAwareEnsemblePlacement
            if (numRegionsAvailable < 2) {
                RRTopologyAwareCoverageEnsemble ensemble = new RRTopologyAwareCoverageEnsemble(ensembleSize,
                        writeQuorumSize, ackQuorumSize, REGIONID_DISTANCE_FROM_LEAVES,
                        effectiveMinRegionsForDurability > 0 ? new HashSet<>(perRegionPlacement.keySet()) : null,
                        effectiveMinRegionsForDurability, minNumRacksPerWriteQuorum);
                TopologyAwareEnsemblePlacementPolicy nextPolicy = perRegionPlacement.get(
                        availableRegions.iterator().next());
                return nextPolicy.newEnsemble(ensembleSize, writeQuorumSize, writeQuorumSize,
                        comprehensiveExclusionBookiesSet, ensemble, ensemble);
            }
            int remainingEnsemble = ensembleSize;
            int remainingWriteQuorum = writeQuorumSize;
            // Equally distribute the nodes across all regions to whatever extent possible
            // with the hierarchy in mind
            // Try and place as many nodes in a region as possible, the ones that cannot be
            // accommodated are placed on other regions
            // Within each region try and follow rack aware placement
            Map<String, Pair<Integer, Integer>> regionsWiseAllocation = new HashMap<>();
            for (String region: availableRegions) {
                regionsWiseAllocation.put(region, Pair.of(0, 0));
            }
            int remainingEnsembleBeforeIteration;
            int numRemainingRegions;
            Set<String> regionsReachedMaxAllocation = new HashSet<String>();
            RRTopologyAwareCoverageEnsemble ensemble;
            // Outer loop: retry allocation while progress is being made
            // (remainingEnsemble strictly decreased last iteration).
            do {
                numRemainingRegions = numRegionsAvailable - regionsReachedMaxAllocation.size();
                ensemble = new RRTopologyAwareCoverageEnsemble(ensembleSize, writeQuorumSize, ackQuorumSize,
                        REGIONID_DISTANCE_FROM_LEAVES,
                        // We pass all regions we know off to the coverage ensemble as
                        // regardless of regions that are available; constraints are
                        // always applied based on all possible regions
                        effectiveMinRegionsForDurability > 0 ? new HashSet<>(perRegionPlacement.keySet()) : null,
                        effectiveMinRegionsForDurability, minNumRacksPerWriteQuorum);
                remainingEnsembleBeforeIteration = remainingEnsemble;
                int regionsToAllocate = numRemainingRegions;
                // Rotate the starting region across calls so load spreads evenly.
                int startRegionIndex = lastRegionIndex % numRegionsAvailable;
                for (int i = 0; i < numRegionsAvailable; ++i) {
                    String region = availableRegions.get(startRegionIndex % numRegionsAvailable);
                    startRegionIndex++;
                    final Pair<Integer, Integer> currentAllocation = regionsWiseAllocation.get(region);
                    TopologyAwareEnsemblePlacementPolicy policyWithinRegion = perRegionPlacement.get(region);
                    if (!regionsReachedMaxAllocation.contains(region)) {
                        if (numRemainingRegions <= 0) {
                            LOG.error("Inconsistent State: This should never happen");
                            throw new BKException.BKNotEnoughBookiesException();
                        }
                        // try to place the bookies as balance as possible across all the regions
                        int addToEnsembleSize = Math.min(remainingEnsemble, remainingEnsemble / regionsToAllocate
                                + (remainingEnsemble % regionsToAllocate == 0 ? 0 : 1));
                        boolean success = false;
                        while (addToEnsembleSize > 0) {
                            int addToWriteQuorum = Math.max(1, Math.min(remainingWriteQuorum,
                                    Math.round(1.0f * writeQuorumSize * addToEnsembleSize / ensembleSize)));
                            // Temp ensemble will be merged back into the ensemble only if we are able to successfully
                            // allocate the target number of bookies in this region; if we fail because we dont have
                            // enough bookies; then we retry the process with a smaller target
                            RRTopologyAwareCoverageEnsemble tempEnsemble =
                                    new RRTopologyAwareCoverageEnsemble(ensemble);
                            int newEnsembleSize = currentAllocation.getLeft() + addToEnsembleSize;
                            int newWriteQuorumSize = currentAllocation.getRight() + addToWriteQuorum;
                            try {
                                List<BookieId> allocated = policyWithinRegion
                                        .newEnsemble(newEnsembleSize, newWriteQuorumSize, newWriteQuorumSize,
                                                comprehensiveExclusionBookiesSet, tempEnsemble, tempEnsemble)
                                        .getResult();
                                ensemble = tempEnsemble;
                                remainingEnsemble -= addToEnsembleSize;
                                remainingWriteQuorum -= addToWriteQuorum;
                                regionsWiseAllocation.put(region, Pair.of(newEnsembleSize, newWriteQuorumSize));
                                success = true;
                                regionsToAllocate--;
                                lastRegionIndex = startRegionIndex;
                                LOG.info("Region {} allocating bookies with ensemble size {} "
                                                + "and write quorum size {} : {}",
                                        region, newEnsembleSize, newWriteQuorumSize, allocated);
                                break;
                            } catch (BKException.BKNotEnoughBookiesException exc) {
                                LOG.warn("Could not allocate {} bookies in region {}, try allocating {} bookies",
                                        newEnsembleSize, region, (newEnsembleSize - 1));
                                addToEnsembleSize--;
                            }
                        }
                        // we couldn't allocate additional bookies from the region,
                        // it should have reached its max allocation.
                        if (!success) {
                            regionsReachedMaxAllocation.add(region);
                        }
                    }
                    if (regionsReachedMaxAllocation.contains(region)) {
                        if (currentAllocation.getLeft() > 0) {
                            LOG.info("Allocating {} bookies in region {} : ensemble {} exclude {}",
                                    currentAllocation.getLeft(), region, comprehensiveExclusionBookiesSet, ensemble);
                            policyWithinRegion.newEnsemble(
                                    currentAllocation.getLeft(),
                                    currentAllocation.getRight(),
                                    currentAllocation.getRight(),
                                    comprehensiveExclusionBookiesSet,
                                    ensemble,
                                    ensemble);
                            LOG.info("Allocated {} bookies in region {} : {}",
                                    currentAllocation.getLeft(), region, ensemble);
                        }
                    }
                }
                if (regionsReachedMaxAllocation.containsAll(regionsWiseAllocation.keySet())) {
                    break;
                }
            } while ((remainingEnsemble > 0) && (remainingEnsemble < remainingEnsembleBeforeIteration));
            List<BookieId> bookieList = ensemble.toList();
            if (ensembleSize != bookieList.size()) {
                LOG.error("Not enough {} bookies are available to form an ensemble : {}.",
                        ensembleSize, bookieList);
                throw new BKException.BKNotEnoughBookiesException();
            }
            if (enableValidation && !ensemble.validate()) {
                LOG.error("Not enough {} bookies are available to form a valid ensemble : {}.",
                        ensembleSize, bookieList);
                throw new BKException.BKNotEnoughBookiesException();
            }
            LOG.info("Bookies allocated successfully {}", ensemble);
            List<BookieId> ensembleList = ensemble.toList();
            return PlacementResult.of(ensembleList,
                    isEnsembleAdheringToPlacementPolicy(ensembleList, writeQuorumSize, ackQuorumSize));
        } finally {
            rwLock.readLock().unlock();
        }
    }
    /**
     * Chooses a replacement for {@code bookieToReplace}: the rest of the
     * current ensemble is replayed into a coverage ensemble so durability
     * constraints are evaluated against the real placement, then a candidate is
     * picked from the same rack/region when possible (see
     * {@link #replaceFromRack}). If the existing ensemble already violates
     * constraints, durability enforcement is relaxed for this replacement.
     *
     * @throws BKException.BKNotEnoughBookiesException if no candidate satisfies
     *         the constraints
     */
    @Override
    public PlacementResult<BookieId> replaceBookie(int ensembleSize, int writeQuorumSize, int ackQuorumSize,
                                                   Map<String, byte[]> customMetadata, List<BookieId> currentEnsemble,
                                                   BookieId bookieToReplace, Set<BookieId> excludeBookies)
            throws BKException.BKNotEnoughBookiesException {
        rwLock.readLock().lock();
        try {
            boolean enforceDurability = enforceDurabilityInReplace && !disableDurabilityFeature.isAvailable();
            int effectiveMinRegionsForDurability = enforceDurability ? minRegionsForDurability : 1;
            Set<BookieId> comprehensiveExclusionBookiesSet = addDefaultRackBookiesIfMinNumRacksIsEnforced(
                    excludeBookies);
            Set<Node> excludeNodes = convertBookiesToNodes(comprehensiveExclusionBookiesSet);
            RRTopologyAwareCoverageEnsemble ensemble = new RRTopologyAwareCoverageEnsemble(ensembleSize,
                    writeQuorumSize,
                    ackQuorumSize,
                    REGIONID_DISTANCE_FROM_LEAVES,
                    effectiveMinRegionsForDurability > 0 ? new HashSet<String>(perRegionPlacement.keySet()) : null,
                    effectiveMinRegionsForDurability, minNumRacksPerWriteQuorum);
            BookieNode bookieNodeToReplace = knownBookies.get(bookieToReplace);
            if (null == bookieNodeToReplace) {
                bookieNodeToReplace = createBookieNode(bookieToReplace);
            }
            excludeNodes.add(bookieNodeToReplace);
            // Replay the surviving ensemble members into the coverage ensemble
            // so the new candidate is checked against the actual placement.
            for (BookieId bookieAddress: currentEnsemble) {
                if (bookieAddress.equals(bookieToReplace)) {
                    continue;
                }
                BookieNode bn = knownBookies.get(bookieAddress);
                if (null == bn) {
                    bn = createBookieNode(bookieAddress);
                }
                excludeNodes.add(bn);
                if (!ensemble.apply(bn, ensemble)) {
                    // The existing ensemble already violates constraints; count
                    // it and stop enforcing durability for this replacement.
                    LOG.warn("Anomalous ensemble detected");
                    if (null != statsLogger) {
                        statsLogger.getCounter(REGION_AWARE_ANOMALOUS_ENSEMBLE).inc();
                    }
                    enforceDurability = false;
                }
                ensemble.addNode(bn);
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Try to choose a new bookie to replace {}, excluding {}.", bookieToReplace,
                        excludeNodes);
            }
            // pick a candidate from same rack to replace
            BookieNode candidate = replaceFromRack(bookieNodeToReplace, excludeNodes,
                    ensemble, ensemble, enforceDurability);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Bookie {} is chosen to replace bookie {}.", candidate, bookieNodeToReplace);
            }
            BookieId candidateAddr = candidate.getAddr();
            List<BookieId> newEnsemble = new ArrayList<BookieId>(currentEnsemble);
            if (currentEnsemble.isEmpty()) {
                /*
                 * in testing code there are test cases which would pass empty
                 * currentEnsemble
                 */
                newEnsemble.add(candidateAddr);
            } else {
                newEnsemble.set(currentEnsemble.indexOf(bookieToReplace), candidateAddr);
            }
            return PlacementResult.of(candidateAddr,
                    isEnsembleAdheringToPlacementPolicy(newEnsemble, writeQuorumSize, ackQuorumSize));
        } finally {
            rwLock.readLock().unlock();
        }
    }
    /**
     * Picks a replacement for the given node, preferring (in order): a bookie
     * from the same rack, then the same region, then a random bookie from any
     * allowed region. The durability predicate/ensemble are only applied on the
     * cross-region fallback and only when {@code enforceDurability} is set —
     * same-region picks cannot worsen region-level durability.
     *
     * @throws BKException.BKNotEnoughBookiesException if no candidate exists
     */
    protected BookieNode replaceFromRack(BookieNode bookieNodeToReplace,
                                         Set<Node> excludeBookies,
                                         Predicate<BookieNode> predicate,
                                         Ensemble<BookieNode> ensemble,
                                         boolean enforceDurability)
            throws BKException.BKNotEnoughBookiesException {
        Set<String> availableRegions = new HashSet<String>();
        for (String region: perRegionPlacement.keySet()) {
            if ((null == disallowBookiePlacementInRegionFeatureName)
                    || !featureProvider.scope(region).getFeature(disallowBookiePlacementInRegionFeatureName)
                    .isAvailable()) {
                availableRegions.add(region);
            }
        }
        String regionForBookieToReplace = getLocalRegion(bookieNodeToReplace);
        if (availableRegions.contains(regionForBookieToReplace)) {
            TopologyAwareEnsemblePlacementPolicy regionPolicy = perRegionPlacement.get(regionForBookieToReplace);
            if (null != regionPolicy) {
                try {
                    // select one from local rack => it falls back to selecting a node from the region
                    // if the rack does not have an available node, selecting from the same region
                    // should not violate durability constraints so we can simply not have to check
                    // for that.
                    return regionPolicy.selectFromNetworkLocation(
                            bookieNodeToReplace.getNetworkLocation(),
                            excludeBookies,
                            TruePredicate.INSTANCE,
                            EnsembleForReplacementWithNoConstraints.INSTANCE,
                            true);
                } catch (BKException.BKNotEnoughBookiesException e) {
                    LOG.warn("Failed to choose a bookie from {} : "
                                    + "excluded {}, fallback to choose bookie randomly from the cluster.",
                            bookieNodeToReplace.getNetworkLocation(), excludeBookies);
                }
            }
        }
        // randomly choose one from all the regions that are available, ignore the provided predicate if we are not
        // enforcing durability.
        return selectRandomFromRegions(availableRegions, 1,
                excludeBookies,
                enforceDurability ? predicate : TruePredicate.INSTANCE,
                enforceDurability ? ensemble : EnsembleForReplacementWithNoConstraints.INSTANCE).get(0);
    }
@Override
public final DistributionSchedule.WriteSet reorderReadSequence(
List<BookieId> ensemble,
BookiesHealthInfo bookiesHealthInfo,
DistributionSchedule.WriteSet writeSet) {
if (UNKNOWN_REGION.equals(myRegion)) {
return super.reorderReadSequence(ensemble, bookiesHealthInfo, writeSet);
} else {
Map<Integer, String> writeSetWithRegion = new HashMap<>();
for (int i = 0; i < writeSet.size(); i++) {
int idx = writeSet.get(i);
writeSetWithRegion.put(idx, getRegion(ensemble.get(idx)));
}
return super.reorderReadSequenceWithRegion(ensemble, writeSet, writeSetWithRegion,
bookiesHealthInfo, true, myRegion, REMOTE_NODE_IN_REORDER_SEQUENCE);
}
}
    /**
     * Reorders the LAC read sequence using the same region-aware ordering as
     * {@link #reorderReadSequence}, then appends any ensemble indices missing
     * from the write set (LAC reads may query every ensemble member).
     */
    @Override
    public final DistributionSchedule.WriteSet reorderReadLACSequence(
            List<BookieId> ensemble,
            BookiesHealthInfo bookiesHealthInfo,
            DistributionSchedule.WriteSet writeSet) {
        if (UNKNOWN_REGION.equals(myRegion)) {
            return super.reorderReadLACSequence(ensemble, bookiesHealthInfo, writeSet);
        }
        DistributionSchedule.WriteSet finalList = reorderReadSequence(ensemble, bookiesHealthInfo, writeSet);
        finalList.addMissingIndices(ensemble.size());
        return finalList;
    }
    // Placeholder: always reports strict adherence; real region-aware checking
    // is not implemented yet (see TODO below).
    @Override
    public PlacementPolicyAdherence isEnsembleAdheringToPlacementPolicy(List<BookieId> ensembleList,
                                                                        int writeQuorumSize, int ackQuorumSize) {
        /**
         * TODO: have to implement actual logic for this method for
         * RegionAwareEnsemblePlacementPolicy. For now return true value.
         *
         * - https://github.com/apache/bookkeeper/issues/1898
         */
        return PlacementPolicyAdherence.MEETS_STRICT;
    }
}
| 324 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerMetadataUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkArgument;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.SortedMap;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.net.BookieId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Utilities for working with ledger metadata.
 */
public class LedgerMetadataUtils {
    static final Logger LOG = LoggerFactory.getLogger(LedgerMetadataUtils.class);

    /** Returns the ensemble currently being written to, i.e. the last ensemble. */
    static List<BookieId> getCurrentEnsemble(LedgerMetadata metadata) {
        return getLastEnsembleValue(metadata);
    }

    /**
     * Returns the entry id, greater than the given entry id, at which the next
     * ensemble change takes place.
     *
     * @param entryId the entry id to search after
     * @return the entry id of the next ensemble change (-1 if no further ensemble changes)
     */
    static long getNextEnsembleChange(LedgerMetadata metadata, long entryId) {
        SortedMap<Long, ? extends List<BookieId>> laterEnsembles =
                metadata.getAllEnsembles().tailMap(entryId + 1);
        return laterEnsembles.isEmpty() ? -1 : laterEnsembles.firstKey();
    }

    /** Collects the distinct set of bookies appearing in any ensemble of the ledger. */
    static Set<BookieId> getBookiesInThisLedger(LedgerMetadata metadata) {
        Set<BookieId> bookies = new HashSet<>();
        metadata.getAllEnsembles().values().forEach(bookies::addAll);
        return bookies;
    }

    /** Returns the last ensemble (the one with the highest first-entry key). */
    static List<BookieId> getLastEnsembleValue(LedgerMetadata metadata) {
        checkArgument(!metadata.getAllEnsembles().isEmpty(), "Metadata should never be created with no ensembles");
        return metadata.getAllEnsembles().lastEntry().getValue();
    }

    /** Returns the first entry id covered by the last ensemble. */
    static Long getLastEnsembleKey(LedgerMetadata metadata) {
        checkArgument(!metadata.getAllEnsembles().isEmpty(), "Metadata should never be created with no ensembles");
        return metadata.getAllEnsembles().lastKey();
    }

    /**
     * True when the metadata implementation wants the creation time persisted
     * (works around ctime never having been versioned correctly).
     */
    public static boolean shouldStoreCtime(LedgerMetadata metadata) {
        return metadata instanceof LedgerMetadataImpl && ((LedgerMetadataImpl) metadata).shouldStoreCtime();
    }
}
| 325 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerMetadataBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static org.apache.bookkeeper.meta.LedgerMetadataSerDe.CURRENT_METADATA_FORMAT_VERSION;
import static org.apache.bookkeeper.meta.LedgerMetadataSerDe.METADATA_FORMAT_VERSION_1;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.TreeMap;
import org.apache.bookkeeper.client.api.DigestType;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.client.api.LedgerMetadata.State;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.LimitedPrivate;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
import org.apache.bookkeeper.net.BookieId;
/**
* Builder for building LedgerMetadata objects.
*/
@LimitedPrivate
@Unstable
@VisibleForTesting
public class LedgerMetadataBuilder {
    private long ledgerId = -1L;
    private int metadataFormatVersion = CURRENT_METADATA_FORMAT_VERSION;
    private int ensembleSize = 3;
    private int writeQuorumSize = 3;
    private int ackQuorumSize = 2;
    private State state = State.OPEN;
    private Optional<Long> lastEntryId = Optional.empty();
    private Optional<Long> length = Optional.empty();
    private TreeMap<Long, List<BookieId>> ensembles = new TreeMap<>();
    private Optional<DigestType> digestType = Optional.empty();
    private Optional<byte[]> password = Optional.empty();
    private long ctime = -1;
    private boolean storeCtime = false;
    private Map<String, byte[]> customMetadata = Collections.emptyMap();
    private static final long BLANK_CTOKEN = 0;
    private long cToken = BLANK_CTOKEN;

    /** Returns a builder initialized with the default metadata values. */
    public static LedgerMetadataBuilder create() {
        return new LedgerMetadataBuilder();
    }

    /**
     * Returns a builder pre-populated from an existing metadata instance.
     * Last entry id and length are only carried over for CLOSED ledgers;
     * password and digest type only when the source metadata has a password.
     */
    public static LedgerMetadataBuilder from(LedgerMetadata other) {
        LedgerMetadataBuilder builder = new LedgerMetadataBuilder();
        builder.ledgerId = other.getLedgerId();
        builder.metadataFormatVersion = other.getMetadataFormatVersion();
        builder.ensembleSize = other.getEnsembleSize();
        builder.writeQuorumSize = other.getWriteQuorumSize();
        builder.ackQuorumSize = other.getAckQuorumSize();
        builder.state = other.getState();
        if (builder.state == State.CLOSED) {
            builder.lastEntryId = Optional.of(other.getLastEntryId());
            builder.length = Optional.of(other.getLength());
        }
        builder.ensembles.putAll(other.getAllEnsembles());
        if (other.hasPassword()) {
            builder.password = Optional.of(other.getPassword());
            builder.digestType = Optional.of(other.getDigestType());
        }
        builder.ctime = other.getCtime();
        /** Hack to get around fact that ctime was never versioned correctly */
        builder.storeCtime = LedgerMetadataUtils.shouldStoreCtime(other);
        builder.customMetadata = ImmutableMap.copyOf(other.getCustomMetadata());
        return builder;
    }

    /** Sets the ledger id; must be set (to a non-negative value) before {@link #build()}. */
    public LedgerMetadataBuilder withId(long ledgerId) {
        this.ledgerId = ledgerId;
        return this;
    }

    /**
     * Sets the metadata serialization format version.
     * Versions outside the supported range are silently ignored and the
     * builder keeps its current version.
     */
    public LedgerMetadataBuilder withMetadataFormatVersion(int version) {
        if (version < METADATA_FORMAT_VERSION_1 || version > CURRENT_METADATA_FORMAT_VERSION) {
            return this;
        }
        this.metadataFormatVersion = version;
        return this;
    }

    /** Sets the ledger password; the array is defensively copied. */
    public LedgerMetadataBuilder withPassword(byte[] password) {
        this.password = Optional.of(Arrays.copyOf(password, password.length));
        return this;
    }

    /** Sets the digest type used to verify entry integrity. */
    public LedgerMetadataBuilder withDigestType(DigestType digestType) {
        this.digestType = Optional.of(digestType);
        return this;
    }

    /**
     * Sets the ensemble size. Must be called before any ensemble is added,
     * since added ensembles are validated against this size.
     */
    public LedgerMetadataBuilder withEnsembleSize(int ensembleSize) {
        checkState(ensembles.isEmpty(), "Can only set ensemble size before adding ensembles to the builder");
        this.ensembleSize = ensembleSize;
        return this;
    }

    /** Sets the write quorum size (number of bookies each entry is written to). */
    public LedgerMetadataBuilder withWriteQuorumSize(int writeQuorumSize) {
        this.writeQuorumSize = writeQuorumSize;
        return this;
    }

    /** Sets the ack quorum size (acks required before a write is considered complete). */
    public LedgerMetadataBuilder withAckQuorumSize(int ackQuorumSize) {
        this.ackQuorumSize = ackQuorumSize;
        return this;
    }

    /**
     * Appends a new ensemble starting at {@code firstEntry}.
     * The first entry must be strictly greater than any existing ensemble key.
     */
    public LedgerMetadataBuilder newEnsembleEntry(long firstEntry, List<BookieId> ensemble) {
        checkArgument(ensemble.size() == ensembleSize,
                      "Size of passed in ensemble must match the ensembleSize of the builder");
        checkArgument(ensembles.isEmpty() || firstEntry > ensembles.lastKey(),
                      "New entry must have a first entry greater than any existing ensemble key");
        ensembles.put(firstEntry, ensemble);
        return this;
    }

    /** Replaces the ensemble already registered at {@code firstEntry}. */
    public LedgerMetadataBuilder replaceEnsembleEntry(long firstEntry, List<BookieId> ensemble) {
        checkArgument(ensemble.size() == ensembleSize,
                      "Size of passed in ensemble must match the ensembleSize of the builder");
        checkArgument(ensembles.containsKey(firstEntry),
                      "Ensemble must replace an existing ensemble in the ensemble map");
        ensembles.put(firstEntry, ensemble);
        return this;
    }

    /** Marks the ledger as being recovered. */
    public LedgerMetadataBuilder withInRecoveryState() {
        this.state = State.IN_RECOVERY;
        return this;
    }

    /** Marks the ledger as closed. */
    public LedgerMetadataBuilder withClosedState() {
        this.state = State.CLOSED;
        return this;
    }

    /** Sets the id of the last entry in the (closed) ledger. */
    public LedgerMetadataBuilder withLastEntryId(long lastEntryId) {
        this.lastEntryId = Optional.of(lastEntryId);
        return this;
    }

    /** Sets the total length, in bytes, of the (closed) ledger. */
    public LedgerMetadataBuilder withLength(long length) {
        this.length = Optional.of(length);
        return this;
    }

    /** Sets application-provided custom metadata; the map is copied immutably. */
    public LedgerMetadataBuilder withCustomMetadata(Map<String, byte[]> customMetadata) {
        this.customMetadata = ImmutableMap.copyOf(customMetadata);
        return this;
    }

    /** Sets the ledger creation time. */
    public LedgerMetadataBuilder withCreationTime(long ctime) {
        this.ctime = ctime;
        return this;
    }

    /** Controls whether the creation time is persisted with the metadata. */
    public LedgerMetadataBuilder storingCreationTime(boolean storing) {
        this.storeCtime = storing;
        return this;
    }

    /** Sets the creator token (defaults to {@code BLANK_CTOKEN}). */
    public LedgerMetadataBuilder withCToken(long cToken) {
        this.cToken = cToken;
        return this;
    }

    /**
     * Builds the immutable {@link LedgerMetadata}.
     *
     * @throws IllegalArgumentException if the ledger id is unset, or the
     *         quorum sizes violate ensembleSize >= writeQuorum >= ackQuorum
     */
    public LedgerMetadata build() {
        checkArgument(ledgerId >= 0, "Ledger id must be set");
        checkArgument(ensembleSize >= writeQuorumSize, "Write quorum must be less or equal to ensemble size");
        checkArgument(writeQuorumSize >= ackQuorumSize, "Write quorum must be greater or equal to ack quorum");
        return new LedgerMetadataImpl(ledgerId, metadataFormatVersion,
                                      ensembleSize, writeQuorumSize, ackQuorumSize,
                                      state, lastEntryId, length, ensembles,
                                      digestType, password, ctime, storeCtime,
                                      cToken,
                                      customMetadata);
    }
}
| 326 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/ClientInternalConf.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.feature.Feature;
import org.apache.bookkeeper.feature.FeatureProvider;
import org.apache.bookkeeper.feature.SettableFeatureProvider;
/**
 * Immutable snapshot of the client-side settings derived from a
 * {@link ClientConfiguration} plus a {@link FeatureProvider}.
 */
class ClientInternalConf {
    final Feature disableEnsembleChangeFeature;
    final boolean delayEnsembleChange;
    final Optional<SpeculativeRequestExecutionPolicy> readSpeculativeRequestPolicy;
    final Optional<SpeculativeRequestExecutionPolicy> readLACSpeculativeRequestPolicy;
    final int explicitLacInterval;
    final long waitForWriteSetMs;
    final long addEntryQuorumTimeoutNanos;
    final boolean enableParallelRecoveryRead;
    final boolean enableReorderReadSequence;
    final boolean enableStickyReads;
    final int recoveryReadBatchSize;
    final int throttleValue;
    final int bookieFailureHistoryExpirationMSec;
    final int maxAllowedEnsembleChanges;
    final long timeoutMonitorIntervalSec;
    final boolean enableBookieFailureTracking;
    final boolean useV2WireProtocol;
    final boolean enforceMinNumFaultDomainsForWrite;

    /** Snapshot derived from a fresh, default {@link ClientConfiguration}. */
    static ClientInternalConf defaultValues() {
        return fromConfig(new ClientConfiguration());
    }

    /** Snapshot derived from {@code conf} with all features disabled. */
    static ClientInternalConf fromConfig(ClientConfiguration conf) {
        return fromConfigAndFeatureProvider(conf, SettableFeatureProvider.DISABLE_ALL);
    }

    /** Snapshot derived from {@code conf} and the given feature provider. */
    static ClientInternalConf fromConfigAndFeatureProvider(ClientConfiguration conf,
                                                           FeatureProvider featureProvider) {
        return new ClientInternalConf(conf, featureProvider);
    }

    private ClientInternalConf(ClientConfiguration conf,
                               FeatureProvider featureProvider) {
        // Ensemble-change behaviour.
        this.disableEnsembleChangeFeature = featureProvider.getFeature(conf.getDisableEnsembleChangeFeatureName());
        this.delayEnsembleChange = conf.getDelayEnsembleChange();
        this.maxAllowedEnsembleChanges = conf.getMaxAllowedEnsembleChanges();
        // Read and recovery behaviour.
        this.explicitLacInterval = conf.getExplictLacInterval();
        this.enableReorderReadSequence = conf.isReorderReadSequenceEnabled();
        this.enableParallelRecoveryRead = conf.getEnableParallelRecoveryRead();
        this.recoveryReadBatchSize = conf.getRecoveryReadBatchSize();
        this.enableStickyReads = conf.isStickyReadsEnabled();
        // Timeouts, backpressure and throttling.
        this.waitForWriteSetMs = conf.getWaitTimeoutOnBackpressureMillis();
        this.addEntryQuorumTimeoutNanos = TimeUnit.SECONDS.toNanos(conf.getAddEntryQuorumTimeout());
        this.timeoutMonitorIntervalSec = conf.getTimeoutMonitorIntervalSec();
        this.throttleValue = conf.getThrottleValue();
        // Failure tracking and protocol options.
        this.bookieFailureHistoryExpirationMSec = conf.getBookieFailureHistoryExpirationMSec();
        this.enableBookieFailureTracking = conf.getEnableBookieFailureTracking();
        this.useV2WireProtocol = conf.getUseV2WireProtocol();
        this.enforceMinNumFaultDomainsForWrite = conf.getEnforceMinNumFaultDomainsForWrite();
        // Speculative reads are enabled only when a positive first timeout is configured.
        this.readSpeculativeRequestPolicy = conf.getFirstSpeculativeReadTimeout() > 0
                ? Optional.of(new DefaultSpeculativeRequestExecutionPolicy(
                        conf.getFirstSpeculativeReadTimeout(),
                        conf.getMaxSpeculativeReadTimeout(),
                        conf.getSpeculativeReadTimeoutBackoffMultiplier()))
                : Optional.empty();
        this.readLACSpeculativeRequestPolicy = conf.getFirstSpeculativeReadLACTimeout() > 0
                ? Optional.of(new DefaultSpeculativeRequestExecutionPolicy(
                        conf.getFirstSpeculativeReadLACTimeout(),
                        conf.getMaxSpeculativeReadLACTimeout(),
                        conf.getSpeculativeReadLACTimeoutBackoffMultiplier()))
                : Optional.empty();
    }
}
| 327 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/AsyncCallback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.client;
import java.util.Enumeration;
import org.apache.bookkeeper.common.annotation.InterfaceAudience;
import org.apache.bookkeeper.common.annotation.InterfaceStability;
/**
 * Defines all the callback interfaces for the async operations in bookkeeper client.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface AsyncCallback {
    /**
     * Async Callback for adding entries to ledgers with latency information.
     *
     * @since 4.7
     */
    @InterfaceAudience.Public
    @InterfaceStability.Evolving
    interface AddCallbackWithLatency {
        /**
         * Callback declaration which additionally passes quorum write complete latency.
         *
         * @param rc
         *          return code
         * @param lh
         *          ledger handle
         * @param entryId
         *          entry identifier
         * @param qwcLatency
         *          QuorumWriteComplete Latency
         * @param ctx
         *          context object
         */
        void addCompleteWithLatency(int rc, LedgerHandle lh, long entryId, long qwcLatency, Object ctx);
    }
    /**
     * Async Callback for adding entries to ledgers.
     *
     * @since 4.0
     */
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    interface AddCallback extends AddCallbackWithLatency {
        /**
         * Callback to implement if latency information is not desired.
         *
         * @param rc
         *          return code
         * @param lh
         *          ledger handle
         * @param entryId
         *          entry identifier
         * @param ctx
         *          context object
         */
        void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx);
        /**
         * Callback declaration which additionally passes quorum write complete latency.
         * The default implementation drops the latency and delegates to
         * {@link #addComplete(int, LedgerHandle, long, Object)}.
         *
         * @param rc
         *          return code
         * @param lh
         *          ledger handle
         * @param entryId
         *          entry identifier
         * @param qwcLatency
         *          QuorumWriteComplete Latency
         * @param ctx
         *          context object
         */
        @Override
        default void addCompleteWithLatency(int rc, LedgerHandle lh, long entryId, long qwcLatency, Object ctx) {
            addComplete(rc, lh, entryId, ctx);
        }
    }
    /**
     * Async Callback for updating LAC for ledgers.
     *
     * @since 4.5
     */
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    interface AddLacCallback {
        /**
         * Callback declaration.
         *
         * @param rc
         *          return code
         * @param lh
         *          ledger handle
         * @param ctx
         *          context object
         */
        void addLacComplete(int rc, LedgerHandle lh, Object ctx);
    }
    /**
     * Async Callback for closing ledgers.
     *
     * @since 4.0
     */
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    interface CloseCallback {
        /**
         * Callback definition.
         *
         * @param rc
         *          return code
         * @param lh
         *          ledger handle
         * @param ctx
         *          context object
         */
        void closeComplete(int rc, LedgerHandle lh, Object ctx);
    }
    /**
     * Async Callback for creating ledgers.
     *
     * @since 4.0
     */
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    interface CreateCallback {
        /**
         * Declaration of callback method.
         *
         * @param rc
         *          return status
         * @param lh
         *          ledger handle
         * @param ctx
         *          context object
         */
        void createComplete(int rc, LedgerHandle lh, Object ctx);
    }
    /**
     * Async Callback for opening ledgers.
     *
     * @since 4.0
     */
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    interface OpenCallback {
        /**
         * Callback for asynchronous call to open ledger.
         *
         * @param rc
         *          Return code
         * @param lh
         *          ledger handle
         * @param ctx
         *          context object
         */
        void openComplete(int rc, LedgerHandle lh, Object ctx);
    }
    /**
     * Async Callback for reading entries from ledgers.
     *
     * @since 4.0
     */
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    interface ReadCallback {
        /**
         * Callback declaration.
         *
         * @param rc
         *          return code
         * @param lh
         *          ledger handle
         * @param seq
         *          sequence of entries
         * @param ctx
         *          context object
         */
        void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq,
                          Object ctx);
    }
    /**
     * Async Callback for deleting ledgers.
     *
     * @since 4.0
     */
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    interface DeleteCallback {
        /**
         * Callback definition for delete operations.
         *
         * @param rc
         *          return code
         * @param ctx
         *          context object
         */
        void deleteComplete(int rc, Object ctx);
    }
    /**
     * Async Callback for reading LAC for ledgers.
     *
     * @since 4.0
     */
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    interface ReadLastConfirmedCallback {
        /**
         * Callback definition for read-last-confirmed operations.
         *
         * @param rc Return code
         * @param lastConfirmed The entry id of the last confirmed write or
         *                      {@link LedgerHandle#INVALID_ENTRY_ID INVALID_ENTRY_ID}
         *                      if no entry has been confirmed
         * @param ctx
         *          context object
         */
        void readLastConfirmedComplete(int rc, long lastConfirmed, Object ctx);
    }
    /**
     * Async Callback for long polling read request.
     *
     * @since 4.5
     */
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    interface ReadLastConfirmedAndEntryCallback {
        /**
         * Callback definition for bookie operation that allows reading the last add confirmed
         * along with an entry within the last add confirmed range.
         *
         * @param rc Return code
         * @param lastConfirmed The entry id of the last confirmed write or
         *                      {@link LedgerHandle#INVALID_ENTRY_ID INVALID_ENTRY_ID}
         *                      if no entry has been confirmed
         * @param entry The entry since the lastAddConfirmed entry that was specified when the request
         *              was initiated
         * @param ctx context object
         */
        void readLastConfirmedAndEntryComplete(int rc, long lastConfirmed, LedgerEntry entry, Object ctx);
    }
    /**
     * Async Callback for recovering ledgers.
     *
     * @since 4.0
     */
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    interface RecoverCallback {
        /**
         * Callback definition for bookie recover operations.
         *
         * @param rc
         *          return code
         * @param ctx
         *          context object
         */
        void recoverComplete(int rc, Object ctx);
    }
    /**
     * Async Callback for checking if a ledger is closed.
     *
     * @since 4.0
     */
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    interface IsClosedCallback {
        /**
         * Callback definition for isClosed operation.
         *
         * @param rc
         *          return code
         * @param isClosed
         *          true if ledger is closed
         * @param ctx
         *          context object
         */
        void isClosedComplete(int rc, boolean isClosed, Object ctx);
    }
}
| 328 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerOpenOp.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import static org.apache.bookkeeper.client.BookKeeper.DigestType.fromApiDigestType;
import java.security.GeneralSecurityException;
import java.util.Arrays;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.bookkeeper.client.AsyncCallback.OpenCallback;
import org.apache.bookkeeper.client.AsyncCallback.ReadLastConfirmedCallback;
import org.apache.bookkeeper.client.BookKeeper.DigestType;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncOpenCallback;
import org.apache.bookkeeper.client.api.BKException.Code;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.client.api.ReadHandle;
import org.apache.bookkeeper.client.impl.OpenBuilderBase;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.util.MathUtils;
import org.apache.bookkeeper.util.OrderedGenericCallback;
import org.apache.bookkeeper.versioning.Versioned;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Encapsulates the ledger open operation.
 *
 */
class LedgerOpenOp {
    static final Logger LOG = LoggerFactory.getLogger(LedgerOpenOp.class);
    final BookKeeper bk;
    final long ledgerId;
    final OpenCallback cb;
    final Object ctx;
    ReadOnlyLedgerHandle lh;
    final byte[] passwd;
    // fence and recover the ledger by default; cleared by initiateWithoutRecovery()
    boolean doRecovery = true;
    // true when credentials come from the client configuration (admin path)
    boolean administrativeOpen = false;
    long startTime;
    final OpStatsLogger openOpLogger;
    final DigestType suggestedDigestType;
    final boolean enableDigestAutodetection;
    /**
     * Constructor.
     *
     * @param bk
     * @param ledgerId
     * @param digestType Ignored if conf.getEnableDigestTypeAutodetection() is true
     * @param passwd
     * @param cb
     * @param ctx
     */
    public LedgerOpenOp(BookKeeper bk, BookKeeperClientStats clientStats,
                        long ledgerId, DigestType digestType, byte[] passwd,
                        OpenCallback cb, Object ctx) {
        this.bk = bk;
        this.ledgerId = ledgerId;
        this.passwd = passwd;
        this.cb = cb;
        this.ctx = ctx;
        this.enableDigestAutodetection = bk.getConf().getEnableDigestTypeAutodetection();
        this.suggestedDigestType = digestType;
        this.openOpLogger = clientStats.getOpenOpLogger();
    }
    /**
     * Administrative-open constructor: password and digest type are taken from
     * the client configuration's bookie-recovery settings instead of the caller.
     */
    public LedgerOpenOp(BookKeeper bk, BookKeeperClientStats clientStats,
                        long ledgerId, OpenCallback cb, Object ctx) {
        this.bk = bk;
        this.ledgerId = ledgerId;
        this.cb = cb;
        this.ctx = ctx;
        this.passwd = bk.getConf().getBookieRecoveryPasswd();
        this.administrativeOpen = true;
        this.enableDigestAutodetection = false;
        this.suggestedDigestType = bk.conf.getBookieRecoveryDigestType();
        this.openOpLogger = clientStats.getOpenOpLogger();
    }
    /**
     * Initiates the ledger open operation.
     */
    public void initiate() {
        startTime = MathUtils.nowInNano();
        /**
         * Asynchronously read the ledger metadata node.
         */
        bk.getLedgerManager().readLedgerMetadata(ledgerId)
            .thenAcceptAsync(this::openWithMetadata, bk.getScheduler().chooseThread(ledgerId))
            .exceptionally(exception -> {
                openComplete(BKException.getExceptionCode(exception), null);
                return null;
            });
    }
    /**
     * Initiates the ledger open operation without recovery.
     */
    public void initiateWithoutRecovery() {
        this.doRecovery = false;
        initiate();
    }
    // Closes the handle if one was created; completed future otherwise.
    private CompletableFuture<Void> closeLedgerHandleAsync() {
        if (lh != null) {
            return lh.closeAsync();
        }
        return CompletableFuture.completedFuture(null);
    }
    // Continues the open once metadata is available: resolves digest/password,
    // creates the read handle, then either recovers the ledger or reads the LAC.
    private void openWithMetadata(Versioned<LedgerMetadata> versionedMetadata) {
        LedgerMetadata metadata = versionedMetadata.getValue();
        final byte[] passwd;
        // we should use digest type from metadata *ONLY* when:
        // 1) digest type is stored in metadata
        // 2) `autodetection` is enabled
        DigestType digestType;
        if (enableDigestAutodetection && metadata.hasPassword()) {
            digestType = fromApiDigestType(metadata.getDigestType());
        } else {
            digestType = suggestedDigestType;
        }
        /* For an administrative open, the default passwords
         * are read from the configuration, but if the metadata
         * already contains passwords, use these instead. */
        if (administrativeOpen && metadata.hasPassword()) {
            passwd = metadata.getPassword();
            digestType = fromApiDigestType(metadata.getDigestType());
        } else {
            passwd = this.passwd;
            if (metadata.hasPassword()) {
                if (!Arrays.equals(passwd, metadata.getPassword())) {
                    LOG.error("Provided passwd does not match that in metadata");
                    openComplete(BKException.Code.UnauthorizedAccessException, null);
                    return;
                }
                // if `digest auto detection` is enabled, ignore the suggested digest type, this allows digest type
                // changes. e.g. moving from `crc32` to `crc32c`.
                if (suggestedDigestType != fromApiDigestType(metadata.getDigestType()) && !enableDigestAutodetection) {
                    LOG.error("Provided digest does not match that in metadata");
                    openComplete(BKException.Code.DigestMatchException, null);
                    return;
                }
            }
        }
        // get the ledger metadata back
        try {
            lh = new ReadOnlyLedgerHandle(bk.getClientCtx(), ledgerId, versionedMetadata, digestType,
                    passwd, !doRecovery);
        } catch (GeneralSecurityException e) {
            LOG.error("Security exception while opening ledger: " + ledgerId, e);
            openComplete(BKException.Code.DigestNotInitializedException, null);
            return;
        } catch (NumberFormatException e) {
            LOG.error("Incorrectly entered parameter throttle: " + bk.getConf().getThrottleValue(), e);
            openComplete(BKException.Code.IncorrectParameterException, null);
            return;
        }
        if (metadata.isClosed()) {
            // Ledger was closed properly
            openComplete(BKException.Code.OK, lh);
            return;
        }
        if (doRecovery) {
            // Recovery path: fence the ledger and recover unconfirmed entries;
            // on failure, close the handle before reporting the error.
            lh.recover(new OrderedGenericCallback<Void>(bk.getMainWorkerPool(), ledgerId) {
                @Override
                public void safeOperationComplete(int rc, Void result) {
                    if (rc == BKException.Code.OK) {
                        openComplete(BKException.Code.OK, lh);
                    } else {
                        closeLedgerHandleAsync().whenComplete((ignore, ex) -> {
                            if (ex != null) {
                                LOG.error("Ledger {} close failed", ledgerId, ex);
                            }
                            if (rc == BKException.Code.UnauthorizedAccessException
                                    || rc == BKException.Code.TimeoutException) {
                                openComplete(bk.getReturnRc(rc), null);
                            } else {
                                openComplete(bk.getReturnRc(BKException.Code.LedgerRecoveryException), null);
                            }
                        });
                    }
                }
                @Override
                public String toString() {
                    return String.format("Recover(%d)", ledgerId);
                }
            });
        } else {
            // No-recovery path: just learn the current last-add-confirmed.
            lh.asyncReadLastConfirmed(new ReadLastConfirmedCallback() {
                @Override
                public void readLastConfirmedComplete(int rc,
                        long lastConfirmed, Object ctx) {
                    if (rc == BKException.Code.TimeoutException) {
                        closeLedgerHandleAsync().whenComplete((r, ex) -> {
                            if (ex != null) {
                                LOG.error("Ledger {} close failed", ledgerId, ex);
                            }
                            openComplete(bk.getReturnRc(rc), null);
                        });
                    } else if (rc != BKException.Code.OK) {
                        closeLedgerHandleAsync().whenComplete((r, ex) -> {
                            if (ex != null) {
                                LOG.error("Ledger {} close failed", ledgerId, ex);
                            }
                            openComplete(bk.getReturnRc(BKException.Code.ReadException), null);
                        });
                    } else {
                        lh.lastAddConfirmed = lh.lastAddPushed = lastConfirmed;
                        openComplete(BKException.Code.OK, lh);
                    }
                }
            }, null);
        }
    }
    // Records open latency stats and invokes the user callback, on the handle's
    // ordered executor when a handle exists.
    void openComplete(int rc, LedgerHandle lh) {
        if (BKException.Code.OK != rc) {
            openOpLogger.registerFailedEvent(MathUtils.elapsedNanos(startTime), TimeUnit.NANOSECONDS);
        } else {
            openOpLogger.registerSuccessfulEvent(MathUtils.elapsedNanos(startTime), TimeUnit.NANOSECONDS);
        }
        if (lh != null) { // lh is null in case of errors
            lh.executeOrdered(() -> cb.openComplete(rc, lh, ctx));
        } else {
            cb.openComplete(rc, null, ctx);
        }
    }
    /** Adapter exposing LedgerOpenOp through the public open-builder API. */
    static final class OpenBuilderImpl extends OpenBuilderBase {
        private final BookKeeper bk;
        OpenBuilderImpl(BookKeeper bookkeeper) {
            this.bk = bookkeeper;
        }
        @Override
        public CompletableFuture<ReadHandle> execute() {
            CompletableFuture<ReadHandle> future = new CompletableFuture<>();
            SyncOpenCallback callback = new SyncOpenCallback(future);
            open(callback);
            return future;
        }
        // Validates the builder arguments and kicks off the open under the
        // client's close lock so a concurrent BookKeeper#close is observed.
        private void open(OpenCallback cb) {
            final int validateRc = validate();
            if (Code.OK != validateRc) {
                cb.openComplete(validateRc, null, null);
                return;
            }
            LedgerOpenOp op = new LedgerOpenOp(bk, bk.getClientCtx().getClientStats(),
                                               ledgerId, fromApiDigestType(digestType),
                                               password, cb, null);
            ReentrantReadWriteLock closeLock = bk.getCloseLock();
            closeLock.readLock().lock();
            try {
                if (bk.isClosed()) {
                    cb.openComplete(BKException.Code.ClientClosedException, null, null);
                    return;
                }
                if (recovery) {
                    op.initiate();
                } else {
                    op.initiateWithoutRecovery();
                }
            } finally {
                closeLock.readLock().unlock();
            }
        }
    }
}
| 329 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeperClientStats.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import org.apache.bookkeeper.client.impl.BookKeeperClientStatsImpl;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
/**
* List of constants for defining client stats names.
*/
public interface BookKeeperClientStats {
    String CATEGORY_CLIENT = "client";
    String CLIENT_SCOPE = "bookkeeper_client";

    // Metadata Operations
    String CREATE_OP = "LEDGER_CREATE";
    String DELETE_OP = "LEDGER_DELETE";
    String OPEN_OP = "LEDGER_OPEN";
    String RECOVER_OP = "LEDGER_RECOVER";
    String LEDGER_RECOVER_READ_ENTRIES = "LEDGER_RECOVER_READ_ENTRIES";
    String LEDGER_RECOVER_ADD_ENTRIES = "LEDGER_RECOVER_ADD_ENTRIES";
    String LEDGER_ENSEMBLE_BOOKIE_DISTRIBUTION = "LEDGER_ENSEMBLE_BOOKIE_DISTRIBUTION";

    // Data Operations
    String ADD_OP = "ADD_ENTRY";
    String ADD_OP_UR = "ADD_ENTRY_UR"; // Under Replicated during AddEntry.
    String FORCE_OP = "FORCE"; // Number of force ledger operations
    String READ_OP = "READ_ENTRY";
    // Corrupted entry (Digest Mismatch/ Under Replication) detected during ReadEntry
    String READ_OP_DM = "READ_ENTRY_DM";
    String WRITE_LAC_OP = "WRITE_LAC";
    String READ_LAC_OP = "READ_LAC";
    String READ_LAST_CONFIRMED_AND_ENTRY = "READ_LAST_CONFIRMED_AND_ENTRY";
    String READ_LAST_CONFIRMED_AND_ENTRY_RESPONSE = "READ_LAST_CONFIRMED_AND_ENTRY_RESPONSE";
    String PENDING_ADDS = "NUM_PENDING_ADD";
    String ENSEMBLE_CHANGES = "NUM_ENSEMBLE_CHANGE";
    String LAC_UPDATE_HITS = "LAC_UPDATE_HITS";
    String LAC_UPDATE_MISSES = "LAC_UPDATE_MISSES";
    String GET_BOOKIE_INFO_OP = "GET_BOOKIE_INFO";
    String SPECULATIVE_READ_COUNT = "SPECULATIVE_READ_COUNT";
    String READ_REQUESTS_REORDERED = "READ_REQUESTS_REORDERED";
    String GET_LIST_OF_ENTRIES_OF_LEDGER_OP = "GET_LIST_OF_ENTRIES_OF_LEDGER";

    // per channel stats
    String CHANNEL_SCOPE = "per_channel_bookie_client";

    String CHANNEL_READ_OP = "READ_ENTRY";
    String CHANNEL_TIMEOUT_READ = "TIMEOUT_READ_ENTRY";
    String CHANNEL_ADD_OP = "ADD_ENTRY";
    String CHANNEL_TIMEOUT_ADD = "TIMEOUT_ADD_ENTRY";
    String CHANNEL_WRITE_LAC_OP = "WRITE_LAC";
    String CHANNEL_FORCE_OP = "FORCE";
    String CHANNEL_TIMEOUT_WRITE_LAC = "TIMEOUT_WRITE_LAC";
    String CHANNEL_TIMEOUT_FORCE = "TIMEOUT_FORCE";
    String CHANNEL_READ_LAC_OP = "READ_LAC";
    String CHANNEL_TIMEOUT_READ_LAC = "TIMEOUT_READ_LAC";
    String TIMEOUT_GET_BOOKIE_INFO = "TIMEOUT_GET_BOOKIE_INFO";
    String CHANNEL_START_TLS_OP = "START_TLS";
    String CHANNEL_TIMEOUT_START_TLS_OP = "TIMEOUT_START_TLS";
    String TIMEOUT_GET_LIST_OF_ENTRIES_OF_LEDGER = "TIMEOUT_GET_LIST_OF_ENTRIES_OF_LEDGER";

    // netty channel / connection stats
    String NETTY_EXCEPTION_CNT = "NETTY_EXCEPTION_CNT";
    String CLIENT_CHANNEL_WRITE_WAIT = "CLIENT_CHANNEL_WRITE_WAIT";
    String CLIENT_CONNECT_TIMER = "CLIENT_CONNECT_TIMER";
    String ADD_OP_OUTSTANDING = "ADD_OP_OUTSTANDING";
    String READ_OP_OUTSTANDING = "READ_OP_OUTSTANDING";
    String NETTY_OPS = "NETTY_OPS";
    String ACTIVE_NON_TLS_CHANNEL_COUNTER = "ACTIVE_NON_TLS_CHANNEL_COUNTER";
    String ACTIVE_TLS_CHANNEL_COUNTER = "ACTIVE_TLS_CHANNEL_COUNTER";
    String FAILED_CONNECTION_COUNTER = "FAILED_CONNECTION_COUNTER";
    String FAILED_TLS_HANDSHAKE_COUNTER = "FAILED_TLS_HANDSHAKE_COUNTER";

    // placementpolicy stats
    String NUM_WRITABLE_BOOKIES_IN_DEFAULT_RACK = "NUM_WRITABLE_BOOKIES_IN_DEFAULT_RACK";

    String WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS = "WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS";
    String WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS_LATENCY =
            "WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS_LATENCY";
    // NOTE(review): the constant name says TIMED_OUT but the metric string is
    // "WRITE_TIME_OUT_...". Renaming the string would break existing dashboards
    // and alerts keyed on the emitted metric name, so the mismatch is left as-is.
    String WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS = "WRITE_TIME_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS";
    String NUM_WRITABLE_BOOKIES_IN_DEFAULT_FAULTDOMAIN = "NUM_WRITABLE_BOOKIES_IN_DEFAULT_FAULTDOMAIN";

    String BOOKIE_LABEL = "bookie";

    // Latency loggers for ledger metadata operations.
    OpStatsLogger getCreateOpLogger();
    OpStatsLogger getOpenOpLogger();
    OpStatsLogger getDeleteOpLogger();
    OpStatsLogger getRecoverOpLogger();
    // Latency loggers for data-path operations (reads, long-poll reads, adds, LAC).
    OpStatsLogger getReadOpLogger();
    OpStatsLogger getReadLacAndEntryOpLogger();
    OpStatsLogger getReadLacAndEntryRespLogger();
    OpStatsLogger getAddOpLogger();
    OpStatsLogger getForceOpLogger();
    OpStatsLogger getWriteLacOpLogger();
    OpStatsLogger getReadLacOpLogger();
    // Counters tracked during ledger recovery.
    OpStatsLogger getRecoverAddCountLogger();
    OpStatsLogger getRecoverReadCountLogger();
    // Counters for degraded reads/writes (digest mismatch, under-replication).
    Counter getReadOpDmCounter();
    Counter getAddOpUrCounter();
    Counter getSpeculativeReadCounter();
    // Per-bookie counter of how often the bookie appears in new ensembles.
    Counter getEnsembleBookieDistributionCounter(String bookie);
    Counter getEnsembleChangeCounter();
    Counter getLacUpdateHitsCounter();
    Counter getLacUpdateMissesCounter();
    OpStatsLogger getClientChannelWriteWaitLogger();
    // Placement-policy fault-domain stall metrics.
    OpStatsLogger getWriteDelayedDueToNotEnoughFaultDomainsLatency();
    Counter getWriteDelayedDueToNotEnoughFaultDomains();
    Counter getWriteTimedOutDueToNotEnoughFaultDomains();

    // Registers a gauge reporting the number of in-flight add operations.
    void registerPendingAddsGauge(Gauge<Integer> gauge);

    // Factory for the default implementation backed by the given stats logger.
    static BookKeeperClientStats newInstance(StatsLogger stats) {
        return new BookKeeperClientStatsImpl(stats);
    }
}
| 330 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/ReadLastConfirmedOp.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import com.google.common.annotations.VisibleForTesting;
import io.netty.buffer.ByteBuf;
import java.util.List;
import org.apache.bookkeeper.client.BKException.BKDigestMatchException;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieClient;
import org.apache.bookkeeper.proto.BookieProtocol;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryCallback;
import org.apache.bookkeeper.proto.checksum.DigestManager;
import org.apache.bookkeeper.proto.checksum.DigestManager.RecoveryData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class encapsulated the read last confirmed operation.
*
*/
class ReadLastConfirmedOp implements ReadEntryCallback {
    static final Logger LOG = LoggerFactory.getLogger(ReadLastConfirmedOp.class);

    private final long ledgerId;
    private final byte[] ledgerKey;
    private final BookieClient bookieClient;
    private final DigestManager digestManager;
    // Mutable state below is guarded by 'this': readEntryComplete is synchronized.
    private int numResponsesPending;
    private RecoveryData maxRecoveredData;
    private volatile boolean completed = false;
    private int lastSeenError = BKException.Code.ReadException;

    private final LastConfirmedDataCallback cb;
    private final DistributionSchedule.QuorumCoverageSet coverageSet;
    private final List<BookieId> currentEnsemble;

    /**
     * Wrapper to get all recovered data from the request.
     */
    interface LastConfirmedDataCallback {
        void readLastConfirmedDataComplete(int rc, RecoveryData data);
    }

    /**
     * Creates an operation that queries every bookie of the ensemble for the
     * last-add-confirmed entry and reports the maximum recovered value once
     * enough responses are in to cover all write quorums.
     *
     * @param bookieClient  client used to issue the per-bookie reads
     * @param schedule      distribution schedule supplying the quorum coverage set
     * @param digestManager used to verify the digest of each returned entry
     * @param ledgerId      ledger whose LAC is being read
     * @param ensemble      current ensemble to query
     * @param ledgerKey     master key, used only when fencing
     * @param cb            callback receiving the final result
     */
    public ReadLastConfirmedOp(BookieClient bookieClient,
                               DistributionSchedule schedule,
                               DigestManager digestManager,
                               long ledgerId,
                               List<BookieId> ensemble,
                               byte[] ledgerKey,
                               LastConfirmedDataCallback cb) {
        this.cb = cb;
        this.bookieClient = bookieClient;
        this.maxRecoveredData = new RecoveryData(LedgerHandle.INVALID_ENTRY_ID, 0);
        this.numResponsesPending = ensemble.size();
        this.coverageSet = schedule.getCoverageSet();
        this.currentEnsemble = ensemble;
        this.ledgerId = ledgerId;
        this.ledgerKey = ledgerKey;
        this.digestManager = digestManager;
    }

    /** Issues a plain LAC read to every bookie in the ensemble. */
    public void initiate() {
        for (int i = 0; i < currentEnsemble.size(); i++) {
            bookieClient.readEntry(currentEnsemble.get(i),
                                   ledgerId,
                                   BookieProtocol.LAST_ADD_CONFIRMED,
                                   this, i, BookieProtocol.FLAG_NONE);
        }
    }

    /** Issues a fencing LAC read to every bookie in the ensemble. */
    public void initiateWithFencing() {
        for (int i = 0; i < currentEnsemble.size(); i++) {
            bookieClient.readEntry(currentEnsemble.get(i),
                                   ledgerId,
                                   BookieProtocol.LAST_ADD_CONFIRMED,
                                   this, i, BookieProtocol.FLAG_DO_FENCING,
                                   ledgerKey);
        }
    }

    @Override
    public synchronized void readEntryComplete(final int rc, final long ledgerId, final long entryId,
                                               final ByteBuf buffer, final Object ctx) {
        int bookieIndex = (Integer) ctx;

        // add the response to coverage set
        coverageSet.addBookie(bookieIndex, rc);

        numResponsesPending--;
        boolean heardValidResponse = false;
        if (rc == BKException.Code.OK) {
            try {
                RecoveryData recoveryData = digestManager.verifyDigestAndReturnLastConfirmed(buffer);
                // Keep the largest LAC seen across all responding bookies.
                if (recoveryData.getLastAddConfirmed() > maxRecoveredData.getLastAddConfirmed()) {
                    maxRecoveredData = recoveryData;
                }
                heardValidResponse = true;
            } catch (BKDigestMatchException e) {
                // Too bad, this bookie didn't give us a valid answer, we
                // still might be able to recover though so continue
                LOG.error("Mac mismatch for ledger: {}, entry: {} while reading last entry from bookie: {}",
                        ledgerId, entryId, currentEnsemble.get(bookieIndex));
            }
        }

        if (rc == BKException.Code.NoSuchLedgerExistsException || rc == BKException.Code.NoSuchEntryException) {
            // this still counts as a valid response, e.g., if the client crashed without writing any entry
            heardValidResponse = true;
        }

        if (rc == BKException.Code.UnauthorizedAccessException && !completed) {
            // Wrong master key: fail immediately with whatever we recovered so far.
            cb.readLastConfirmedDataComplete(rc, maxRecoveredData);
            completed = true;
        }

        if (!heardValidResponse && BKException.Code.OK != rc) {
            lastSeenError = rc;
        }

        // other return codes dont count as valid responses
        if (heardValidResponse
            && coverageSet.checkCovered()
            && !completed) {
            completed = true;
            if (LOG.isDebugEnabled()) {
                LOG.debug("Read Complete with enough validResponses for ledger: {}, entry: {}",
                          ledgerId, entryId);
            }

            cb.readLastConfirmedDataComplete(BKException.Code.OK, maxRecoveredData);
            return;
        }

        if (numResponsesPending == 0 && !completed) {
            // Every bookie has answered but we never reached quorum coverage.
            LOG.error("While readLastConfirmed ledger: {} did not hear success responses from all quorums, {}",
                      ledgerId, coverageSet);
            cb.readLastConfirmedDataComplete(lastSeenError, maxRecoveredData);
        }
    }

    @VisibleForTesting
    synchronized int getNumResponsesPending() {
        return numResponsesPending;
    }
}
| 331 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/WeightedRandomSelection.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import java.util.Collection;
import java.util.Map;
interface WeightedRandomSelection<T> {
    /** An object that exposes the weight used to bias random selection. */
    interface WeightedObject {
        long getWeight();
    }

    /** Replaces the weight map that drives subsequent random selections. */
    void updateMap(Map<T, WeightedObject> map);

    /** Returns the next element, chosen at random with weight-proportional bias. */
    T getNextRandom();

    // NOTE(review): semantics of 'selectedNodes' (candidate pool vs. exclusion
    // set) are not visible here — confirm against the implementations.
    T getNextRandom(Collection<T> selectedNodes);

    /** Caps how much more likely the heaviest element may be than the lightest. */
    void setMaxProbabilityMultiplier(int max);
}
| 332 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/RoundRobinDistributionSchedule.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableMap;
import io.netty.util.Recycler;
import io.netty.util.Recycler.Handle;
import java.util.Arrays;
import java.util.BitSet;
import java.util.Map;
import lombok.Getter;
import org.apache.bookkeeper.net.BookieId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A specific {@link DistributionSchedule} that places entries in round-robin
* fashion. For ensemble size 3, and quorum size 2, Entry 0 goes to bookie 0 and
* 1, entry 1 goes to bookie 1 and 2, and entry 2 goes to bookie 2 and 0, and so
* on.
*
*/
public class RoundRobinDistributionSchedule implements DistributionSchedule {
    private static final Logger LOG = LoggerFactory.getLogger(RoundRobinDistributionSchedule.class);

    @Getter
    private final int writeQuorumSize;
    private final int ackQuorumSize;
    private final int ensembleSize;

    /**
     * @param writeQuorumSize number of bookies each entry is replicated to
     * @param ackQuorumSize   number of acks needed to consider a write durable
     * @param ensembleSize    total number of bookies in the ensemble
     */
    public RoundRobinDistributionSchedule(int writeQuorumSize, int ackQuorumSize, int ensembleSize) {
        this.writeQuorumSize = writeQuorumSize;
        this.ackQuorumSize = ackQuorumSize;
        this.ensembleSize = ensembleSize;
    }

    @Override
    public WriteSet getWriteSet(long entryId) {
        return WriteSetImpl.create(ensembleSize, writeQuorumSize, entryId);
    }

    @Override
    public int getWriteSetBookieIndex(long entryId, int writeSetIndex) {
        // Round-robin: entry e lands on bookies (e, e+1, ..., e+wq-1) mod ensembleSize.
        return (int) (entryId + writeSetIndex) % ensembleSize;
    }

    @Override
    public WriteSet getEnsembleSet(long entryId) {
        // for long poll reads and force ledger , we are trying all the bookies in the ensemble
        // so we create a `WriteSet` with `writeQuorumSize == ensembleSize`.
        return WriteSetImpl.create(ensembleSize, ensembleSize /* writeQuorumSize */, entryId);
    }

    @VisibleForTesting
    static WriteSet writeSetFromValues(Integer... values) {
        WriteSetImpl writeSet = WriteSetImpl.create(0, 0, 0);
        writeSet.setSize(values.length);
        for (int i = 0; i < values.length; i++) {
            writeSet.set(i, values[i]);
        }
        return writeSet;
    }

    /**
     * Recyclable list of bookie indexes forming the write set of an entry.
     * Only the first {@code size} slots of the backing array are valid; the
     * array may be longer because instances are pooled via netty's Recycler.
     */
    private static class WriteSetImpl implements WriteSet {
        int[] array = null;
        int size;

        private final Handle<WriteSetImpl> recyclerHandle;
        private static final Recycler<WriteSetImpl> RECYCLER = new Recycler<WriteSetImpl>() {
            @Override
            protected WriteSetImpl newObject(
                    Recycler.Handle<WriteSetImpl> handle) {
                return new WriteSetImpl(handle);
            }
        };

        private WriteSetImpl(Handle<WriteSetImpl> recyclerHandle) {
            this.recyclerHandle = recyclerHandle;
        }

        static WriteSetImpl create(int ensembleSize,
                                   int writeQuorumSize,
                                   long entryId) {
            // Pooled instance; reset() re-initializes any stale state.
            WriteSetImpl writeSet = RECYCLER.get();
            writeSet.reset(ensembleSize, writeQuorumSize, entryId);
            return writeSet;
        }

        private void reset(int ensembleSize, int writeQuorumSize,
                           long entryId) {
            setSize(writeQuorumSize);
            for (int w = 0; w < writeQuorumSize; w++) {
                set(w, (int) ((entryId + w) % ensembleSize));
            }
        }

        private void setSize(int newSize) {
            // Grow the backing array lazily; shrinking only adjusts 'size'.
            if (array == null) {
                array = new int[newSize];
            } else if (newSize > array.length) {
                int[] newArray = new int[newSize];
                System.arraycopy(array, 0,
                                 newArray, 0, array.length);
                array = newArray;
            }
            size = newSize;
        }

        @Override
        public int size() {
            return size;
        }

        @Override
        public boolean contains(int i) {
            return indexOf(i) != -1;
        }

        @Override
        public int get(int i) {
            checkBounds(i);
            return array[i];
        }

        @Override
        public int set(int i, int index) {
            checkBounds(i);
            int oldVal = array[i];
            array[i] = index;
            return oldVal;
        }

        @Override
        public void sort() {
            Arrays.sort(array, 0, size);
        }

        @Override
        public int indexOf(int index) {
            for (int j = 0; j < size; j++) {
                if (array[j] == index) {
                    return j;
                }
            }
            return -1;
        }

        @Override
        public void addMissingIndices(int maxIndex) {
            if (size < maxIndex) {
                int oldSize = size;
                setSize(maxIndex);
                // Append every index in [0, maxIndex) not already present.
                for (int i = 0, j = oldSize;
                     i < maxIndex && j < maxIndex; i++) {
                    if (!contains(i)) {
                        set(j, i);
                        j++;
                    }
                }
            }
        }

        @Override
        public void moveAndShift(int from, int to) {
            checkBounds(from);
            checkBounds(to);
            // Move element 'from' to position 'to', shifting the elements in
            // between by one to preserve their relative order.
            if (from > to) {
                int tmp = array[from];
                for (int i = from; i > to; i--) {
                    array[i] = array[i - 1];
                }
                array[to] = tmp;
            } else if (from < to) {
                int tmp = array[from];
                for (int i = from; i < to; i++) {
                    array[i] = array[i + 1];
                }
                array[to] = tmp;
            }
        }

        @Override
        public void recycle() {
            recyclerHandle.recycle(this);
        }

        @Override
        public WriteSet copy() {
            WriteSetImpl copy = RECYCLER.get();
            copy.setSize(size);
            for (int i = 0; i < size; i++) {
                copy.set(i, array[i]);
            }
            return copy;
        }

        @Override
        public int hashCode() {
            // Fold the actual write-set entries into the hash. The previous
            // implementation accumulated the loop index instead of array[i],
            // so the hash depended only on 'size' and every pair of same-sized
            // write sets collided.
            int hash = 1;
            for (int i = 0; i < size; i++) {
                hash = hash * 31 + array[i];
            }
            return hash;
        }

        @Override
        public boolean equals(Object other) {
            if (other instanceof WriteSetImpl) {
                WriteSetImpl o = (WriteSetImpl) other;
                if (o.size() != size()) {
                    return false;
                }
                for (int i = 0; i < size(); i++) {
                    if (o.get(i) != get(i)) {
                        return false;
                    }
                }
                return true;
            }
            return false;
        }

        @Override
        public String toString() {
            StringBuilder b = new StringBuilder("WriteSet[");
            int i = 0;
            for (; i < size() - 1; i++) {
                b.append(get(i)).append(",");
            }
            b.append(get(i)).append("]");
            return b.toString();
        }

        private void checkBounds(int i) {
            // Valid indexes are [0, size). The previous check used 'i > size',
            // which silently allowed reading one slot past the logical end
            // (stale pooled data or an ArrayIndexOutOfBoundsException instead
            // of the intended IndexOutOfBoundsException).
            if (i < 0 || i >= size) {
                throw new IndexOutOfBoundsException(
                        "Index " + i + " out of bounds, array size = " + size);
            }
        }
    }

    @Override
    public AckSet getAckSet() {
        return AckSetImpl.create(ensembleSize, writeQuorumSize, ackQuorumSize);
    }

    @Override
    public AckSet getEnsembleAckSet() {
        // Used when every bookie of the ensemble must acknowledge.
        return AckSetImpl.create(ensembleSize, ensembleSize, ensembleSize);
    }

    /**
     * Recyclable tracker of which bookies have acknowledged an entry and
     * which have failed, sized to the ensemble.
     */
    private static class AckSetImpl implements AckSet {
        private int writeQuorumSize;
        private int ackQuorumSize;
        private final BitSet ackSet = new BitSet();

        // grows on reset()
        private BookieId[] failureMap = new BookieId[0];

        private final Handle<AckSetImpl> recyclerHandle;
        private static final Recycler<AckSetImpl> RECYCLER = new Recycler<AckSetImpl>() {
            @Override
            protected AckSetImpl newObject(Recycler.Handle<AckSetImpl> handle) {
                return new AckSetImpl(handle);
            }
        };

        private AckSetImpl(Handle<AckSetImpl> recyclerHandle) {
            this.recyclerHandle = recyclerHandle;
        }

        static AckSetImpl create(int ensembleSize,
                                 int writeQuorumSize,
                                 int ackQuorumSize) {
            AckSetImpl ackSet = RECYCLER.get();
            ackSet.reset(ensembleSize, writeQuorumSize, ackQuorumSize);
            return ackSet;
        }

        private void reset(int ensembleSize,
                           int writeQuorumSize,
                           int ackQuorumSize) {
            this.ackQuorumSize = ackQuorumSize;
            this.writeQuorumSize = writeQuorumSize;
            ackSet.clear();
            if (failureMap.length < ensembleSize) {
                failureMap = new BookieId[ensembleSize];
            }
            Arrays.fill(failureMap, null);
        }

        @Override
        public boolean completeBookieAndCheck(int bookieIndexHeardFrom) {
            // A successful response clears any earlier failure for this bookie.
            failureMap[bookieIndexHeardFrom] = null;
            ackSet.set(bookieIndexHeardFrom);
            return ackSet.cardinality() >= ackQuorumSize;
        }

        @Override
        public boolean failBookieAndCheck(int bookieIndexHeardFrom,
                                          BookieId address) {
            ackSet.clear(bookieIndexHeardFrom);
            failureMap[bookieIndexHeardFrom] = address;
            // More failures than the quorum slack means the entry can no
            // longer reach the ack quorum with the current ensemble.
            return failed() > (writeQuorumSize - ackQuorumSize);
        }

        @Override
        public Map<Integer, BookieId> getFailedBookies() {
            ImmutableMap.Builder<Integer, BookieId> builder = new ImmutableMap.Builder<>();
            for (int i = 0; i < failureMap.length; i++) {
                if (failureMap[i] != null) {
                    builder.put(i, failureMap[i]);
                }
            }
            return builder.build();
        }

        @Override
        public boolean removeBookieAndCheck(int bookie) {
            ackSet.clear(bookie);
            failureMap[bookie] = null;
            return ackSet.cardinality() >= ackQuorumSize;
        }

        @Override
        public void recycle() {
            recyclerHandle.recycle(this);
        }

        @Override
        public String toString() {
            return MoreObjects.toStringHelper(this)
                .add("ackQuorumSize", ackQuorumSize)
                .add("ackSet", ackSet)
                .add("failureMap", failureMap).toString();
        }

        private int failed() {
            int count = 0;
            for (int i = 0; i < failureMap.length; i++) {
                if (failureMap[i] != null) {
                    count++;
                }
            }
            return count;
        }
    }

    /**
     * Tracks per-bookie response codes and decides whether the responses seen
     * so far cover every possible write quorum of the round-robin layout.
     */
    private class RRQuorumCoverageSet implements QuorumCoverageSet {
        private final int[] covered = new int[ensembleSize];

        private RRQuorumCoverageSet() {
            for (int i = 0; i < covered.length; i++) {
                covered[i] = BKException.Code.UNINITIALIZED;
            }
        }

        @Override
        public synchronized void addBookie(int bookieIndexHeardFrom, int rc) {
            covered[bookieIndexHeardFrom] = rc;
        }

        @Override
        public synchronized boolean checkCovered() {
            // now check if there are any write quorums, with |ackQuorum| nodes available
            for (int i = 0; i < ensembleSize; i++) {
                /* Nodes which have either responded with an error other than NoSuch{Entry,Ledger},
                   or have not responded at all. We cannot know if these nodes ever accepted a entry. */
                int nodesUnknown = 0;

                for (int j = 0; j < writeQuorumSize; j++) {
                    int nodeIndex = (i + j) % ensembleSize;
                    if (covered[nodeIndex] != BKException.Code.OK
                        && covered[nodeIndex] != BKException.Code.NoSuchEntryException
                        && covered[nodeIndex] != BKException.Code.NoSuchLedgerExistsException) {
                        nodesUnknown++;
                    }
                }

                /* If nodesUnknown is greater than the ack quorum size, then
                   it is possible those two unknown nodes accepted an entry which
                   we do not know about */
                if (nodesUnknown >= ackQuorumSize) {
                    return false;
                }
            }
            return true;
        }

        @Override
        public String toString() {
            StringBuilder buffer = new StringBuilder();
            buffer.append("QuorumCoverage(e:").append(ensembleSize)
                .append(",w:").append(writeQuorumSize)
                .append(",a:").append(ackQuorumSize)
                .append(") = [");
            int i = 0;
            for (; i < covered.length - 1; i++) {
                buffer.append(covered[i]).append(", ");
            }
            buffer.append(covered[i]).append("]");
            return buffer.toString();
        }
    }

    @Override
    public QuorumCoverageSet getCoverageSet() {
        return new RRQuorumCoverageSet();
    }

    @Override
    public boolean hasEntry(long entryId, int bookieIndex) {
        for (int w = 0; w < writeQuorumSize; w++) {
            if (bookieIndex == getWriteSetBookieIndex(entryId, w)) {
                return true;
            }
        }
        return false;
    }

    @Override
    public BitSet getEntriesStripedToTheBookie(int bookieIndex, long startEntryId, long lastEntryId) {
        if ((startEntryId < 0) || (lastEntryId < 0) || (bookieIndex < 0) || (bookieIndex >= ensembleSize)
                || (lastEntryId < startEntryId)) {
            LOG.error(
                    "Illegal arguments for getEntriesStripedToTheBookie, bookieIndex : {},"
                            + " ensembleSize : {}, startEntryId : {}, lastEntryId : {}",
                    bookieIndex, ensembleSize, startEntryId, lastEntryId);
            throw new IllegalArgumentException("Illegal arguments for getEntriesStripedToTheBookie");
        }
        BitSet entriesStripedToTheBookie = new BitSet((int) (lastEntryId - startEntryId + 1));
        for (long entryId = startEntryId; entryId <= lastEntryId; entryId++) {
            // The write set of entryId spans [entryId, entryId + wq - 1] mod
            // ensembleSize; handle the wrap-around case separately.
            int modValOfFirstReplica = (int) (entryId % ensembleSize);
            int modValOfLastReplica = (int) ((entryId + writeQuorumSize - 1) % ensembleSize);
            if (modValOfLastReplica >= modValOfFirstReplica) {
                if ((bookieIndex >= modValOfFirstReplica) && (bookieIndex <= modValOfLastReplica)) {
                    entriesStripedToTheBookie.set((int) (entryId - startEntryId));
                }
            } else {
                if ((bookieIndex >= modValOfFirstReplica) || (bookieIndex <= modValOfLastReplica)) {
                    entriesStripedToTheBookie.set((int) (entryId - startEntryId));
                }
            }
        }
        return entriesStripedToTheBookie;
    }
}
| 333 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/ZoneawareEnsemblePlacementPolicyImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIES_JOINED;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIES_LEFT;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.FAILED_TO_RESOLVE_NETWORK_LOCATION_COUNT;
import static org.apache.bookkeeper.client.BookKeeperClientStats.NUM_WRITABLE_BOOKIES_IN_DEFAULT_FAULTDOMAIN;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import io.netty.util.HashedWheelTimer;
import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException;
import org.apache.bookkeeper.common.util.ReflectionUtils;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.conf.Configurable;
import org.apache.bookkeeper.feature.FeatureProvider;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.BookieNode;
import org.apache.bookkeeper.net.DNSToSwitchMapping;
import org.apache.bookkeeper.net.NetworkTopology;
import org.apache.bookkeeper.net.NetworkTopologyImpl;
import org.apache.bookkeeper.net.Node;
import org.apache.bookkeeper.net.NodeBase;
import org.apache.bookkeeper.net.ScriptBasedMapping;
import org.apache.bookkeeper.net.StabilizeNetworkTopology;
import org.apache.bookkeeper.proto.BookieAddressResolver;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Simple zoneaware ensemble placement policy.
*/
public class ZoneawareEnsemblePlacementPolicyImpl extends TopologyAwareEnsemblePlacementPolicy {
static final Logger LOG = LoggerFactory.getLogger(ZoneawareEnsemblePlacementPolicyImpl.class);
public static final String UNKNOWN_ZONE = "UnknownZone";
/*
* this defaultFaultDomain is used as placeholder network location for
* bookies for which network location can't be resolved. In
* ZoneawareEnsemblePlacementPolicyImpl zone is the fault domain and upgrade
* domain is logical concept to enable parallel patching by bringing down
* all the bookies in the upgrade domain.
*/
private String defaultFaultDomain = NetworkTopology.DEFAULT_ZONE_AND_UPGRADEDOMAIN;
protected ZoneAwareNodeLocation unresolvedNodeLocation = new ZoneAwareNodeLocation(
NetworkTopology.DEFAULT_ZONE, NetworkTopology.DEFAULT_UPGRADEDOMAIN);
private final Random rand;
protected StatsLogger statsLogger = null;
// Use a loading cache so slow bookies are expired. Use entryId as values.
protected Cache<BookieId, Long> slowBookies;
protected BookieNode myNode = null;
protected String myZone = null;
protected boolean reorderReadsRandom = false;
protected int stabilizePeriodSeconds = 0;
protected int reorderThresholdPendingRequests = 0;
protected int maxWeightMultiple;
protected int minNumZonesPerWriteQuorum;
protected int desiredNumZonesPerWriteQuorum;
protected boolean enforceStrictZoneawarePlacement;
protected HashedWheelTimer timer;
protected final ConcurrentMap<BookieId, ZoneAwareNodeLocation> address2NodePlacement;
@StatsDoc(name = FAILED_TO_RESOLVE_NETWORK_LOCATION_COUNT, help = "Counter for number of times"
+ " DNSResolverDecorator failed to resolve Network Location")
protected Counter failedToResolveNetworkLocationCounter = null;
@StatsDoc(name = NUM_WRITABLE_BOOKIES_IN_DEFAULT_FAULTDOMAIN, help = "Gauge for the number of writable"
+ " Bookies in default fault domain")
protected Gauge<Integer> numWritableBookiesInDefaultFaultDomain;
/**
* Zone and UpgradeDomain pair of a node.
*/
public static class ZoneAwareNodeLocation {
private final String zone;
private final String upgradeDomain;
private final String repString;
public ZoneAwareNodeLocation(String zone, String upgradeDomain) {
this.zone = zone;
this.upgradeDomain = upgradeDomain;
repString = zone + upgradeDomain;
}
public String getZone() {
return zone;
}
public String getUpgradeDomain() {
return upgradeDomain;
}
@Override
public int hashCode() {
return repString.hashCode();
}
@Override
public boolean equals(Object obj) {
return ((obj instanceof ZoneAwareNodeLocation)
&& repString.equals(((ZoneAwareNodeLocation) obj).repString));
}
}
ZoneawareEnsemblePlacementPolicyImpl() {
super();
address2NodePlacement = new ConcurrentHashMap<BookieId, ZoneAwareNodeLocation>();
rand = new Random(System.currentTimeMillis());
}
protected ZoneAwareNodeLocation getZoneAwareNodeLocation(BookieId addr) {
ZoneAwareNodeLocation nodeLocation = address2NodePlacement.get(addr);
if (null == nodeLocation) {
String networkLocation = resolveNetworkLocation(addr);
if (getDefaultFaultDomain().equals(networkLocation)) {
nodeLocation = unresolvedNodeLocation;
} else {
String[] parts = StringUtils.split(NodeBase.normalize(networkLocation), NodeBase.PATH_SEPARATOR);
if (parts.length != 2) {
nodeLocation = unresolvedNodeLocation;
} else {
nodeLocation = new ZoneAwareNodeLocation(NodeBase.PATH_SEPARATOR_STR + parts[0],
NodeBase.PATH_SEPARATOR_STR + parts[1]);
}
}
address2NodePlacement.putIfAbsent(addr, nodeLocation);
}
return nodeLocation;
}
protected ZoneAwareNodeLocation getZoneAwareNodeLocation(BookieNode node) {
if (null == node || null == node.getAddr()) {
return unresolvedNodeLocation;
}
return getZoneAwareNodeLocation(node.getAddr());
}
    /**
     * Wires up the policy: stats, DNS resolver, network topology, the local
     * node's zone, and the slow-bookie cache. Must be called before use.
     * Throws IllegalArgumentException on inconsistent zone-quorum settings and
     * RuntimeException if the local host address cannot be determined.
     */
    @Override
    public EnsemblePlacementPolicy initialize(ClientConfiguration conf,
            Optional<DNSToSwitchMapping> optionalDnsResolver, HashedWheelTimer timer, FeatureProvider featureProvider,
            StatsLogger statsLogger, BookieAddressResolver bookieAddressResolver) {
        this.statsLogger = statsLogger;
        this.bookieAddressResolver = bookieAddressResolver;
        this.timer = timer;
        this.bookiesJoinedCounter = statsLogger.getOpStatsLogger(BOOKIES_JOINED);
        this.bookiesLeftCounter = statsLogger.getOpStatsLogger(BOOKIES_LEFT);
        this.failedToResolveNetworkLocationCounter = statsLogger.getCounter(FAILED_TO_RESOLVE_NETWORK_LOCATION_COUNT);
        // Gauge sampled under the topology read lock to count writable bookies
        // left in the placeholder (unresolved) fault domain.
        this.numWritableBookiesInDefaultFaultDomain = new Gauge<Integer>() {
            @Override
            public Integer getDefaultValue() {
                return 0;
            }

            @Override
            public Integer getSample() {
                rwLock.readLock().lock();
                try {
                    return topology.countNumOfAvailableNodes(getDefaultFaultDomain(), Collections.emptySet());
                } finally {
                    rwLock.readLock().unlock();
                }
            }
        };
        this.statsLogger.registerGauge(NUM_WRITABLE_BOOKIES_IN_DEFAULT_FAULTDOMAIN,
                numWritableBookiesInDefaultFaultDomain);
        this.reorderThresholdPendingRequests = conf.getReorderThresholdPendingRequests();
        // Optional disk-weight-based selection of bookies.
        this.isWeighted = conf.getDiskWeightBasedPlacementEnabled();
        if (this.isWeighted) {
            this.maxWeightMultiple = conf.getBookieMaxWeightMultipleForWeightBasedPlacement();
            this.weightedSelection = new DynamicWeightedRandomSelectionImpl<BookieNode>(this.maxWeightMultiple);
            LOG.info("Weight based placement with max multiple of {}", this.maxWeightMultiple);
        } else {
            LOG.info("Not weighted");
        }
        // min must not exceed desired zones per write quorum; misconfiguration
        // is rejected up front.
        this.minNumZonesPerWriteQuorum = conf.getMinNumZonesPerWriteQuorum();
        this.desiredNumZonesPerWriteQuorum = conf.getDesiredNumZonesPerWriteQuorum();
        this.enforceStrictZoneawarePlacement = conf.getEnforceStrictZoneawarePlacement();
        if (minNumZonesPerWriteQuorum > desiredNumZonesPerWriteQuorum) {
            LOG.error(
                    "It is misconfigured, for ZoneawareEnsemblePlacementPolicy, minNumZonesPerWriteQuorum: {} cann't be"
                            + " greater than desiredNumZonesPerWriteQuorum: {}",
                    minNumZonesPerWriteQuorum, desiredNumZonesPerWriteQuorum);
            throw new IllegalArgumentException("minNumZonesPerWriteQuorum: " + minNumZonesPerWriteQuorum
                    + " cann't be greater than desiredNumZonesPerWriteQuorum: " + desiredNumZonesPerWriteQuorum);
        }
        // Use the injected DNS resolver if present; otherwise instantiate the
        // configured (or default script-based) one reflectively.
        DNSToSwitchMapping actualDNSResolver;
        if (optionalDnsResolver.isPresent()) {
            actualDNSResolver = optionalDnsResolver.get();
        } else {
            String dnsResolverName = conf.getString(REPP_DNS_RESOLVER_CLASS, ScriptBasedMapping.class.getName());
            actualDNSResolver = ReflectionUtils.newInstance(dnsResolverName, DNSToSwitchMapping.class);
            actualDNSResolver.setBookieAddressResolver(bookieAddressResolver);
            if (actualDNSResolver instanceof Configurable) {
                ((Configurable) actualDNSResolver).setConf(conf);
            }
        }
        // Decorator falls back to the default fault domain and counts failures.
        this.dnsResolver = new DNSResolverDecorator(actualDNSResolver, () -> this.getDefaultFaultDomain(),
                failedToResolveNetworkLocationCounter);
        dnsResolver.setBookieAddressResolver(bookieAddressResolver);
        this.stabilizePeriodSeconds = conf.getNetworkTopologyStabilizePeriodSeconds();
        // create the network topology
        if (stabilizePeriodSeconds > 0) {
            this.topology = new StabilizeNetworkTopology(timer, stabilizePeriodSeconds);
        } else {
            this.topology = new NetworkTopologyImpl();
        }
        try {
            myNode = createDummyLocalBookieNode(InetAddress.getLocalHost().getHostAddress());
            myZone = getZoneAwareNodeLocation(myNode).getZone();
        } catch (IOException e) {
            LOG.error("Failed to get local host address : ", e);
            throw new RuntimeException(e);
        }
        LOG.info("Initialized zoneaware ensemble placement policy @ {} @ {} : {}.", myNode,
                myNode.getNetworkLocation(), dnsResolver.getClass().getName());
        // Entries expire so a bookie marked slow is retried after the window.
        slowBookies = CacheBuilder.newBuilder()
                .expireAfterWrite(conf.getBookieFailureHistoryExpirationMSec(), TimeUnit.MILLISECONDS)
                .build(new CacheLoader<BookieId, Long>() {
                    @Override
                    public Long load(BookieId key) throws Exception {
                        return -1L;
                    }
                });
        return this;
    }
/**
 * Sets the default fault domain (a path of the form /zone/upgradedomain) that
 * bookies with unresolvable network locations are attributed to.
 *
 * @param defaultFaultDomain normalized network path with exactly two components
 * @return this policy instance, for fluent chaining
 * @throws IllegalArgumentException if the path does not have exactly two components
 */
public ZoneawareEnsemblePlacementPolicyImpl withDefaultFaultDomain(String defaultFaultDomain) {
    checkNotNull(defaultFaultDomain, "Default fault domain cannot be null");
    final String[] parts = StringUtils.split(NodeBase.normalize(defaultFaultDomain), NodeBase.PATH_SEPARATOR);
    if (parts.length == 2) {
        unresolvedNodeLocation = new ZoneAwareNodeLocation(NodeBase.PATH_SEPARATOR_STR + parts[0],
                NodeBase.PATH_SEPARATOR_STR + parts[1]);
    } else {
        LOG.error("provided defaultFaultDomain: {} is not valid", defaultFaultDomain);
        throw new IllegalArgumentException("invalid defaultFaultDomain");
    }
    this.defaultFaultDomain = defaultFaultDomain;
    return this;
}
/** Returns the configured default fault domain path. */
public String getDefaultFaultDomain() {
    return this.defaultFaultDomain;
}
/**
 * Not supported by this policy; use
 * {@link #newEnsemble(int, int, int, Map, Set)} instead.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public PlacementResult<List<BookieId>> newEnsemble(int ensembleSize, int writeQuorumSize,
        int ackQuorumSize, Set<BookieId> excludeBookies,
        org.apache.bookkeeper.client.ITopologyAwareEnsemblePlacementPolicy.Ensemble<BookieNode> parentEnsemble,
        org.apache.bookkeeper.client.ITopologyAwareEnsemblePlacementPolicy.Predicate<BookieNode> parentPredicate)
        throws BKNotEnoughBookiesException {
    final String reason = "newEnsemble method with parentEnsemble and parentPredicate is not supported for "
            + "ZoneawareEnsemblePlacementPolicyImpl";
    throw new UnsupportedOperationException(reason);
}
/**
 * Not supported by this policy.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public BookieNode selectFromNetworkLocation(String networkLoc, Set<Node> excludeBookies,
        org.apache.bookkeeper.client.ITopologyAwareEnsemblePlacementPolicy.Predicate<BookieNode> predicate,
        org.apache.bookkeeper.client.ITopologyAwareEnsemblePlacementPolicy.Ensemble<BookieNode> ensemble,
        boolean fallbackToRandom) throws BKNotEnoughBookiesException {
    final String reason =
            "selectFromNetworkLocation is not supported for ZoneawareEnsemblePlacementPolicyImpl";
    throw new UnsupportedOperationException(reason);
}
/**
 * Not supported by this policy.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public BookieNode selectFromNetworkLocation(Set<String> excludeRacks, Set<Node> excludeBookies,
        org.apache.bookkeeper.client.ITopologyAwareEnsemblePlacementPolicy.Predicate<BookieNode> predicate,
        org.apache.bookkeeper.client.ITopologyAwareEnsemblePlacementPolicy.Ensemble<BookieNode> ensemble,
        boolean fallbackToRandom) throws BKNotEnoughBookiesException {
    final String reason =
            "selectFromNetworkLocation is not supported for ZoneawareEnsemblePlacementPolicyImpl";
    throw new UnsupportedOperationException(reason);
}
/**
 * Not supported by this policy.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public BookieNode selectFromNetworkLocation(String networkLoc, Set<String> excludeRacks, Set<Node> excludeBookies,
        org.apache.bookkeeper.client.ITopologyAwareEnsemblePlacementPolicy.Predicate<BookieNode> predicate,
        org.apache.bookkeeper.client.ITopologyAwareEnsemblePlacementPolicy.Ensemble<BookieNode> ensemble,
        boolean fallbackToRandom) throws BKNotEnoughBookiesException {
    final String reason =
            "selectFromNetworkLocation is not supported for ZoneawareEnsemblePlacementPolicyImpl";
    throw new UnsupportedOperationException(reason);
}
@Override
public void uninitalize() {
    // Intentionally a no-op: this policy does not own resources that need
    // explicit release here.
}
/**
 * Creates a new ensemble of {@code ensembleSize} bookies.
 *
 * <p>If {@code enforceStrictZoneawarePlacement} is disabled, bookies are
 * picked uniformly at random. Otherwise each slot is filled via
 * {@link #setBookieInTheEnsemble}, spreading write quorums across zones and
 * upgrade domains, and bookies in the default fault domain are excluded.
 *
 * @throws IllegalArgumentException when strict placement is enabled and the
 *         ensembleSize/writeQuorumSize/minNumZonesPerWriteQuorum combination
 *         is unsafe (see the inline comments for the reasoning)
 * @throws BKNotEnoughBookiesException when a slot cannot be filled
 */
@Override
public PlacementResult<List<BookieId>> newEnsemble(int ensembleSize, int writeQuorumSize,
        int ackQuorumSize, Map<String, byte[]> customMetadata, Set<BookieId> excludeBookies)
        throws BKNotEnoughBookiesException {
    if (enforceStrictZoneawarePlacement) {
        if (ensembleSize % writeQuorumSize != 0) {
            /*
             * if ensembleSize is not multiple of writeQuorumSize, then the
             * write quorums which are wrapped will have bookies from just
             * minNumberOfZones though bookies are available from
             * desiredNumZones.
             *
             * lets say for example - desiredZones = 3, minZones = 2,
             * ensembleSize = 5, writeQuorumSize = 3, ackQuorumSize = 2
             *
             * z1, z2, z3, z1, z2 is a legal ensemble. (lets assume here z1
             * represents a node belonging to zone z1)
             *
             * the writeQuorum for entry 3 will be z1, z2 and z1, since
             * ackQuorumSize is 2, an entry could be written just to two
             * bookies that belong to z1. If the zone z1 goes down then the
             * entry could potentially be unavailable until the zone z1 has
             * come back.
             *
             * Also, it is not ideal to allow combination which fallsback to
             * minZones, when bookies are available from desiredNumZones.
             *
             * So prohibiting this combination of configuration.
             */
            LOG.error("It is illegal for ensembleSize to be not multiple of"
                    + " writeQuorumSize When StrictZoneawarePlacement is enabled");
            throw new IllegalArgumentException("It is illegal for ensembleSize to be not multiple of"
                    + " writeQuorumSize When StrictZoneawarePlacement is enabled");
        }
        if (writeQuorumSize <= minNumZonesPerWriteQuorum) {
            /*
             * if we allow writeQuorumSize <= minNumZonesPerWriteQuorum,
             * then replaceBookie may fail to find a candidate to replace a
             * node when a zone goes down.
             *
             * lets say for example - desiredZones = 3, minZones = 2,
             * ensembleSize = 6, writeQuorumSize = 2, ackQuorumSize = 2
             *
             * z1, z2, z3, z1, z2, z3 is a legal ensemble. (lets assume here
             * z1 represents a node belonging to zone z1)
             *
             * Now if Zone z2 goes down, you need to replace Index 1 and 4.
             * To replace index 1, you need to find a zone that is not z1
             * and Z3 which is not possible.
             *
             * So prohibiting this combination of configuration.
             */
            LOG.error("It is illegal for writeQuorumSize to be lesser than or equal"
                    + " to minNumZonesPerWriteQuorum When StrictZoneawarePlacement is enabled");
            throw new IllegalArgumentException("It is illegal for writeQuorumSize to be lesser than or equal"
                    + " to minNumZonesPerWriteQuorum When StrictZoneawarePlacement is enabled");
        }
    }
    // Never demand more zones per write quorum than the quorum has slots.
    int desiredNumZonesPerWriteQuorumForThisEnsemble = Math.min(writeQuorumSize, desiredNumZonesPerWriteQuorum);
    // Pre-size with nulls so slots can be filled in place by index.
    List<BookieId> newEnsemble = new ArrayList<BookieId>(
            Collections.nCopies(ensembleSize, null));
    // Selection happens under the read lock so the topology cannot change
    // while the ensemble is being assembled.
    rwLock.readLock().lock();
    try {
        if (!enforceStrictZoneawarePlacement) {
            return createNewEnsembleRandomly(newEnsemble, writeQuorumSize, ackQuorumSize, customMetadata,
                    excludeBookies);
        }
        Set<BookieId> comprehensiveExclusionBookiesSet = addDefaultFaultDomainBookies(excludeBookies);
        for (int index = 0; index < ensembleSize; index++) {
            // newEnsemble is passed as both current and new list: each slot is
            // chosen against the partially-built ensemble and written in place.
            BookieId selectedBookie = setBookieInTheEnsemble(ensembleSize, writeQuorumSize, newEnsemble,
                    newEnsemble, index, desiredNumZonesPerWriteQuorumForThisEnsemble,
                    comprehensiveExclusionBookiesSet);
            // Exclude the chosen bookie from subsequent slots.
            comprehensiveExclusionBookiesSet.add(selectedBookie);
        }
        return PlacementResult.of(newEnsemble,
                isEnsembleAdheringToPlacementPolicy(newEnsemble, writeQuorumSize, ackQuorumSize));
    } finally {
        rwLock.readLock().unlock();
    }
}
/**
 * Replaces {@code bookieToReplace} in {@code currentEnsemble} with a new
 * candidate, honoring zone/upgrade-domain constraints when strict placement
 * is enabled (random otherwise).
 *
 * @param currentEnsemble the existing ensemble; not mutated (the result is
 *        computed on a copy)
 * @param bookieToReplace the member to substitute
 * @param excludeBookies bookies that must not be selected
 * @return the chosen replacement together with the new ensemble's adherence
 * @throws BKNotEnoughBookiesException if no eligible candidate exists
 */
@Override
public PlacementResult<BookieId> replaceBookie(int ensembleSize, int writeQuorumSize, int ackQuorumSize,
        Map<String, byte[]> customMetadata, List<BookieId> currentEnsemble,
        BookieId bookieToReplace, Set<BookieId> excludeBookies)
        throws BKNotEnoughBookiesException {
    int bookieToReplaceIndex = currentEnsemble.indexOf(bookieToReplace);
    // Same clamping rule as newEnsemble / isEnsembleAdheringToPlacementPolicy:
    // use Math.min instead of a hand-rolled ternary for consistency.
    int desiredNumZonesPerWriteQuorumForThisEnsemble = Math.min(writeQuorumSize, desiredNumZonesPerWriteQuorum);
    List<BookieId> newEnsemble = new ArrayList<BookieId>(currentEnsemble);
    rwLock.readLock().lock();
    try {
        if (!enforceStrictZoneawarePlacement) {
            return selectBookieRandomly(newEnsemble, bookieToReplace, excludeBookies, writeQuorumSize,
                    ackQuorumSize);
        }
        Set<BookieId> comprehensiveExclusionBookiesSet = addDefaultFaultDomainBookies(excludeBookies);
        // Current members must not be re-selected as the replacement.
        comprehensiveExclusionBookiesSet.addAll(currentEnsemble);
        BookieId candidateAddr = setBookieInTheEnsemble(ensembleSize, writeQuorumSize, currentEnsemble,
                newEnsemble, bookieToReplaceIndex, desiredNumZonesPerWriteQuorumForThisEnsemble,
                comprehensiveExclusionBookiesSet);
        return PlacementResult.of(candidateAddr,
                isEnsembleAdheringToPlacementPolicy(newEnsemble, writeQuorumSize, ackQuorumSize));
    } finally {
        rwLock.readLock().unlock();
    }
}
/*
 * Fills 'newEnsemble' with randomly chosen candidates, ignoring zone and
 * upgrade-domain constraints. Used when strict zone-aware placement is
 * disabled; expected to run under the read lock of 'rwLock'.
 */
private PlacementResult<List<BookieId>> createNewEnsembleRandomly(List<BookieId> newEnsemble,
        int writeQuorumSize, int ackQuorumSize, Map<String, byte[]> customMetadata,
        Set<BookieId> excludeBookies) throws BKNotEnoughBookiesException {
    final int ensembleSize = newEnsemble.size();
    final Set<BookieNode> candidates = getBookiesToConsider(excludeBookies);
    if (candidates.size() < ensembleSize) {
        LOG.error("Not enough bookies are available to form ensemble of size: {}", ensembleSize);
        throw new BKNotEnoughBookiesException();
    }
    for (int slot = 0; slot < ensembleSize; slot++) {
        final BookieNode chosen = selectCandidateNode(candidates);
        newEnsemble.set(slot, chosen.getAddr());
        // Remove so the same bookie cannot be picked twice.
        candidates.remove(chosen);
    }
    return PlacementResult.of(newEnsemble,
            isEnsembleAdheringToPlacementPolicy(newEnsemble, writeQuorumSize, ackQuorumSize));
}
/*
 * Picks a random replacement for 'bookieToReplace' in 'newEnsemble',
 * ignoring zone constraints. Used when strict zone-aware placement is
 * disabled. Mutates 'newEnsemble' at the replaced index.
 */
private PlacementResult<BookieId> selectBookieRandomly(List<BookieId> newEnsemble,
        BookieId bookieToReplace, Set<BookieId> excludeBookies, int writeQuorumSize,
        int ackQuorumSize) throws BKNotEnoughBookiesException {
    final Set<BookieId> excludedWithEnsemble = new HashSet<BookieId>(excludeBookies);
    excludedWithEnsemble.addAll(newEnsemble);
    final Set<BookieNode> candidates = getBookiesToConsider(excludedWithEnsemble);
    final int replaceIndex = newEnsemble.indexOf(bookieToReplace);
    if (candidates.isEmpty()) {
        LOG.error("There is no bookie available to replace a bookie");
        throw new BKNotEnoughBookiesException();
    }
    final BookieId chosen = selectCandidateNode(candidates).getAddr();
    newEnsemble.set(replaceIndex, chosen);
    return PlacementResult.of(chosen,
            isEnsembleAdheringToPlacementPolicy(newEnsemble, writeQuorumSize, ackQuorumSize));
}
/*
 * Returns every bookie currently known to the topology, minus those in
 * 'excludeBookies'. Non-bookie leaves are skipped.
 */
private Set<BookieNode> getBookiesToConsider(Set<BookieId> excludeBookies) {
    final Set<BookieNode> candidates = new HashSet<BookieNode>();
    for (Node leaf : topology.getLeaves(NodeBase.ROOT)) {
        if (!(leaf instanceof BookieNode)) {
            continue;
        }
        final BookieNode bookie = (BookieNode) leaf;
        if (!excludeBookies.contains(bookie.getAddr())) {
            candidates.add(bookie);
        }
    }
    return candidates;
}
/*
 * This method finds the appropriate bookie for newEnsemble by finding
 * bookie to replace at bookieToReplaceIndex in the currentEnsemble.
 *
 * It goes through following filtering process 1) Exclude zones of
 * desiredNumZonesPerWriteQuorumForThisEnsemble neighboring nodes 2) Find
 * bookies to consider by excluding zones (found from previous step) and
 * excluding UDs of the zones to consider. 3) If it can't find eligible
 * bookie, then keep reducing the number of neighboring nodes to
 * minNumZonesPerWriteQuorum and repeat step 2. 4) If it still can't find
 * eligible bookies then find the zones to exclude such that in a writeset
 * there will be bookies from atleast minNumZonesPerWriteQuorum zones and
 * repeat step 2 5) After getting the list of eligible candidates select a
 * node randomly. 6) If step-4 couldn't find eligible candidates then throw
 * BKNotEnoughBookiesException.
 *
 * Example: Ensemble:6 Qw:6 desiredNumZonesPerWriteQuorumForThisEnsemble:3
 * minNumZonesPerWriteQuorum:2 The selection process is as follows:
 *
 * 1) Find bookies by excluding zones of
 * (desiredNumZonesPerWriteQuorumForThisEnsemble -1) neighboring bookies on
 * the left and the right side of the bookieToReplaceIndex. i.e Zones of
 * 2 bookies(3-1) on both sides of the index in question will be excluded to
 * find bookies. 2) Get the set of zones of the bookies selected above. 3)
 * Get the UpgradeDomains to exclude of the each zone selected above to make
 * sure bookies of writeSets containing bookieToReplaceIndex are from
 * different UD if they belong to same zone. 4) now from the zones selected
 * in step 2, apply the filter of UDs to exclude found in previous step and
 * get the eligible bookies. 5) If no bookie matches this filter, then
 * instead of aiming for unique UDs, fallback to UDs to exclude such that if
 * bookies are from same zone in the writeSets containing
 * bookieToReplaceIndex then they must be atleast from 2 different UDs. 6)
 * now from the zones selected in step 2, apply the filter of UDs to exclude
 * found in previous step and get the eligible bookies. 7) If no bookie
 * matches this filter, repeat from Step1 to Step6 by decreasing neighboring
 * exclude zones from (desiredNumZonesPerWriteQuorumForThisEnsemble - 1),
 * which is 2 to (minNumZonesPerWriteQuorum - 1), which is 1 8) If even
 * after this, bookies are not found matching the criteria fallback to
 * minNumZonesPerWriteQuorum, for this find the zones to exclude such that
 * in writesets containing this bookieToReplaceIndex there will be bookies
 * from atleast minNumZonesPerWriteQuorum zones, which is 2. 9) Get the set
 * of the zones of the bookies by excluding zones selected above. 10) repeat
 * Step3 to Step6. 11) After getting the list of eligible candidates select
 * a node randomly. 12) If even after Step10 there are no eligible
 * candidates then throw BKNotEnoughBookiesException.
 */
private BookieId setBookieInTheEnsemble(int ensembleSize, int writeQuorumSize,
        List<BookieId> currentEnsemble, List<BookieId> newEnsemble, int bookieToReplaceIndex,
        int desiredNumZonesPerWriteQuorumForThisEnsemble, Set<BookieId> excludeBookies)
        throws BKNotEnoughBookiesException {
    BookieId bookieToReplace = currentEnsemble.get(bookieToReplaceIndex);
    Set<String> zonesToExclude = null;
    Set<BookieNode> bookiesToConsiderAfterExcludingZonesAndUDs = null;
    // Shrink the excluded-zone neighborhood step by step, from the desired
    // zone spread down to the minimum. NOTE(review): the null initializations
    // above are only safe if this loop runs at least once, i.e.
    // desiredNumZonesPerWriteQuorumForThisEnsemble >= minNumZonesPerWriteQuorum;
    // that appears guaranteed by the validations in initialize()/newEnsemble —
    // confirm it also holds on the replaceBookie path.
    for (int numberOfNeighborsToConsider = (desiredNumZonesPerWriteQuorumForThisEnsemble
            - 1); numberOfNeighborsToConsider >= (minNumZonesPerWriteQuorum - 1); numberOfNeighborsToConsider--) {
        zonesToExclude = getZonesOfNeighboringNodesInEnsemble(currentEnsemble, bookieToReplaceIndex,
                (numberOfNeighborsToConsider));
        bookiesToConsiderAfterExcludingZonesAndUDs = getBookiesToConsiderAfterExcludingZonesAndUDs(ensembleSize,
                writeQuorumSize, currentEnsemble, bookieToReplaceIndex, excludeBookies, zonesToExclude);
        if (!bookiesToConsiderAfterExcludingZonesAndUDs.isEmpty()) {
            break;
        }
    }
    // Fallback: only guarantee minNumZonesPerWriteQuorum zones per write set.
    if (bookiesToConsiderAfterExcludingZonesAndUDs.isEmpty()) {
        zonesToExclude = getZonesToExcludeToMaintainMinZones(currentEnsemble, bookieToReplaceIndex,
                writeQuorumSize);
        bookiesToConsiderAfterExcludingZonesAndUDs = getBookiesToConsiderAfterExcludingZonesAndUDs(ensembleSize,
                writeQuorumSize, currentEnsemble, bookieToReplaceIndex, excludeBookies, zonesToExclude);
    }
    if (bookiesToConsiderAfterExcludingZonesAndUDs.isEmpty()) {
        LOG.error("Not enough bookies are available to replaceBookie : {} in ensemble : {} with excludeBookies {}.",
                bookieToReplace, currentEnsemble, excludeBookies);
        throw new BKNotEnoughBookiesException();
    }
    BookieId candidateAddr = selectCandidateNode(bookiesToConsiderAfterExcludingZonesAndUDs).getAddr();
    newEnsemble.set(bookieToReplaceIndex, candidateAddr);
    return candidateAddr;
}
/*
 * Returns a new set containing the given excludeBookies plus every bookie
 * residing in the default fault domain. Must be called while holding the
 * read lock of 'rwLock'.
 */
protected Set<BookieId> addDefaultFaultDomainBookies(Set<BookieId> excludeBookies) {
    final Set<BookieId> combinedExclusions = new HashSet<BookieId>(excludeBookies);
    for (Node node : topology.getLeaves(getDefaultFaultDomain())) {
        if (node instanceof BookieNode) {
            combinedExclusions.add(((BookieNode) node).getAddr());
        } else {
            LOG.error("found non-BookieNode: {} as leaf of defaultFaultDomain: {}", node, getDefaultFaultDomain());
        }
    }
    return combinedExclusions;
}
/*
 * Select a bookie randomly from the given candidate set. When
 * diskWeightBasedPlacement is enabled the pick is weighted by node weight;
 * otherwise every candidate is equally likely.
 */
private BookieNode selectCandidateNode(Set<BookieNode> bookiesToConsiderAfterExcludingUDs) {
    if (this.isWeighted) {
        return weightedSelection.getNextRandom(bookiesToConsiderAfterExcludingUDs);
    }
    // Uniform pick: walk the set until the randomly chosen position is reached.
    final int target = rand.nextInt(bookiesToConsiderAfterExcludingUDs.size());
    int position = 0;
    for (BookieNode node : bookiesToConsiderAfterExcludingUDs) {
        if (position == target) {
            return node;
        }
        position++;
    }
    return null;
}
/*
 * Builds the topology scope string that excludes the given zones
 * (INVERSE prefix followed by the zones joined with NODE_SEPARATOR).
 * Returns the empty string when there is nothing to exclude.
 */
private String getExcludedZonesString(Set<String> excludeZones) {
    if (excludeZones.isEmpty()) {
        return "";
    }
    final StringBuilder scope = new StringBuilder(NetworkTopologyImpl.INVERSE);
    boolean first = true;
    for (String zone : excludeZones) {
        if (first) {
            first = false;
        } else {
            scope.append(NetworkTopologyImpl.NODE_SEPARATOR);
        }
        scope.append(zone);
    }
    return scope.toString();
}
/*
 * Returns the bookies under the given topology scope (as produced by
 * getExcludedZonesString), minus 'excludeBookies'.
 *
 * Unlike before, non-bookie leaves are skipped with an error log instead of
 * being blindly cast — the sibling getBookiesToConsider(Set) overload
 * already guards with instanceof, and a malformed topology would otherwise
 * throw ClassCastException here.
 */
private Set<BookieNode> getBookiesToConsider(String excludedZonesString, Set<BookieId> excludeBookies) {
    Set<BookieNode> bookiesToConsider = new HashSet<BookieNode>();
    Set<Node> leaves = topology.getLeaves(excludedZonesString);
    for (Node leaf : leaves) {
        if (!(leaf instanceof BookieNode)) {
            LOG.error("found non-BookieNode: {} as leaf of scope: {}", leaf, excludedZonesString);
            continue;
        }
        BookieNode bookieNode = ((BookieNode) leaf);
        if (excludeBookies.contains(bookieNode.getAddr())) {
            continue;
        }
        bookiesToConsider.add(bookieNode);
    }
    return bookiesToConsider;
}
/*
 * For the position of 'bookieToReplaceIndex' in currentEnsemble, get the
 * set of bookies eligible by excluding the 'excludeZones' and
 * 'excludeBookies'. After excluding excludeZones and excludeBookies, it
 * would first try to exclude upgrade domains of neighboring nodes
 * (writeset) so the bookie would be from completely new upgrade domain
 * of a zone, if a writeset contains bookie from the zone. If Bookie is
 * not found matching this criteria, then it will fallback to maintain min
 * upgrade domains (two) from a zone, such that if multiple bookies in a
 * write quorum are from the same zone then they will be spread across two
 * upgrade domains.
 */
private Set<BookieNode> getBookiesToConsiderAfterExcludingZonesAndUDs(int ensembleSize, int writeQuorumSize,
        List<BookieId> currentEnsemble, int bookieToReplaceIndex,
        Set<BookieId> excludeBookies, Set<String> excludeZones) {
    Set<BookieNode> bookiesToConsiderAfterExcludingZonesAndUDs = new HashSet<BookieNode>();
    // Maps each candidate zone to the upgrade domains that must NOT be used.
    // The loops below insert an entry for every zone in zonesToConsider, so
    // the lookups in updateBookiesToConsiderAfterExcludingZonesAndUDs are safe.
    HashMap<String, Set<String>> excludingUDsOfZonesToConsider = new HashMap<String, Set<String>>();
    Set<BookieNode> bookiesToConsiderAfterExcludingZones = getBookiesToConsider(
            getExcludedZonesString(excludeZones), excludeBookies);
    if (!bookiesToConsiderAfterExcludingZones.isEmpty()) {
        Set<String> zonesToConsider = getZonesOfBookies(bookiesToConsiderAfterExcludingZones);
        // First pass: demand a completely fresh upgrade domain per zone.
        for (String zoneToConsider : zonesToConsider) {
            Set<String> upgradeDomainsOfAZoneInNeighboringNodes = getUpgradeDomainsOfAZoneInNeighboringNodes(
                    currentEnsemble, bookieToReplaceIndex, writeQuorumSize, zoneToConsider);
            excludingUDsOfZonesToConsider.put(zoneToConsider, upgradeDomainsOfAZoneInNeighboringNodes);
        }
        updateBookiesToConsiderAfterExcludingZonesAndUDs(bookiesToConsiderAfterExcludingZonesAndUDs,
                bookiesToConsiderAfterExcludingZones, excludingUDsOfZonesToConsider);
        /*
         * If no eligible bookie is found, then instead of aiming for unique
         * UDs, fallback to UDs to exclude such that if bookies are from
         * same zone in the writeSets containing bookieToReplaceIndex then
         * they must be atleast from 2 different UDs
         */
        if (bookiesToConsiderAfterExcludingZonesAndUDs.isEmpty()) {
            excludingUDsOfZonesToConsider.clear();
            for (String zoneToConsider : zonesToConsider) {
                Set<String> udsToExcludeToMaintainMinUDsInWriteQuorums =
                        getUDsToExcludeToMaintainMinUDsInWriteQuorums(currentEnsemble, bookieToReplaceIndex,
                                writeQuorumSize, zoneToConsider);
                excludingUDsOfZonesToConsider.put(zoneToConsider, udsToExcludeToMaintainMinUDsInWriteQuorums);
            }
            updateBookiesToConsiderAfterExcludingZonesAndUDs(bookiesToConsiderAfterExcludingZonesAndUDs,
                    bookiesToConsiderAfterExcludingZones, excludingUDsOfZonesToConsider);
        }
    }
    return bookiesToConsiderAfterExcludingZonesAndUDs;
}
/*
 * Copies into 'bookiesToConsiderAfterExcludingUDs' every bookie from
 * 'bookiesToConsider' whose upgrade domain is not excluded for its zone.
 * Assumes 'excludingUDsOfZonesToConsider' has an entry for each candidate's
 * zone (guaranteed by the caller).
 */
private void updateBookiesToConsiderAfterExcludingZonesAndUDs(Set<BookieNode> bookiesToConsiderAfterExcludingUDs,
        Set<BookieNode> bookiesToConsider, HashMap<String, Set<String>> excludingUDsOfZonesToConsider) {
    for (BookieNode candidate : bookiesToConsider) {
        final ZoneAwareNodeLocation location = getZoneAwareNodeLocation(candidate);
        final Set<String> excludedUDs = excludingUDsOfZonesToConsider.get(location.getZone());
        if (!excludedUDs.contains(location.getUpgradeDomain())) {
            bookiesToConsiderAfterExcludingUDs.add(candidate);
        }
    }
}
/*
 * Gets the set of zones of the nodes within 'numOfNeighboringNodes'
 * positions on either side of 'indexOfNode' in the ensemble (wrapping
 * around). Unset (null) ensemble slots are skipped.
 */
private Set<String> getZonesOfNeighboringNodesInEnsemble(List<BookieId> currentEnsemble, int indexOfNode,
        int numOfNeighboringNodes) {
    final Set<String> neighborZones = new HashSet<String>();
    final int ensembleSize = currentEnsemble.size();
    for (int offset = -numOfNeighboringNodes; offset <= numOfNeighboringNodes; offset++) {
        if (offset == 0) {
            continue;
        }
        final int index = (indexOfNode + offset + ensembleSize) % ensembleSize;
        final BookieId neighbor = currentEnsemble.get(index);
        if (neighbor != null) {
            neighborZones.add(getZoneAwareNodeLocation(neighbor).getZone());
        }
    }
    return neighborZones;
}
/*
 * This method returns set of zones to exclude for the position of
 * 'indexOfNode', so that writequorums, containing this index, would have
 * atleast minNumZonesPerWriteQuorum.
 */
private Set<String> getZonesToExcludeToMaintainMinZones(List<BookieId> currentEnsemble, int indexOfNode,
        int writeQuorumSize) {
    int ensSize = currentEnsemble.size();
    Set<String> zonesToExclude = new HashSet<String>();
    Set<String> zonesInWriteQuorum = new HashSet<String>();
    // Slide over every write quorum that contains indexOfNode: 'i' is the
    // offset of the quorum's first slot relative to indexOfNode.
    for (int i = -(writeQuorumSize - 1); i <= 0; i++) {
        zonesInWriteQuorum.clear();
        // Collect the zones of the OTHER members of this write quorum.
        for (int j = 0; j < writeQuorumSize; j++) {
            int indexInEnsemble = (i + j + indexOfNode + ensSize) % ensSize;
            if (indexInEnsemble == indexOfNode) {
                continue;
            }
            BookieId bookieAddr = currentEnsemble.get(indexInEnsemble);
            if (bookieAddr == null) {
                // Slot not yet filled (ensemble under construction).
                continue;
            }
            ZoneAwareNodeLocation nodeLocation = getZoneAwareNodeLocation(bookieAddr);
            zonesInWriteQuorum.add(nodeLocation.getZone());
        }
        // If the other members span fewer than minNumZonesPerWriteQuorum
        // zones, the new bookie must come from a different zone — so exclude
        // the zones already present in this quorum.
        if (zonesInWriteQuorum.size() <= (minNumZonesPerWriteQuorum - 1)) {
            zonesToExclude.addAll(zonesInWriteQuorum);
        }
    }
    return zonesToExclude;
}
/* Returns the set of distinct zones the given bookies belong to. */
private Set<String> getZonesOfBookies(Collection<BookieNode> bookieNodes) {
    final Set<String> zones = new HashSet<String>();
    for (BookieNode node : bookieNodes) {
        zones.add(getZoneAwareNodeLocation(node).getZone());
    }
    return zones;
}
/*
 * Gets the set of upgrade domains used, within 'zone', by the nodes at most
 * (writeQuorumSize - 1) positions away from 'indexOfNode' in either
 * direction (wrapping) — i.e. every node that can share a write quorum with
 * that index.
 */
private Set<String> getUpgradeDomainsOfAZoneInNeighboringNodes(List<BookieId> currentEnsemble,
        int indexOfNode, int writeQuorumSize, String zone) {
    final int ensSize = currentEnsemble.size();
    final Set<String> upgradeDomains = new HashSet<String>();
    for (int offset = -(writeQuorumSize - 1); offset <= (writeQuorumSize - 1); offset++) {
        if (offset == 0) {
            continue;
        }
        final BookieId neighbor = currentEnsemble.get((indexOfNode + offset + ensSize) % ensSize);
        if (neighbor == null) {
            continue;
        }
        final ZoneAwareNodeLocation location = getZoneAwareNodeLocation(neighbor);
        if (location.getZone().equals(zone)) {
            upgradeDomains.add(location.getUpgradeDomain());
        }
    }
    return upgradeDomains;
}
/*
 * This method returns set of UpgradeDomains to exclude if a bookie from
 * the 'zone' has to be selected for the position of 'indexOfNode', then if
 * there are multiple bookies from the 'zone' in a write quorum then they
 * will be atleast from minimum of two upgrade domains.
 */
private Set<String> getUDsToExcludeToMaintainMinUDsInWriteQuorums(List<BookieId> currentEnsemble,
        int indexOfNode, int writeQuorumSize, String zone) {
    int ensSize = currentEnsemble.size();
    Set<String> upgradeDomainsToExclude = new HashSet<String>();
    Set<String> upgradeDomainsOfThisZoneInWriteQuorum = new HashSet<String>();
    // Slide over every write quorum that contains indexOfNode: 'i' is the
    // offset of the quorum's first slot relative to indexOfNode.
    for (int i = -(writeQuorumSize - 1); i <= 0; i++) {
        upgradeDomainsOfThisZoneInWriteQuorum.clear();
        // Collect the UDs that 'zone' already contributes via the OTHER
        // members of this write quorum.
        for (int j = 0; j < writeQuorumSize; j++) {
            int indexInEnsemble = (i + j + indexOfNode + ensSize) % ensSize;
            if (indexInEnsemble == indexOfNode) {
                continue;
            }
            BookieId bookieAddr = currentEnsemble.get(indexInEnsemble);
            if (bookieAddr == null) {
                // Slot not yet filled (ensemble under construction).
                continue;
            }
            ZoneAwareNodeLocation nodeLocation = getZoneAwareNodeLocation(bookieAddr);
            if (nodeLocation.getZone().equals(zone)) {
                upgradeDomainsOfThisZoneInWriteQuorum.add(nodeLocation.getUpgradeDomain());
            }
        }
        // Exactly one UD present: picking the same UD again would leave all
        // of this zone's bookies in a single upgrade domain, so exclude it.
        if (upgradeDomainsOfThisZoneInWriteQuorum.size() == 1) {
            upgradeDomainsToExclude.addAll(upgradeDomainsOfThisZoneInWriteQuorum);
        }
    }
    return upgradeDomainsToExclude;
}
/**
 * Records that a bookie responded slowly for the given entry. Currently a
 * no-op for this policy: slow-bookie tracking is not implemented.
 */
@Override
public void registerSlowBookie(BookieId bookieSocketAddress, long entryId) {
    // TODO Auto-generated method stub
}
/**
 * Returns the write set unchanged: this policy does not reorder reads.
 */
@Override
public DistributionSchedule.WriteSet reorderReadSequence(List<BookieId> ensemble,
        BookiesHealthInfo bookiesHealthInfo, DistributionSchedule.WriteSet writeSet) {
    return writeSet;
}
/**
 * Produces the read order for a LAC read: the (unreordered) write set from
 * {@link #reorderReadSequence}, extended with any ensemble indices missing
 * from it.
 */
@Override
public DistributionSchedule.WriteSet reorderReadLACSequence(List<BookieId> ensemble,
        BookiesHealthInfo bookiesHealthInfo, DistributionSchedule.WriteSet writeSet) {
    final DistributionSchedule.WriteSet readOrder =
            reorderReadSequence(ensemble, bookiesHealthInfo, writeSet);
    readOrder.addMissingIndices(ensemble.size());
    return readOrder;
}
/*
 * In ZoneAwareEnsemblePlacementPolicy if bookies in the writeset are from
 * 'desiredNumOfZones' then it is considered as MEETS_STRICT if they are
 * from 'minNumOfZones' then it is considered as MEETS_SOFT otherwise
 * considered as FAIL. Also in a writeset if there are multiple bookies from
 * the same zone then they are expected to be from different upgrade
 * domains.
 */
@Override
public PlacementPolicyAdherence isEnsembleAdheringToPlacementPolicy(List<BookieId> ensembleList,
        int writeQuorumSize, int ackQuorumSize) {
    if (CollectionUtils.isEmpty(ensembleList)) {
        return PlacementPolicyAdherence.FAIL;
    }
    // Start optimistic; downgraded to MEETS_SOFT per write set below, and any
    // hard violation returns FAIL immediately.
    PlacementPolicyAdherence placementPolicyAdherence = PlacementPolicyAdherence.MEETS_STRICT;
    rwLock.readLock().lock();
    try {
        // zone -> upgrade domains seen in the current write set
        HashMap<String, Set<String>> bookiesLocationInWriteSet = new HashMap<String, Set<String>>();
        // zone -> number of bookies of that zone in the current write set
        HashMap<String, Integer> numOfBookiesInZones = new HashMap<String, Integer>();
        BookieId bookieNode;
        if (ensembleList.size() % writeQuorumSize != 0) {
            placementPolicyAdherence = PlacementPolicyAdherence.FAIL;
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                        "For ensemble: {}, ensembleSize: {} is not a multiple of writeQuorumSize: {}",
                        ensembleList, ensembleList.size(), writeQuorumSize);
            }
            return placementPolicyAdherence;
        }
        if (writeQuorumSize <= minNumZonesPerWriteQuorum) {
            placementPolicyAdherence = PlacementPolicyAdherence.FAIL;
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                        "For ensemble: {}, writeQuorumSize: {} is less than or equal to"
                                + " minNumZonesPerWriteQuorum: {}",
                        ensembleList, writeQuorumSize, minNumZonesPerWriteQuorum);
            }
            return placementPolicyAdherence;
        }
        int desiredNumZonesPerWriteQuorumForThisEnsemble = Math.min(writeQuorumSize, desiredNumZonesPerWriteQuorum);
        // Examine every (wrapping) write set of the ensemble.
        for (int i = 0; i < ensembleList.size(); i++) {
            bookiesLocationInWriteSet.clear();
            numOfBookiesInZones.clear();
            for (int j = 0; j < writeQuorumSize; j++) {
                int indexOfNode = (i + j) % ensembleList.size();
                bookieNode = ensembleList.get(indexOfNode);
                ZoneAwareNodeLocation nodeLocation = getZoneAwareNodeLocation(bookieNode);
                if (nodeLocation.equals(unresolvedNodeLocation)) {
                    placementPolicyAdherence = PlacementPolicyAdherence.FAIL;
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("ensemble: {}, contains bookie: {} for which network location is unresolvable",
                                ensembleList, bookieNode);
                    }
                    return placementPolicyAdherence;
                }
                String zone = nodeLocation.getZone();
                String upgradeDomain = nodeLocation.getUpgradeDomain();
                Set<String> udsOfThisZoneInThisWriteSet = bookiesLocationInWriteSet.get(zone);
                if (udsOfThisZoneInThisWriteSet == null) {
                    // First bookie of this zone in the write set.
                    udsOfThisZoneInThisWriteSet = new HashSet<String>();
                    udsOfThisZoneInThisWriteSet.add(upgradeDomain);
                    bookiesLocationInWriteSet.put(zone, udsOfThisZoneInThisWriteSet);
                    numOfBookiesInZones.put(zone, 1);
                } else {
                    udsOfThisZoneInThisWriteSet.add(upgradeDomain);
                    Integer numOfNodesInAZone = numOfBookiesInZones.get(zone);
                    numOfBookiesInZones.put(zone, (numOfNodesInAZone + 1));
                }
            }
            if (numOfBookiesInZones.entrySet().size() < minNumZonesPerWriteQuorum) {
                placementPolicyAdherence = PlacementPolicyAdherence.FAIL;
                if (LOG.isDebugEnabled()) {
                    LOG.debug("in ensemble: {}, writeset starting at: {} doesn't contain bookies from"
                            + " minNumZonesPerWriteQuorum: {}", ensembleList, i, minNumZonesPerWriteQuorum);
                }
                return placementPolicyAdherence;
            } else if (numOfBookiesInZones.entrySet().size() >= desiredNumZonesPerWriteQuorumForThisEnsemble) {
                // Desired zone spread reached: still STRICT, provided UDs hold.
                if (!validateMinUDsAreMaintained(numOfBookiesInZones, bookiesLocationInWriteSet)) {
                    placementPolicyAdherence = PlacementPolicyAdherence.FAIL;
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("in ensemble: {}, writeset starting at: {} doesn't maintain min of 2 UDs"
                                + " when there are multiple bookies from the same zone.", ensembleList, i);
                    }
                    return placementPolicyAdherence;
                }
            } else {
                // Between min and desired zone spread: at best MEETS_SOFT.
                if (!validateMinUDsAreMaintained(numOfBookiesInZones, bookiesLocationInWriteSet)) {
                    placementPolicyAdherence = PlacementPolicyAdherence.FAIL;
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("in ensemble: {}, writeset starting at: {} doesn't maintain min of 2 UDs"
                                + " when there are multiple bookies from the same zone.", ensembleList, i);
                    }
                    return placementPolicyAdherence;
                }
                if (placementPolicyAdherence == PlacementPolicyAdherence.MEETS_STRICT) {
                    placementPolicyAdherence = PlacementPolicyAdherence.MEETS_SOFT;
                }
            }
        }
    } finally {
        rwLock.readLock().unlock();
    }
    return placementPolicyAdherence;
}
/*
 * Checks that every zone contributing more than one bookie to a write set
 * spreads those bookies across at least two upgrade domains.
 */
private boolean validateMinUDsAreMaintained(HashMap<String, Integer> numOfNodesInZones,
        HashMap<String, Set<String>> nodesLocationInWriteSet) {
    for (Entry<String, Integer> zoneCount : numOfNodesInZones.entrySet()) {
        if (zoneCount.getValue() > 1
                && nodesLocationInWriteSet.get(zoneCount.getKey()).size() < 2) {
            return false;
        }
    }
    return true;
}
/**
 * Returns true when the acked bookies form a policy-compliant ack quorum:
 * at least {@code ackQuorumSize} bookies spread over at least
 * min(writeQuorumSize, minNumZonesPerWriteQuorum) distinct zones.
 */
@Override
public boolean areAckedBookiesAdheringToPlacementPolicy(Set<BookieId> ackedBookies, int writeQuorumSize,
        int ackQuorumSize) {
    final HashSet<String> zonesOfAckedBookies = new HashSet<>();
    final int minNumZonesPerWriteQuorumForThisEnsemble = Math.min(writeQuorumSize, minNumZonesPerWriteQuorum);
    boolean adhering = false;
    rwLock.readLock().lock();
    try {
        for (BookieId ackedBookie : ackedBookies) {
            zonesOfAckedBookies.add(getZoneAwareNodeLocation(ackedBookie).getZone());
        }
        adhering = (zonesOfAckedBookies.size() >= minNumZonesPerWriteQuorumForThisEnsemble)
                && (ackedBookies.size() >= ackQuorumSize);
        if (LOG.isDebugEnabled()) {
            LOG.debug(
                    "areAckedBookiesAdheringToPlacementPolicy returning {}, because number of ackedBookies = {},"
                            + " number of Zones of ackedbookies = {},"
                            + " number of minNumZonesPerWriteQuorumForThisEnsemble = {}",
                    adhering, ackedBookies.size(), zonesOfAckedBookies.size(),
                    minNumZonesPerWriteQuorumForThisEnsemble);
        }
    } finally {
        rwLock.readLock().unlock();
    }
    return adhering;
}
}
| 334 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/TryReadLastConfirmedOp.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import io.netty.buffer.ByteBuf;
import java.util.List;
import org.apache.bookkeeper.client.ReadLastConfirmedOp.LastConfirmedDataCallback;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieClient;
import org.apache.bookkeeper.proto.BookieProtocol;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryCallback;
import org.apache.bookkeeper.proto.checksum.DigestManager.RecoveryData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This op is try to read last confirmed without involving quorum coverage checking.
 * Use {@link ReadLastConfirmedOp} if you need quorum coverage checking.
 *
 * <p>Note that {@link LastConfirmedDataCallback#readLastConfirmedDataComplete} may be
 * invoked multiple times: once eagerly each time a strictly larger LAC is observed,
 * and once more when all responses have arrived.
 */
class TryReadLastConfirmedOp implements ReadEntryCallback {
    static final Logger LOG = LoggerFactory.getLogger(TryReadLastConfirmedOp.class);
    final LedgerHandle lh;
    final BookieClient bookieClient;
    final LastConfirmedDataCallback cb;
    // Responses (success or failure) still outstanding; starts at the ensemble size.
    int numResponsesPending;
    // Set once any bookie returns a digest-verified LAC, or reports it holds no copy
    // of the ledger/entry (both count as a valid answer for recovery purposes).
    volatile boolean hasValidResponse = false;
    // Guards against delivering the terminal callback more than once.
    volatile boolean completed = false;
    // Largest lastAddConfirmed (with its length) seen so far across all responses.
    RecoveryData maxRecoveredData;
    final List<BookieId> currentEnsemble;
    TryReadLastConfirmedOp(LedgerHandle lh, BookieClient bookieClient,
                           List<BookieId> ensemble, LastConfirmedDataCallback cb, long lac) {
        this.lh = lh;
        this.bookieClient = bookieClient;
        this.cb = cb;
        // Seed with the caller-provided LAC so only strictly larger values trigger callbacks.
        this.maxRecoveredData = new RecoveryData(lac, 0);
        this.numResponsesPending = lh.getLedgerMetadata().getEnsembleSize();
        this.currentEnsemble = ensemble;
    }
    /**
     * Fans out a LAST_ADD_CONFIRMED read to every bookie in the current ensemble.
     * The bookie's index in the ensemble is passed as the callback context.
     */
    public void initiate() {
        for (int i = 0; i < currentEnsemble.size(); i++) {
            bookieClient.readEntry(currentEnsemble.get(i),
                                   lh.ledgerId,
                                   BookieProtocol.LAST_ADD_CONFIRMED,
                                   this, i, BookieProtocol.FLAG_NONE);
        }
    }
    /**
     * Handles one bookie's response to the LAC read.
     *
     * @param rc response code from the bookie
     * @param ledgerId ledger the response is for
     * @param entryId entry the response is for (LAST_ADD_CONFIRMED sentinel)
     * @param buffer entry payload carrying the digest-protected LAC, if rc is OK
     * @param ctx the Integer index of the responding bookie within the ensemble
     */
    @Override
    public void readEntryComplete(int rc, long ledgerId, long entryId, ByteBuf buffer, Object ctx) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("TryReadLastConfirmed received response for (lid={}, eid={}) : {}",
                      ledgerId, entryId, rc);
        }
        int bookieIndex = (Integer) ctx;
        numResponsesPending--;
        if (BKException.Code.OK == rc) {
            try {
                RecoveryData recoveryData = lh.macManager.verifyDigestAndReturnLastConfirmed(buffer);
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Received lastAddConfirmed (lac={}, length={}) from bookie({}) for (lid={}).",
                              recoveryData.getLastAddConfirmed(), recoveryData.getLength(), bookieIndex, ledgerId);
                }
                if (recoveryData.getLastAddConfirmed() > maxRecoveredData.getLastAddConfirmed()) {
                    maxRecoveredData = recoveryData;
                    // callback immediately
                    cb.readLastConfirmedDataComplete(BKException.Code.OK, maxRecoveredData);
                }
                hasValidResponse = true;
            } catch (BKException.BKDigestMatchException e) {
                // Digest failure: the payload cannot be trusted, so this response is
                // neither counted as valid nor allowed to update maxRecoveredData.
                LOG.error("Mac mismatch for ledger: " + ledgerId + ", entry: " + entryId
                          + " while reading last entry from bookie: "
                          + currentEnsemble.get(bookieIndex));
            }
        } else if (BKException.Code.UnauthorizedAccessException == rc && !completed) {
            // Wrong password/digest key: fail fast with the error rather than waiting
            // for the remaining responses.
            cb.readLastConfirmedDataComplete(rc, maxRecoveredData);
            completed = true;
        } else if (BKException.Code.NoSuchLedgerExistsException == rc || BKException.Code.NoSuchEntryException == rc) {
            // Bookie has no copy; still a definitive (valid) answer for this op.
            hasValidResponse = true;
        }
        if (numResponsesPending == 0 && !completed) {
            if (!hasValidResponse) {
                // no success called
                cb.readLastConfirmedDataComplete(BKException.Code.LedgerRecoveryException, maxRecoveredData);
            } else {
                // callback
                cb.readLastConfirmedDataComplete(BKException.Code.OK, maxRecoveredData);
            }
            completed = true;
        }
    }
}
| 335 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragment.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.SortedMap;
import org.apache.bookkeeper.net.BookieId;
/**
 * Describes one segment of a ledger: the range of entries that a given subset of
 * the segment's bookie ensemble is expected to store.
 *
 * <p>Used when auditing ledgers and when re-replicating lost or misplaced data.
 */
public class LedgerFragment {
    private final Set<Integer> bookieIndexes;
    private final List<BookieId> ensemble;
    private final long firstEntryId;
    private final long lastKnownEntryId;
    private final long ledgerId;
    private final DistributionSchedule schedule;
    private final boolean isLedgerClosed;
    private ReplicateType replicateType = ReplicateType.DATA_LOSS;

    public LedgerFragment(LedgerHandle lh,
                          long firstEntryId,
                          long lastKnownEntryId,
                          Set<Integer> bookieIndexes) {
        this.ledgerId = lh.getId();
        this.firstEntryId = firstEntryId;
        this.lastKnownEntryId = lastKnownEntryId;
        this.bookieIndexes = bookieIndexes;
        this.ensemble = lh.getLedgerMetadata().getEnsembleAt(firstEntryId);
        this.schedule = lh.getDistributionSchedule();
        SortedMap<Long, ? extends List<BookieId>> allEnsembles = lh
                .getLedgerMetadata().getAllEnsembles();
        // A fragment is treated as "closed" (immutable from now on) when either:
        //   1. the ledger itself is closed, or
        //   2. this fragment is not the last one and the ledger's lastAddConfirmed
        //      has reached allEnsembles.lastKey() - 1 — covering the case where the
        //      ledger's final ensemble is still empty.
        boolean closed = lh.getLedgerMetadata().isClosed();
        if (!closed) {
            List<BookieId> lastSegmentEnsemble = allEnsembles.get(allEnsembles.lastKey());
            closed = !ensemble.equals(lastSegmentEnsemble)
                    && lh.getLastAddConfirmed() >= allEnsembles.lastKey() - 1;
        }
        this.isLedgerClosed = closed;
    }

    public LedgerFragment(LedgerFragment lf, Set<Integer> subset) {
        this.ledgerId = lf.ledgerId;
        this.firstEntryId = lf.firstEntryId;
        this.lastKnownEntryId = lf.lastKnownEntryId;
        this.bookieIndexes = subset;
        this.ensemble = lf.ensemble;
        this.schedule = lf.schedule;
        this.isLedgerClosed = lf.isLedgerClosed;
    }

    /**
     * Return a ledger fragment covering only the given subset of bookies.
     *
     * @param subset subset of bookie indexes
     * @return a fragment with the same entry range restricted to {@code subset}
     */
    public LedgerFragment subset(Set<Integer> subset) {
        return new LedgerFragment(this, subset);
    }

    /**
     * Returns true if and only if no client can ever modify this fragment again:
     * either the ledger is closed, or this fragment is not the last fragment of an
     * open ledger (see the constructor for the exact condition).
     */
    public boolean isClosed() {
        return isLedgerClosed;
    }

    public long getLedgerId() {
        return ledgerId;
    }

    public long getFirstEntryId() {
        return firstEntryId;
    }

    public long getLastKnownEntryId() {
        return lastKnownEntryId;
    }

    /**
     * Gets the address of the bookie at the given ensemble index.
     */
    public BookieId getAddress(int bookieIndex) {
        return ensemble.get(bookieIndex);
    }

    /** Addresses of every bookie in this fragment's index subset. */
    public Set<BookieId> getAddresses() {
        Set<BookieId> result = new HashSet<BookieId>();
        bookieIndexes.forEach(idx -> result.add(ensemble.get(idx)));
        return result;
    }

    /**
     * Gets the set of ensemble indexes this fragment covers.
     */
    public Set<Integer> getBookiesIndexes() {
        return bookieIndexes;
    }

    /**
     * Smallest entry id stored by any bookie in this fragment's subset, or
     * {@link LedgerHandle#INVALID_ENTRY_ID} when none of them stores an entry.
     *
     * @return entryId
     */
    public long getFirstStoredEntryId() {
        long earliest = LedgerHandle.INVALID_ENTRY_ID;
        for (int idx : bookieIndexes) {
            long candidate = getFirstStoredEntryId(idx);
            if (candidate == LedgerHandle.INVALID_ENTRY_ID) {
                continue;
            }
            earliest = (earliest == LedgerHandle.INVALID_ENTRY_ID)
                    ? candidate : Math.min(earliest, candidate);
        }
        return earliest;
    }

    /**
     * First entry id within [firstEntryId, lastKnownEntryId] that the distribution
     * schedule places on the given bookie, or {@link LedgerHandle#INVALID_ENTRY_ID}.
     *
     * @param bookieIndex the bookie index in the ensemble
     * @return first stored entry id on the bookie
     */
    public Long getFirstStoredEntryId(int bookieIndex) {
        long entry = firstEntryId;
        // At most ensemble.size() consecutive entries need probing before the
        // schedule's placement pattern repeats.
        for (int probes = 0; probes < ensemble.size(); probes++) {
            if (entry > lastKnownEntryId) {
                break;
            }
            if (schedule.hasEntry(entry, bookieIndex)) {
                return entry;
            }
            entry++;
        }
        return LedgerHandle.INVALID_ENTRY_ID;
    }

    /**
     * Largest entry id stored by any bookie in this fragment's subset, or
     * {@link LedgerHandle#INVALID_ENTRY_ID} when none of them stores an entry.
     *
     * @return entryId
     */
    public long getLastStoredEntryId() {
        long latest = LedgerHandle.INVALID_ENTRY_ID;
        for (int idx : bookieIndexes) {
            long candidate = getLastStoredEntryId(idx);
            if (candidate == LedgerHandle.INVALID_ENTRY_ID) {
                continue;
            }
            latest = (latest == LedgerHandle.INVALID_ENTRY_ID)
                    ? candidate : Math.max(latest, candidate);
        }
        return latest;
    }

    /**
     * Last entry id within [firstEntryId, lastKnownEntryId] that the distribution
     * schedule places on the given bookie, or {@link LedgerHandle#INVALID_ENTRY_ID}.
     *
     * @param bookieIndex the bookie index in the ensemble
     * @return last stored entry id on the bookie
     */
    public Long getLastStoredEntryId(int bookieIndex) {
        long entry = lastKnownEntryId;
        for (int probes = 0; probes < ensemble.size(); probes++) {
            if (entry < firstEntryId) {
                break;
            }
            if (schedule.hasEntry(entry, bookieIndex)) {
                return entry;
            }
            entry--;
        }
        return LedgerHandle.INVALID_ENTRY_ID;
    }

    /** Whether the schedule places {@code entryId} on the bookie at {@code bookieIndex}. */
    public boolean isStoredEntryId(long entryId, int bookieIndex) {
        return schedule.hasEntry(entryId, bookieIndex);
    }

    /**
     * Gets the ensemble of the segment this fragment belongs to.
     *
     * @return the ensemble for the segment which this fragment is a part of
     */
    public List<BookieId> getEnsemble() {
        return this.ensemble;
    }

    public ReplicateType getReplicateType() {
        return replicateType;
    }

    public void setReplicateType(ReplicateType replicateType) {
        this.replicateType = replicateType;
    }

    @Override
    public String toString() {
        return String.format("Fragment(LedgerID: %d, FirstEntryID: %d[%d], "
                + "LastKnownEntryID: %d[%d], Host: %s, Closed: %s, Type: %s)", ledgerId, firstEntryId,
                getFirstStoredEntryId(), lastKnownEntryId, getLastStoredEntryId(),
                getAddresses(), isLedgerClosed, replicateType);
    }

    /**
     * ReplicateType.
     */
    public enum ReplicateType {
        DATA_LOSS,
        DATA_NOT_ADHERING_PLACEMENT
    }
}
| 336 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeper.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.WATCHER_SCOPE;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.UnpooledByteBufAllocator;
import io.netty.channel.EventLoopGroup;
import io.netty.util.HashedWheelTimer;
import io.netty.util.concurrent.DefaultThreadFactory;
import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.bookkeeper.bookie.BookKeeperServerStats;
import org.apache.bookkeeper.client.AsyncCallback.CreateCallback;
import org.apache.bookkeeper.client.AsyncCallback.DeleteCallback;
import org.apache.bookkeeper.client.AsyncCallback.IsClosedCallback;
import org.apache.bookkeeper.client.AsyncCallback.OpenCallback;
import org.apache.bookkeeper.client.BookieInfoReader.BookieInfo;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncCreateAdvCallback;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncCreateCallback;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncDeleteCallback;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncOpenCallback;
import org.apache.bookkeeper.client.api.BookKeeperBuilder;
import org.apache.bookkeeper.client.api.CreateBuilder;
import org.apache.bookkeeper.client.api.DeleteBuilder;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.client.api.LedgersIterator;
import org.apache.bookkeeper.client.api.ListLedgersResult;
import org.apache.bookkeeper.client.api.ListLedgersResultBuilder;
import org.apache.bookkeeper.client.api.OpenBuilder;
import org.apache.bookkeeper.client.api.WriteFlag;
import org.apache.bookkeeper.common.allocator.ByteBufAllocatorBuilder;
import org.apache.bookkeeper.common.util.OrderedExecutor;
import org.apache.bookkeeper.common.util.OrderedScheduler;
import org.apache.bookkeeper.common.util.ReflectionUtils;
import org.apache.bookkeeper.conf.AbstractConfiguration;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.feature.FeatureProvider;
import org.apache.bookkeeper.feature.SettableFeatureProvider;
import org.apache.bookkeeper.meta.CleanupLedgerManager;
import org.apache.bookkeeper.meta.LedgerIdGenerator;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.meta.LedgerManager.LedgerRangeIterator;
import org.apache.bookkeeper.meta.LedgerManagerFactory;
import org.apache.bookkeeper.meta.MetadataClientDriver;
import org.apache.bookkeeper.meta.MetadataDrivers;
import org.apache.bookkeeper.meta.exceptions.MetadataException;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.DNSToSwitchMapping;
import org.apache.bookkeeper.proto.BookieAddressResolver;
import org.apache.bookkeeper.proto.BookieClient;
import org.apache.bookkeeper.proto.BookieClientImpl;
import org.apache.bookkeeper.proto.DataFormats;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.EventLoopUtil;
import org.apache.bookkeeper.versioning.Versioned;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* BookKeeper client.
*
* <p>We assume there is one single writer to a ledger at any time.
*
* <p>There are four possible operations: start a new ledger, write to a ledger,
* read from a ledger and delete a ledger.
*
* <p>The exceptions resulting from synchronous calls and error code resulting from
* asynchronous calls can be found in the class {@link BKException}.
*/
public class BookKeeper implements org.apache.bookkeeper.client.api.BookKeeper {
    private static final Logger LOG = LoggerFactory.getLogger(BookKeeper.class);
    // Netty event loop used for bookie connections; may be externally supplied.
    final EventLoopGroup eventLoopGroup;
    private final ByteBufAllocator allocator;
    // The stats logger for this client.
    private final StatsLogger statsLogger;
    private final BookKeeperClientStats clientStats;
    // Probability [0,1] that a faulty bookie is actually quarantined by the health check.
    private final double bookieQuarantineRatio;
    // whether the event loop group is one we created, or is owned by whoever
    // instantiated us
    boolean ownEventLoopGroup = false;
    final BookieClient bookieClient;
    final BookieWatcherImpl bookieWatcher;
    // Ordered executor for client callbacks and a single-threaded scheduler for timed tasks.
    final OrderedExecutor mainWorkerPool;
    final OrderedScheduler scheduler;
    // Timer for request timeouts; ownTimer records whether we created (and must stop) it.
    final HashedWheelTimer requestTimer;
    final boolean ownTimer;
    final FeatureProvider featureProvider;
    // Scheduler for periodic bookie-info polling; null unless disk-weight placement is enabled.
    final ScheduledExecutorService bookieInfoScheduler;
    final MetadataClientDriver metadataDriver;
    // Ledger manager responsible for how to store ledger meta data
    final LedgerManagerFactory ledgerManagerFactory;
    final LedgerManager ledgerManager;
    final LedgerIdGenerator ledgerIdGenerator;
    // Ensemble Placement Policy
    final EnsemblePlacementPolicy placementPolicy;
    BookieInfoReader bookieInfoReader;
    final ClientConfiguration conf;
    final ClientInternalConf internalConf;
    // Close State
    boolean closed = false;
    final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
    /**
     * BookKeeper Client Builder to build client instances.
     *
     * <p>Collects optional externally-owned resources (zookeeper client, event loop
     * group, allocator, timer, stats logger, dns resolver, feature provider) and
     * passes them to the {@link BookKeeper} constructor on {@link #build()}.
     *
     * @see BookKeeperBuilder
     */
    public static class Builder {
        final ClientConfiguration conf;
        // All of the following default to null / no-op and are optional.
        ZooKeeper zk = null;
        EventLoopGroup eventLoopGroup = null;
        ByteBufAllocator allocator = null;
        StatsLogger statsLogger = NullStatsLogger.INSTANCE;
        DNSToSwitchMapping dnsResolver = null;
        HashedWheelTimer requestTimer = null;
        FeatureProvider featureProvider = null;
        // Use BookKeeper.forConfig(conf) to obtain a Builder.
        Builder(ClientConfiguration conf) {
            this.conf = conf;
        }
        /**
         * Configure the bookkeeper client with a provided {@link EventLoopGroup}.
         *
         * @param f an external {@link EventLoopGroup} to use by the bookkeeper client.
         * @return client builder.
         * @deprecated since 4.5, use {@link #eventLoopGroup(EventLoopGroup)}
         * @see #eventLoopGroup(EventLoopGroup)
         */
        @Deprecated
        public Builder setEventLoopGroup(EventLoopGroup f) {
            eventLoopGroup = f;
            return this;
        }
        /**
         * Configure the bookkeeper client with a provided {@link ZooKeeper} client.
         *
         * @param zk an external {@link ZooKeeper} client to use by the bookkeeper client.
         * @return client builder.
         * @deprecated since 4.5, use {@link #zk(ZooKeeper)}
         * @see #zk(ZooKeeper)
         */
        @Deprecated
        public Builder setZookeeper(ZooKeeper zk) {
            this.zk = zk;
            return this;
        }
        /**
         * Configure the bookkeeper client with a provided {@link StatsLogger}.
         *
         * @param statsLogger an {@link StatsLogger} to use by the bookkeeper client to collect stats generated
         *                    by the client.
         * @return client builder.
         * @deprecated since 4.5, use {@link #statsLogger(StatsLogger)}
         * @see #statsLogger(StatsLogger)
         */
        @Deprecated
        public Builder setStatsLogger(StatsLogger statsLogger) {
            this.statsLogger = statsLogger;
            return this;
        }
        /**
         * Configure the bookkeeper client with a provided {@link EventLoopGroup}.
         *
         * @param f an external {@link EventLoopGroup} to use by the bookkeeper client.
         * @return client builder.
         * @since 4.5
         */
        public Builder eventLoopGroup(EventLoopGroup f) {
            eventLoopGroup = f;
            return this;
        }
        /**
         * Configure the bookkeeper client with a provided {@link ByteBufAllocator}.
         *
         * @param allocator an external {@link ByteBufAllocator} to use by the bookkeeper client.
         * @return client builder.
         * @since 4.9
         */
        public Builder allocator(ByteBufAllocator allocator) {
            this.allocator = allocator;
            return this;
        }
        /**
         * Configure the bookkeeper client with a provided {@link ZooKeeper} client.
         *
         * @param zk an external {@link ZooKeeper} client to use by the bookkeeper client.
         * @return client builder.
         * @since 4.5
         */
        @Deprecated
        public Builder zk(ZooKeeper zk) {
            this.zk = zk;
            return this;
        }
        /**
         * Configure the bookkeeper client with a provided {@link StatsLogger}.
         *
         * @param statsLogger an {@link StatsLogger} to use by the bookkeeper client to collect stats generated
         *                    by the client.
         * @return client builder.
         * @since 4.5
         */
        public Builder statsLogger(StatsLogger statsLogger) {
            this.statsLogger = statsLogger;
            return this;
        }
        /**
         * Configure the bookkeeper client to use the provided dns resolver {@link DNSToSwitchMapping}.
         *
         * @param dnsResolver dns resolver for placement policy to use for resolving network locations.
         * @return client builder
         * @since 4.5
         */
        public Builder dnsResolver(DNSToSwitchMapping dnsResolver) {
            this.dnsResolver = dnsResolver;
            return this;
        }
        /**
         * Configure the bookkeeper client to use a provided {@link HashedWheelTimer}.
         *
         * @param requestTimer request timer for client to manage timer related tasks.
         * @return client builder
         * @since 4.5
         */
        public Builder requestTimer(HashedWheelTimer requestTimer) {
            this.requestTimer = requestTimer;
            return this;
        }
        /**
         * Configure the bookkeeper client with a {@link FeatureProvider} used to
         * toggle client features at runtime.
         *
         * @param featureProvider feature provider to use; when unset the client disables all features.
         * @return client builder
         */
        public Builder featureProvider(FeatureProvider featureProvider) {
            this.featureProvider = featureProvider;
            return this;
        }
        /**
         * Build the {@link BookKeeper} client with the accumulated settings.
         *
         * @return a new BookKeeper client instance
         * @throws IOException if metadata driver or network resources fail to initialize
         * @throws InterruptedException if interrupted while connecting
         * @throws BKException on bookkeeper-level connection errors
         */
        public BookKeeper build() throws IOException, InterruptedException, BKException {
            checkNotNull(statsLogger, "No stats logger provided");
            return new BookKeeper(conf, zk, eventLoopGroup, allocator, statsLogger, dnsResolver, requestTimer,
                    featureProvider);
        }
    }
    /**
     * Create a {@link Builder} seeded with the given client configuration.
     *
     * @param conf client configuration the built client will use
     * @return a new builder
     */
    public static Builder forConfig(final ClientConfiguration conf) {
        return new Builder(conf);
    }
    /**
     * Create a bookkeeper client. A zookeeper client and a client event loop group
     * will be instantiated as part of this constructor.
     *
     * @param servers
     *          A list of one of more servers on which zookeeper is running. The
     *          client assumes that the running bookies have been registered with
     *          zookeeper under the path
     *          {@link AbstractConfiguration#getZkAvailableBookiesPath()}
     * @throws IOException
     * @throws InterruptedException
     * @throws BKException in the event of a bookkeeper connection error
     */
    public BookKeeper(String servers) throws IOException, InterruptedException,
        BKException {
        // Delegates via a "zk+null://" metadata service URI pointing at the given servers.
        this(new ClientConfiguration().setMetadataServiceUri("zk+null://" + servers + "/ledgers"));
    }
    /**
     * Create a bookkeeper client using a configuration object.
     * A zookeeper client and a client event loop group will be
     * instantiated as part of this constructor.
     *
     * @param conf
     *          Client Configuration object
     * @throws IOException
     * @throws InterruptedException
     * @throws BKException in the event of a bookkeeper connection error
     */
    public BookKeeper(final ClientConfiguration conf)
            throws IOException, InterruptedException, BKException {
        // All externally-ownable resources (zk, event loop, allocator, timer, ...) are
        // left null so the main constructor creates and owns them.
        this(conf, null, null, null, NullStatsLogger.INSTANCE,
                null, null, null);
    }
private static ZooKeeper validateZooKeeper(ZooKeeper zk) throws NullPointerException, IOException {
checkNotNull(zk, "No zookeeper instance provided");
if (!zk.getState().isConnected()) {
LOG.error("Unconnected zookeeper handle passed to bookkeeper");
throw new IOException(KeeperException.create(KeeperException.Code.CONNECTIONLOSS));
}
return zk;
}
private static EventLoopGroup validateEventLoopGroup(EventLoopGroup eventLoopGroup)
throws NullPointerException {
checkNotNull(eventLoopGroup, "No Event Loop Group provided");
return eventLoopGroup;
}
    /**
     * Create a bookkeeper client but use the passed in zookeeper client instead
     * of instantiating one.
     *
     * @param conf
     *          Client Configuration object
     *          {@link ClientConfiguration}
     * @param zk
     *          Zookeeper client instance connected to the zookeeper with which
     *          the bookies have registered
     * @throws IOException if {@code zk} is not connected, or on initialization failure
     * @throws InterruptedException
     * @throws BKException in the event of a bookkeeper connection error
     */
    public BookKeeper(ClientConfiguration conf, ZooKeeper zk)
            throws IOException, InterruptedException, BKException {
        this(conf, validateZooKeeper(zk), null, null, NullStatsLogger.INSTANCE, null, null, null);
    }
    /**
     * Create a bookkeeper client but use the passed in zookeeper client and
     * client event loop group instead of instantiating those.
     *
     * @param conf
     *          Client Configuration Object
     *          {@link ClientConfiguration}
     * @param zk
     *          Zookeeper client instance connected to the zookeeper with which
     *          the bookies have registered. The ZooKeeper client must be connected
     *          before it is passed to BookKeeper. Otherwise a KeeperException is thrown.
     * @param eventLoopGroup
     *          An event loop group that will be used to create connections to the bookies
     * @throws IOException
     * @throws InterruptedException
     * @throws BKException in the event of a bookkeeper connection error
     */
    public BookKeeper(ClientConfiguration conf, ZooKeeper zk, EventLoopGroup eventLoopGroup)
            throws IOException, InterruptedException, BKException {
        this(conf, validateZooKeeper(zk), validateEventLoopGroup(eventLoopGroup), null, NullStatsLogger.INSTANCE,
                null, null, null);
    }
    /**
     * Constructor for use with the builder. Other constructors also use it.
     *
     * <p>Any of {@code zkc}, {@code eventLoopGroup}, {@code byteBufAllocator},
     * {@code requestTimer} and {@code featureProvider} may be null, in which case
     * the client creates (and later owns/closes) its own instance.
     */
    @SuppressWarnings("deprecation")
    @VisibleForTesting
    BookKeeper(ClientConfiguration conf,
               ZooKeeper zkc,
               EventLoopGroup eventLoopGroup,
               ByteBufAllocator byteBufAllocator,
               StatsLogger rootStatsLogger,
               DNSToSwitchMapping dnsResolver,
               HashedWheelTimer requestTimer,
               FeatureProvider featureProvider)
            throws IOException, InterruptedException, BKException {
        this.conf = conf;
        // initialize feature provider
        if (null == featureProvider) {
            this.featureProvider = SettableFeatureProvider.DISABLE_ALL;
        } else {
            this.featureProvider = featureProvider;
        }
        this.internalConf = ClientInternalConf.fromConfigAndFeatureProvider(conf, this.featureProvider);
        // initialize resources
        this.scheduler = OrderedScheduler.newSchedulerBuilder().numThreads(1).name("BookKeeperClientScheduler").build();
        this.mainWorkerPool = OrderedExecutor.newBuilder()
                .name("BookKeeperClientWorker")
                .numThreads(conf.getNumWorkerThreads())
                .statsLogger(rootStatsLogger)
                .traceTaskExecution(conf.getEnableTaskExecutionStats())
                .preserveMdcForTaskExecution(conf.getPreserveMdcForTaskExecution())
                .traceTaskWarnTimeMicroSec(conf.getTaskExecutionWarnTimeMicros())
                .enableBusyWait(conf.isBusyWaitEnabled())
                .build();
        // initialize stats logger
        this.statsLogger = rootStatsLogger.scope(BookKeeperClientStats.CLIENT_SCOPE);
        this.clientStats = BookKeeperClientStats.newInstance(this.statsLogger);
        // initialize metadata driver; prefer an explicit metadata service URI, falling
        // back to the "zk" driver with the externally provided zookeeper handle.
        try {
            String metadataServiceUriStr = conf.getMetadataServiceUri();
            if (null != metadataServiceUriStr) {
                this.metadataDriver = MetadataDrivers.getClientDriver(URI.create(metadataServiceUriStr));
            } else {
                checkNotNull(zkc, "No external zookeeper provided when no metadata service uri is found");
                this.metadataDriver = MetadataDrivers.getClientDriver("zk");
            }
            this.metadataDriver.initialize(
                conf,
                scheduler,
                rootStatsLogger,
                Optional.ofNullable(zkc));
        } catch (ConfigurationException ce) {
            LOG.error("Failed to initialize metadata client driver using invalid metadata service uri", ce);
            throw new IOException("Failed to initialize metadata client driver", ce);
        } catch (MetadataException me) {
            LOG.error("Encountered metadata exceptions on initializing metadata client driver", me);
            throw new IOException("Failed to initialize metadata client driver", me);
        }
        // initialize event loop group
        if (null == eventLoopGroup) {
            this.eventLoopGroup = EventLoopUtil.getClientEventLoopGroup(conf,
                    new DefaultThreadFactory("bookkeeper-io"));
            this.ownEventLoopGroup = true;
        } else {
            this.eventLoopGroup = eventLoopGroup;
            this.ownEventLoopGroup = false;
        }
        // initialize the ByteBuf allocator (external one wins over config-built one)
        if (byteBufAllocator != null) {
            this.allocator = byteBufAllocator;
        } else {
            this.allocator = ByteBufAllocatorBuilder.create()
                    .poolingPolicy(conf.getAllocatorPoolingPolicy())
                    .poolingConcurrency(conf.getAllocatorPoolingConcurrency())
                    .outOfMemoryPolicy(conf.getAllocatorOutOfMemoryPolicy())
                    .leakDetectionPolicy(conf.getAllocatorLeakDetectionPolicy())
                    .build();
        }
        // initialize the request timeout timer; ownTimer records whether we must stop it
        if (null == requestTimer) {
            this.requestTimer = new HashedWheelTimer(
                    new ThreadFactoryBuilder().setNameFormat("BookieClientTimer-%d").build(),
                    conf.getTimeoutTimerTickDurationMs(), TimeUnit.MILLISECONDS,
                    conf.getTimeoutTimerNumTicks());
            this.ownTimer = true;
        } else {
            this.requestTimer = requestTimer;
            this.ownTimer = false;
        }
        BookieAddressResolver bookieAddressResolver = conf.getBookieAddressResolverEnabled()
                ? new DefaultBookieAddressResolver(metadataDriver.getRegistrationClient())
                : new BookieAddressResolverDisabled();
        if (dnsResolver != null) {
            dnsResolver.setBookieAddressResolver(bookieAddressResolver);
        }
        // initialize the ensemble placement
        this.placementPolicy = initializeEnsemblePlacementPolicy(conf,
                dnsResolver, this.requestTimer, this.featureProvider, this.statsLogger, bookieAddressResolver);
        this.bookieWatcher = new BookieWatcherImpl(
                conf, this.placementPolicy, metadataDriver.getRegistrationClient(), bookieAddressResolver,
                this.statsLogger.scope(WATCHER_SCOPE));
        // initialize bookie client
        this.bookieClient = new BookieClientImpl(conf, this.eventLoopGroup, this.allocator, this.mainWorkerPool,
                scheduler, rootStatsLogger, this.bookieWatcher.getBookieAddressResolver());
        // with disk-weight-based placement a background reader polls bookie info periodically
        if (conf.getDiskWeightBasedPlacementEnabled()) {
            LOG.info("Weighted ledger placement enabled");
            ThreadFactoryBuilder tFBuilder = new ThreadFactoryBuilder()
                    .setNameFormat("BKClientMetaDataPollScheduler-%d");
            this.bookieInfoScheduler = Executors.newSingleThreadScheduledExecutor(tFBuilder.build());
            this.bookieInfoReader = new BookieInfoReader(this, conf, this.bookieInfoScheduler);
            this.bookieWatcher.initialBlockingBookieRead();
            this.bookieInfoReader.start();
        } else {
            LOG.info("Weighted ledger placement is not enabled");
            this.bookieInfoScheduler = null;
            this.bookieInfoReader = new BookieInfoReader(this, conf, null);
            this.bookieWatcher.initialBlockingBookieRead();
        }
        // initialize ledger manager
        try {
            this.ledgerManagerFactory =
                this.metadataDriver.getLedgerManagerFactory();
        } catch (MetadataException e) {
            throw new IOException("Failed to initialize ledger manager factory", e);
        }
        this.ledgerManager = new CleanupLedgerManager(ledgerManagerFactory.newLedgerManager());
        this.ledgerIdGenerator = ledgerManagerFactory.newLedgerIdGenerator();
        this.bookieQuarantineRatio = conf.getBookieQuarantineRatio();
        scheduleBookieHealthCheckIfEnabled(conf);
    }
    /**
     * Allow to extend BookKeeper for mocking in unit tests.
     *
     * <p>Leaves almost every collaborator null (or a no-op/default instance), so a
     * mock subclass must not exercise code paths that touch them.
     */
    @VisibleForTesting
    BookKeeper() {
        conf = new ClientConfiguration();
        internalConf = ClientInternalConf.fromConfig(conf);
        statsLogger = NullStatsLogger.INSTANCE;
        clientStats = BookKeeperClientStats.newInstance(statsLogger);
        scheduler = null;
        requestTimer = null;
        metadataDriver = null;
        placementPolicy = null;
        ownTimer = false;
        mainWorkerPool = null;
        ledgerManagerFactory = null;
        ledgerManager = null;
        ledgerIdGenerator = null;
        featureProvider = null;
        eventLoopGroup = null;
        bookieWatcher = null;
        bookieInfoScheduler = null;
        bookieClient = null;
        allocator = UnpooledByteBufAllocator.DEFAULT;
        bookieQuarantineRatio = 1.0;
    }
protected EnsemblePlacementPolicy initializeEnsemblePlacementPolicy(ClientConfiguration conf,
DNSToSwitchMapping dnsResolver,
HashedWheelTimer timer,
FeatureProvider featureProvider,
StatsLogger statsLogger,
BookieAddressResolver bookieAddressResolver)
throws IOException {
try {
Class<? extends EnsemblePlacementPolicy> policyCls = conf.getEnsemblePlacementPolicy();
return ReflectionUtils.newInstance(policyCls).initialize(conf, Optional.ofNullable(dnsResolver),
timer, featureProvider, statsLogger, bookieAddressResolver);
} catch (ConfigurationException e) {
throw new IOException("Failed to initialize ensemble placement policy : ", e);
}
}
    /**
     * Maps a raw response code through {@link #getReturnRc(BookieClient, int)}
     * using this client's bookie client.
     */
    int getReturnRc(int rc) {
        return getReturnRc(bookieClient, rc);
    }
static int getReturnRc(BookieClient bookieClient, int rc) {
if (BKException.Code.OK == rc) {
return rc;
} else {
if (bookieClient.isClosed()) {
return BKException.Code.ClientClosedException;
} else {
return rc;
}
}
}
void scheduleBookieHealthCheckIfEnabled(ClientConfiguration conf) {
if (conf.isBookieHealthCheckEnabled()) {
scheduler.scheduleAtFixedRate(
() -> checkForFaultyBookies(),
conf.getBookieHealthCheckIntervalSeconds(),
conf.getBookieHealthCheckIntervalSeconds(),
TimeUnit.SECONDS);
}
}
    /**
     * Periodic health-check task: quarantines bookies the bookie client has
     * flagged as faulty, subject to {@code bookieQuarantineRatio} sampling.
     *
     * <p>If the metadata driver reports health checking disabled (or the check
     * cannot be verified), no bookie is quarantined and all currently
     * quarantined bookies are released.
     */
    void checkForFaultyBookies() {
        List<BookieId> faultyBookies = bookieClient.getFaultyBookies();
        if (faultyBookies.isEmpty()) {
            return;
        }
        // Default to "disabled" when the metadata lookup fails, so a metadata outage
        // cannot cause quarantining.
        boolean isEnabled = false;
        try {
            isEnabled = metadataDriver.isHealthCheckEnabled().get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            LOG.error("Cannot verify if healthcheck is enabled", e);
        } catch (ExecutionException e) {
            LOG.error("Cannot verify if healthcheck is enabled", e.getCause());
        }
        if (!isEnabled) {
            LOG.info("Health checks is currently disabled!");
            bookieWatcher.releaseAllQuarantinedBookies();
            return;
        }
        for (BookieId faultyBookie : faultyBookies) {
            // Quarantine only a (configurable) fraction of faulty bookies, counting
            // both outcomes in stats.
            if (Math.random() <= bookieQuarantineRatio) {
                bookieWatcher.quarantineBookie(faultyBookie);
                statsLogger.getCounter(BookKeeperServerStats.BOOKIE_QUARANTINE).inc();
            } else {
                statsLogger.getCounter(BookKeeperServerStats.BOOKIE_QUARANTINE_SKIP).inc();
            }
        }
    }
    /**
     * Returns the (cleanup-wrapping) ledger manager used by this client.
     */
    @VisibleForTesting
    public LedgerManager getLedgerManager() {
        return ledgerManager;
    }
    @VisibleForTesting
    public LedgerManagerFactory getLedgerManagerFactory() {
        return ledgerManagerFactory;
    }
    // Unwraps the CleanupLedgerManager decorator; for tests only.
    @VisibleForTesting
    LedgerManager getUnderlyingLedgerManager() {
        return ((CleanupLedgerManager) ledgerManager).getUnderlying();
    }
    @VisibleForTesting
    LedgerIdGenerator getLedgerIdGenerator() {
        return ledgerIdGenerator;
    }
    @VisibleForTesting
    ReentrantReadWriteLock getCloseLock() {
        return closeLock;
    }
    @VisibleForTesting
    boolean isClosed() {
        return closed;
    }
    @VisibleForTesting
    BookieWatcher getBookieWatcher() {
        return bookieWatcher;
    }
    public BookieAddressResolver getBookieAddressResolver() {
        return bookieWatcher.getBookieAddressResolver();
    }
    public OrderedExecutor getMainWorkerPool() {
        return mainWorkerPool;
    }
    @VisibleForTesting
    OrderedScheduler getScheduler() {
        return scheduler;
    }
    @VisibleForTesting
    EnsemblePlacementPolicy getPlacementPolicy() {
        return placementPolicy;
    }
    @VisibleForTesting
    public MetadataClientDriver getMetadataClientDriver() {
        return metadataDriver;
    }
/**
 * There are 3 digest types that can be used for verification. The CRC32 is
 * cheap to compute but does not protect against byzantine bookies (i.e., a
 * bookie might report fake bytes and a matching CRC32). The MAC code is more
 * expensive to compute, but is protected by a password, i.e., a bookie can't
 * report fake bytes with a matching MAC unless it knows the password.
 * The CRC32C, which use SSE processor instruction, has better performance than CRC32.
 * Legacy DigestType for backward compatibility. If we want to add new DigestType,
 * we should add it in here, client.api.DigestType and DigestType in DataFormats.proto.
 * If the digest type is set/passed in as DUMMY, a dummy digest is added/checked.
 * This DUMMY digest is mostly for test purposes or in situations/use-cases
 * where digest is considered an overhead.
 */
public enum DigestType {
    MAC, CRC32, CRC32C, DUMMY;
    /**
     * Converts the public client-API digest type into this legacy enum.
     *
     * @throws IllegalArgumentException if the value has no legacy counterpart
     */
    public static DigestType fromApiDigestType(org.apache.bookkeeper.client.api.DigestType digestType) {
        switch (digestType) {
        case MAC:
            return DigestType.MAC;
        case CRC32:
            return DigestType.CRC32;
        case CRC32C:
            return DigestType.CRC32C;
        case DUMMY:
            return DigestType.DUMMY;
        default:
            throw new IllegalArgumentException("Unable to convert digest type " + digestType);
        }
    }
    /**
     * Converts this legacy enum to its protobuf wire representation
     * (note: MAC maps to the protobuf value {@code HMAC}).
     *
     * @throws IllegalArgumentException if the value has no protobuf counterpart
     */
    public static DataFormats.LedgerMetadataFormat.DigestType toProtoDigestType(DigestType digestType) {
        switch (digestType) {
        case MAC:
            return DataFormats.LedgerMetadataFormat.DigestType.HMAC;
        case CRC32:
            return DataFormats.LedgerMetadataFormat.DigestType.CRC32;
        case CRC32C:
            return DataFormats.LedgerMetadataFormat.DigestType.CRC32C;
        case DUMMY:
            return DataFormats.LedgerMetadataFormat.DigestType.DUMMY;
        default:
            throw new IllegalArgumentException("Unable to convert digest type " + digestType);
        }
    }
    /**
     * Converts this legacy enum to the public client-API digest type.
     *
     * @throws IllegalArgumentException if the value has no API counterpart
     */
    public org.apache.bookkeeper.client.api.DigestType toApiDigestType() {
        switch (this) {
        case MAC:
            return org.apache.bookkeeper.client.api.DigestType.MAC;
        case CRC32:
            return org.apache.bookkeeper.client.api.DigestType.CRC32;
        case CRC32C:
            return org.apache.bookkeeper.client.api.DigestType.CRC32C;
        case DUMMY:
            return org.apache.bookkeeper.client.api.DigestType.DUMMY;
        default:
            throw new IllegalArgumentException("Unable to convert digest type " + this);
        }
    }
}
/**
 * Returns the client configuration this instance was built with.
 */
protected ClientConfiguration getConf() {
    return conf;
}
StatsLogger getStatsLogger() {
    return statsLogger;
}
/**
 * Get the BookieClient, currently used for doing bookie recovery.
 *
 * @return BookieClient for the BookKeeper instance.
 */
BookieClient getBookieClient() {
    return bookieClient;
}
/**
 * Retrieves BookieInfo from all the bookies in the cluster. It sends requests
 * to all the bookies in parallel and returns the info from the bookies that responded.
 * If there was an error in reading from any bookie, nothing will be returned for
 * that bookie in the map.
 * @return map
 *            A map of bookieSocketAddress to its BookieInfo
 * @throws BKException
 * @throws InterruptedException
 */
public Map<BookieId, BookieInfo> getBookieInfo() throws BKException, InterruptedException {
    return bookieInfoReader.getBookieInfo();
}
/**
 * Creates a new ledger asynchronously. To create a ledger, we need to specify
 * the ensemble size, the quorum size, the digest type, a password, a callback
 * implementation, and an optional control object. The ensemble size is how
 * many bookies the entries should be striped among and the quorum size is the
 * degree of replication of each entry. The digest type is either a MAC or a
 * CRC. Note that the CRC option is not able to protect a client against a
 * bookie that replaces an entry. The password is used not only to
 * authenticate access to a ledger, but also to verify entries in ledgers.
 *
 * @param ensSize
 *          number of bookies over which to stripe entries
 * @param writeQuorumSize
 *          number of bookies each entry will be written to. each of these bookies
 *          must acknowledge the entry before the call is completed.
 * @param digestType
 *          digest type, either MAC or CRC32
 * @param passwd
 *          password
 * @param cb
 *          createCallback implementation
 * @param ctx
 *          optional control object
 */
public void asyncCreateLedger(final int ensSize,
                              final int writeQuorumSize,
                              final DigestType digestType,
                              final byte[] passwd, final CreateCallback cb, final Object ctx) {
    // Delegates with ackQuorumSize == writeQuorumSize (full replication ack).
    asyncCreateLedger(ensSize, writeQuorumSize, writeQuorumSize,
                      digestType, passwd, cb, ctx, Collections.emptyMap());
}
/**
 * Creates a new ledger asynchronously. Ledgers created with this call have
 * a separate write quorum and ack quorum size. The write quorum must be larger than
 * the ack quorum.
 *
 * <p>Separating the write and the ack quorum allows the BookKeeper client to continue
 * writing when a bookie has failed but the failure has not yet been detected. Detecting
 * a bookie has failed can take a number of seconds, as configured by the read timeout
 * {@link ClientConfiguration#getReadTimeout()}. Once the bookie failure is detected,
 * that bookie will be removed from the ensemble.
 *
 * <p>The other parameters match those of {@link #asyncCreateLedger(int, int, DigestType, byte[],
 * AsyncCallback.CreateCallback, Object)}
 *
 * @param ensSize
 *          number of bookies over which to stripe entries
 * @param writeQuorumSize
 *          number of bookies each entry will be written to
 * @param ackQuorumSize
 *          number of bookies which must acknowledge an entry before the call is completed
 * @param digestType
 *          digest type, either MAC or CRC32
 * @param passwd
 *          password
 * @param cb
 *          createCallback implementation
 * @param ctx
 *          optional control object
 * @param customMetadata
 *          optional customMetadata that holds user specified metadata
 */
public void asyncCreateLedger(final int ensSize, final int writeQuorumSize, final int ackQuorumSize,
                              final DigestType digestType, final byte[] passwd,
                              final CreateCallback cb, final Object ctx, final Map<String, byte[]> customMetadata) {
    // NOTE(review): equality is accepted here, so the exception message
    // "must be larger than" slightly misstates the condition — confirm wording.
    if (writeQuorumSize < ackQuorumSize) {
        throw new IllegalArgumentException("Write quorum must be larger than ack quorum");
    }
    // Hold the read side of the close lock so close() cannot race this create.
    closeLock.readLock().lock();
    try {
        if (closed) {
            cb.createComplete(BKException.Code.ClientClosedException, null, ctx);
            return;
        }
        new LedgerCreateOp(BookKeeper.this, ensSize, writeQuorumSize,
                           ackQuorumSize, digestType, passwd, cb, ctx,
                           customMetadata, WriteFlag.NONE, clientStats)
            .initiate();
    } finally {
        closeLock.readLock().unlock();
    }
}
/**
 * Creates a new ledger. Default of 3 servers, and quorum of 2 servers.
 *
 * @param digestType
 *          digest type, either MAC or CRC32
 * @param passwd
 *          password
 * @return a handle to the newly created ledger
 * @throws InterruptedException
 * @throws BKException
 */
public LedgerHandle createLedger(DigestType digestType, byte[] passwd)
        throws BKException, InterruptedException {
    return createLedger(3, 2, digestType, passwd);
}
/**
 * Synchronous call to create ledger. Parameters match those of
 * {@link #asyncCreateLedger(int, int, DigestType, byte[],
 *                           AsyncCallback.CreateCallback, Object)}
 *
 * @param ensSize
 * @param qSize
 * @param digestType
 * @param passwd
 * @return a handle to the newly created ledger
 * @throws InterruptedException
 * @throws BKException
 */
public LedgerHandle createLedger(int ensSize, int qSize,
                                 DigestType digestType, byte[] passwd)
        throws InterruptedException, BKException {
    // qSize serves as both write quorum and ack quorum.
    return createLedger(ensSize, qSize, qSize, digestType, passwd, Collections.emptyMap());
}
/**
 * Synchronous call to create ledger. Parameters match those of
 * {@link #asyncCreateLedger(int, int, DigestType, byte[],
 *                           AsyncCallback.CreateCallback, Object)}
 *
 * @param ensSize
 * @param writeQuorumSize
 * @param ackQuorumSize
 * @param digestType
 * @param passwd
 * @return a handle to the newly created ledger
 * @throws InterruptedException
 * @throws BKException
 */
public LedgerHandle createLedger(int ensSize, int writeQuorumSize, int ackQuorumSize,
                                 DigestType digestType, byte[] passwd)
        throws InterruptedException, BKException {
    return createLedger(ensSize, writeQuorumSize, ackQuorumSize, digestType, passwd, Collections.emptyMap());
}
/**
 * Synchronous call to create ledger. Parameters match those of asyncCreateLedger
 *
 * @param ensSize
 * @param writeQuorumSize
 * @param ackQuorumSize
 * @param digestType
 * @param passwd
 * @param customMetadata
 * @return a handle to the newly created ledger
 * @throws InterruptedException
 * @throws BKException
 */
public LedgerHandle createLedger(int ensSize, int writeQuorumSize, int ackQuorumSize,
                                 DigestType digestType, byte[] passwd, final Map<String, byte[]> customMetadata)
        throws InterruptedException, BKException {
    CompletableFuture<LedgerHandle> future = new CompletableFuture<>();
    SyncCreateCallback result = new SyncCreateCallback(future);
    /*
     * Calls asynchronous version
     */
    asyncCreateLedger(ensSize, writeQuorumSize, ackQuorumSize, digestType, passwd,
                      result, null, customMetadata);
    LedgerHandle lh = SyncCallbackUtils.waitForResult(future);
    // A successful create must always yield a handle; a null here indicates an
    // internal inconsistency, surfaced as UnexpectedConditionException.
    if (lh == null) {
        LOG.error("Unexpected condition : no ledger handle returned for a success ledger creation");
        throw BKException.create(BKException.Code.UnexpectedConditionException);
    }
    return lh;
}
/**
 * Synchronous call to create ledger.
 * Creates a new ledger asynchronously and returns {@link LedgerHandleAdv} which can accept entryId.
 * Parameters must match those of asyncCreateLedgerAdv
 *
 * @param ensSize
 * @param writeQuorumSize
 * @param ackQuorumSize
 * @param digestType
 * @param passwd
 *
 * @return a handle to the newly created ledger
 * @throws InterruptedException
 * @throws BKException
 */
public LedgerHandle createLedgerAdv(int ensSize, int writeQuorumSize, int ackQuorumSize,
                                    DigestType digestType, byte[] passwd)
        throws InterruptedException, BKException {
    return createLedgerAdv(ensSize, writeQuorumSize, ackQuorumSize,
                           digestType, passwd, Collections.emptyMap());
}
/**
 * Synchronous call to create ledger.
 * Creates a new ledger asynchronously and returns {@link LedgerHandleAdv} which can accept entryId.
 * Parameters must match those of asyncCreateLedgerAdv
 *
 * @param ensSize
 * @param writeQuorumSize
 * @param ackQuorumSize
 * @param digestType
 * @param passwd
 * @param customMetadata
 * @return a handle to the newly created ledger
 * @throws InterruptedException
 * @throws BKException
 */
public LedgerHandle createLedgerAdv(int ensSize, int writeQuorumSize, int ackQuorumSize,
                                    DigestType digestType, byte[] passwd, final Map<String, byte[]> customMetadata)
        throws InterruptedException, BKException {
    CompletableFuture<LedgerHandleAdv> future = new CompletableFuture<>();
    SyncCreateAdvCallback result = new SyncCreateAdvCallback(future);
    /*
     * Calls asynchronous version
     */
    asyncCreateLedgerAdv(ensSize, writeQuorumSize, ackQuorumSize, digestType, passwd,
                         result, null, customMetadata);
    LedgerHandle lh = SyncCallbackUtils.waitForResult(future);
    if (lh == null) {
        LOG.error("Unexpected condition : no ledger handle returned for a success ledger creation");
        throw BKException.create(BKException.Code.UnexpectedConditionException);
    }
    return lh;
}
/**
 * Creates a new ledger asynchronously and returns {@link LedgerHandleAdv}
 * which can accept entryId. Ledgers created with this call have ability to accept
 * a separate write quorum and ack quorum size. The write quorum must be larger than
 * the ack quorum.
 *
 * <p>Separating the write and the ack quorum allows the BookKeeper client to continue
 * writing when a bookie has failed but the failure has not yet been detected. Detecting
 * a bookie has failed can take a number of seconds, as configured by the read timeout
 * {@link ClientConfiguration#getReadTimeout()}. Once the bookie failure is detected,
 * that bookie will be removed from the ensemble.
 *
 * <p>The other parameters match those of {@link #asyncCreateLedger(int, int, DigestType, byte[],
 *                                  AsyncCallback.CreateCallback, Object)}
 *
 * @param ensSize
 *          number of bookies over which to stripe entries
 * @param writeQuorumSize
 *          number of bookies each entry will be written to
 * @param ackQuorumSize
 *          number of bookies which must acknowledge an entry before the call is completed
 * @param digestType
 *          digest type, either MAC or CRC32
 * @param passwd
 *          password
 * @param cb
 *          createCallback implementation
 * @param ctx
 *          optional control object
 * @param customMetadata
 *          optional customMetadata that holds user specified metadata
 */
public void asyncCreateLedgerAdv(final int ensSize, final int writeQuorumSize, final int ackQuorumSize,
                                 final DigestType digestType, final byte[] passwd, final CreateCallback cb, final Object ctx,
                                 final Map<String, byte[]> customMetadata) {
    if (writeQuorumSize < ackQuorumSize) {
        throw new IllegalArgumentException("Write quorum must be larger than ack quorum");
    }
    closeLock.readLock().lock();
    try {
        if (closed) {
            cb.createComplete(BKException.Code.ClientClosedException, null, ctx);
            return;
        }
        // initiateAdv(-1L) lets the metadata store allocate the ledger id.
        new LedgerCreateOp(BookKeeper.this, ensSize, writeQuorumSize,
                           ackQuorumSize, digestType, passwd, cb, ctx,
                           customMetadata, WriteFlag.NONE, clientStats)
            .initiateAdv(-1L);
    } finally {
        closeLock.readLock().unlock();
    }
}
/**
 * Synchronously creates a new ledger using the interface which accepts a ledgerId as input.
 * This method returns {@link LedgerHandleAdv} which can accept entryId.
 * Parameters must match those of asyncCreateLedgerAdvWithLedgerId
 * @param ledgerId
 * @param ensSize
 * @param writeQuorumSize
 * @param ackQuorumSize
 * @param digestType
 * @param passwd
 * @param customMetadata
 * @return a handle to the newly created ledger
 * @throws InterruptedException
 * @throws BKException
 */
public LedgerHandle createLedgerAdv(final long ledgerId,
                                    int ensSize,
                                    int writeQuorumSize,
                                    int ackQuorumSize,
                                    DigestType digestType,
                                    byte[] passwd,
                                    final Map<String, byte[]> customMetadata)
        throws InterruptedException, BKException {
    CompletableFuture<LedgerHandleAdv> future = new CompletableFuture<>();
    SyncCreateAdvCallback result = new SyncCreateAdvCallback(future);
    /*
     * Calls asynchronous version
     */
    asyncCreateLedgerAdv(ledgerId, ensSize, writeQuorumSize, ackQuorumSize, digestType, passwd,
                         result, null, customMetadata);
    LedgerHandle lh = SyncCallbackUtils.waitForResult(future);
    if (lh == null) {
        LOG.error("Unexpected condition : no ledger handle returned for a success ledger creation");
        throw BKException.create(BKException.Code.UnexpectedConditionException);
    } else if (ledgerId != lh.getId()) {
        // Sanity check: the handle must carry exactly the id the caller requested.
        LOG.error("Unexpected condition : Expected ledgerId: {} but got: {}", ledgerId, lh.getId());
        throw BKException.create(BKException.Code.UnexpectedConditionException);
    }
    LOG.info("Ensemble: {} for ledger: {}", lh.getLedgerMetadata().getEnsembleAt(0L), lh.getId());
    return lh;
}
/**
 * Asynchronously creates a new ledger using the interface which accepts a ledgerId as input.
 * This method returns {@link LedgerHandleAdv} which can accept entryId.
 * Ledgers created with this call have ability to accept
 * a separate write quorum and ack quorum size. The write quorum must be larger than
 * the ack quorum.
 *
 * <p>Separating the write and the ack quorum allows the BookKeeper client to continue
 * writing when a bookie has failed but the failure has not yet been detected. Detecting
 * a bookie has failed can take a number of seconds, as configured by the read timeout
 * {@link ClientConfiguration#getReadTimeout()}. Once the bookie failure is detected,
 * that bookie will be removed from the ensemble.
 *
 * <p>The other parameters match those of asyncCreateLedger</p>
 *
 * @param ledgerId
 *          ledger Id to use for the newly created ledger
 * @param ensSize
 *          number of bookies over which to stripe entries
 * @param writeQuorumSize
 *          number of bookies each entry will be written to
 * @param ackQuorumSize
 *          number of bookies which must acknowledge an entry before the call is completed
 * @param digestType
 *          digest type, either MAC or CRC32
 * @param passwd
 *          password
 * @param cb
 *          createCallback implementation
 * @param ctx
 *          optional control object
 * @param customMetadata
 *          optional customMetadata that holds user specified metadata
 */
public void asyncCreateLedgerAdv(final long ledgerId,
                                 final int ensSize,
                                 final int writeQuorumSize,
                                 final int ackQuorumSize,
                                 final DigestType digestType,
                                 final byte[] passwd,
                                 final CreateCallback cb,
                                 final Object ctx,
                                 final Map<String, byte[]> customMetadata) {
    if (writeQuorumSize < ackQuorumSize) {
        throw new IllegalArgumentException("Write quorum must be larger than ack quorum");
    }
    closeLock.readLock().lock();
    try {
        if (closed) {
            cb.createComplete(BKException.Code.ClientClosedException, null, ctx);
            return;
        }
        new LedgerCreateOp(BookKeeper.this, ensSize, writeQuorumSize,
                           ackQuorumSize, digestType, passwd, cb, ctx,
                           customMetadata, WriteFlag.NONE, clientStats)
            .initiateAdv(ledgerId);
    } finally {
        closeLock.readLock().unlock();
    }
}
/**
 * Open existing ledger asynchronously for reading.
 *
 * <p>Opening a ledger with this method invokes fencing and recovery on the ledger
 * if the ledger has not been closed. Fencing will block all other clients from
 * writing to the ledger. Recovery will make sure that the ledger is closed
 * before reading from it.
 *
 * <p>Recovery also makes sure that any entries which reached one bookie, but not a
 * quorum, will be replicated to a quorum of bookies. This occurs in cases where
 * the writer of a ledger crashes after sending a write request to one bookie but
 * before being able to send it to the rest of the bookies in the quorum.
 *
 * <p>If the ledger is already closed, neither fencing nor recovery will be applied.
 *
 * @see LedgerHandle#asyncClose
 *
 * @param lId
 *          ledger identifier
 * @param digestType
 *          digest type, either MAC or CRC32
 * @param passwd
 *          password
 * @param ctx
 *          optional control object
 */
public void asyncOpenLedger(final long lId, final DigestType digestType, final byte[] passwd,
                            final OpenCallback cb, final Object ctx) {
    closeLock.readLock().lock();
    try {
        if (closed) {
            cb.openComplete(BKException.Code.ClientClosedException, null, ctx);
            return;
        }
        new LedgerOpenOp(BookKeeper.this, clientStats,
                         lId, digestType, passwd, cb, ctx).initiate();
    } finally {
        closeLock.readLock().unlock();
    }
}
/**
 * Open existing ledger asynchronously for reading, but it does not try to
 * recover the ledger if it is not yet closed. The application needs to use
 * it carefully, since the writer might have crashed and ledger will remain
 * unsealed forever if there is no external mechanism to detect the failure
 * of the writer and the ledger is not open in a safe manner, invoking the
 * recovery procedure.
 *
 * <p>Opening a ledger without recovery does not fence the ledger. As such, other
 * clients can continue to write to the ledger.
 *
 * <p>This method returns a read only ledger handle. It will not be possible
 * to add entries to the ledger. Any attempt to add entries will throw an
 * exception.
 *
 * <p>Reads from the returned ledger will be able to read entries up until
 * the lastConfirmedEntry at the point in time at which the ledger was opened.
 * If an attempt is made to read beyond the ledger handle's LAC, an attempt is made
 * to get the latest LAC from bookies or metadata, and if the entry_id of the read request
 * is less than or equal to the new LAC, read will be allowed to proceed.
 *
 * @param lId
 *          ledger identifier
 * @param digestType
 *          digest type, either MAC or CRC32
 * @param passwd
 *          password
 * @param ctx
 *          optional control object
 */
public void asyncOpenLedgerNoRecovery(final long lId, final DigestType digestType, final byte[] passwd,
                                      final OpenCallback cb, final Object ctx) {
    closeLock.readLock().lock();
    try {
        if (closed) {
            cb.openComplete(BKException.Code.ClientClosedException, null, ctx);
            return;
        }
        new LedgerOpenOp(BookKeeper.this, clientStats,
                         lId, digestType, passwd, cb, ctx).initiateWithoutRecovery();
    } finally {
        closeLock.readLock().unlock();
    }
}
/**
 * Synchronous open ledger call.
 *
 * @see #asyncOpenLedger
 * @param lId
 *          ledger identifier
 * @param digestType
 *          digest type, either MAC or CRC32
 * @param passwd
 *          password
 * @return a handle to the open ledger
 * @throws InterruptedException
 * @throws BKException
 */
public LedgerHandle openLedger(long lId, DigestType digestType, byte[] passwd)
        throws BKException, InterruptedException {
    CompletableFuture<LedgerHandle> future = new CompletableFuture<>();
    SyncOpenCallback result = new SyncOpenCallback(future);
    /*
     * Calls async open ledger
     */
    asyncOpenLedger(lId, digestType, passwd, result, null);
    return SyncCallbackUtils.waitForResult(future);
}
/**
 * Synchronous, unsafe open ledger call.
 *
 * @see #asyncOpenLedgerNoRecovery
 * @param lId
 *          ledger identifier
 * @param digestType
 *          digest type, either MAC or CRC32
 * @param passwd
 *          password
 * @return a handle to the open ledger
 * @throws InterruptedException
 * @throws BKException
 */
public LedgerHandle openLedgerNoRecovery(long lId, DigestType digestType, byte[] passwd)
        throws BKException, InterruptedException {
    CompletableFuture<LedgerHandle> future = new CompletableFuture<>();
    SyncOpenCallback result = new SyncOpenCallback(future);
    /*
     * Calls async open ledger
     */
    asyncOpenLedgerNoRecovery(lId, digestType, passwd,
                              result, null);
    return SyncCallbackUtils.waitForResult(future);
}
/**
 * Deletes a ledger asynchronously.
 *
 * @param lId
 *            ledger Id
 * @param cb
 *            deleteCallback implementation
 * @param ctx
 *            optional control object
 */
public void asyncDeleteLedger(final long lId, final DeleteCallback cb, final Object ctx) {
    closeLock.readLock().lock();
    try {
        if (closed) {
            cb.deleteComplete(BKException.Code.ClientClosedException, ctx);
            return;
        }
        new LedgerDeleteOp(BookKeeper.this, clientStats, lId, cb, ctx).initiate();
    } finally {
        closeLock.readLock().unlock();
    }
}
/**
 * Synchronous call to delete a ledger. Parameters match those of
 * {@link #asyncDeleteLedger(long, AsyncCallback.DeleteCallback, Object)}
 *
 * @param lId
 *            ledgerId
 * @throws InterruptedException
 * @throws BKException
 */
public void deleteLedger(long lId) throws InterruptedException, BKException {
    CompletableFuture<Void> future = new CompletableFuture<>();
    SyncDeleteCallback result = new SyncDeleteCallback(future);
    // Call asynchronous version
    asyncDeleteLedger(lId, result, null);
    SyncCallbackUtils.waitForResult(future);
}
/**
 * Check asynchronously whether the ledger with identifier <i>lId</i>
 * has been closed.
 *
 * @param lId   ledger identifier
 * @param cb    callback method
 */
public void asyncIsClosed(long lId, final IsClosedCallback cb, final Object ctx){
    // Read the ledger metadata and report its closed flag; any metadata failure
    // is mapped to a BKException code with isClosed=false.
    ledgerManager.readLedgerMetadata(lId).whenComplete((metadata, exception) -> {
        if (exception == null) {
            cb.isClosedComplete(BKException.Code.OK, metadata.getValue().isClosed(), ctx);
        } else {
            cb.isClosedComplete(BKException.getExceptionCode(exception), false, ctx);
        }
    });
}
/**
 * Check whether the ledger with identifier <i>lId</i>
 * has been closed.
 *
 * @param lId
 * @return boolean true if ledger has been closed
 * @throws BKException
 */
public boolean isClosed(long lId)
        throws BKException, InterruptedException {
    // Small mutable holder bridging the async callback to this synchronous call.
    final class Result {
        int rc;
        boolean isClosed;
        final CountDownLatch notifier = new CountDownLatch(1);
    }
    final Result result = new Result();
    final IsClosedCallback cb = new IsClosedCallback(){
        @Override
        public void isClosedComplete(int rc, boolean isClosed, Object ctx){
            result.isClosed = isClosed;
            result.rc = rc;
            result.notifier.countDown();
        }
    };
    /*
     * Call asynchronous version of isClosed
     */
    asyncIsClosed(lId, cb, null);
    /*
     * Wait for callback
     */
    result.notifier.await();
    if (result.rc != BKException.Code.OK) {
        throw BKException.create(result.rc);
    }
    return result.isClosed;
}
/**
 * Shuts down client.
 *
 * <p>Idempotent: subsequent calls return immediately once the closed flag is set.
 * Shutdown order is deliberate — the bookie client first (fail/reject pending
 * bookie requests), then the metadata layer, then the executors, and finally
 * owned resources (timer, event loop group) and the metadata driver.
 */
@Override
public void close() throws BKException, InterruptedException {
    // Flip the closed flag under the write lock so in-flight operations holding
    // the read lock either complete first or observe the client as closed.
    closeLock.writeLock().lock();
    try {
        if (closed) {
            return;
        }
        closed = true;
    } finally {
        closeLock.writeLock().unlock();
    }
    // Close bookie client so all pending bookie requests would be failed
    // which will reject any incoming bookie requests.
    bookieClient.close();
    try {
        // Close ledger manage so all pending metadata requests would be failed
        // which will reject any incoming metadata requests.
        ledgerManager.close();
        ledgerIdGenerator.close();
    } catch (IOException ie) {
        LOG.error("Failed to close ledger manager : ", ie);
    }
    // Close the scheduler
    scheduler.shutdown();
    if (!scheduler.awaitTermination(10, TimeUnit.SECONDS)) {
        LOG.warn("The scheduler did not shutdown cleanly");
    }
    mainWorkerPool.shutdown();
    if (!mainWorkerPool.awaitTermination(10, TimeUnit.SECONDS)) {
        LOG.warn("The mainWorkerPool did not shutdown cleanly");
    }
    if (this.bookieInfoScheduler != null) {
        this.bookieInfoScheduler.shutdown();
        if (!bookieInfoScheduler.awaitTermination(10, TimeUnit.SECONDS)) {
            LOG.warn("The bookieInfoScheduler did not shutdown cleanly");
        }
    }
    // Only stop resources this instance owns; externally supplied timers and
    // event loops are left running for their owners.
    if (ownTimer) {
        requestTimer.stop();
    }
    if (ownEventLoopGroup) {
        eventLoopGroup.shutdownGracefully();
    }
    this.metadataDriver.close();
}
// Fluent-API entry point: builder for creating ledgers.
@Override
public CreateBuilder newCreateLedgerOp() {
    return new LedgerCreateOp.CreateBuilderImpl(this);
}
// Fluent-API entry point: builder for opening ledgers.
@Override
public OpenBuilder newOpenLedgerOp() {
    return new LedgerOpenOp.OpenBuilderImpl(this);
}
// Fluent-API entry point: builder for deleting ledgers.
@Override
public DeleteBuilder newDeleteLedgerOp() {
    return new LedgerDeleteOp.DeleteBuilderImpl(this);
}
/**
 * Synchronous iterator over all ledger ids, flattening the ranges produced by a
 * {@link LedgerRangeIterator}. Fails fast with {@link IllegalStateException} once
 * the owning {@link ListLedgersResultImpl} has been closed.
 */
private static final class SyncLedgerIterator implements LedgersIterator {
    private final LedgerRangeIterator iterator;
    private final ListLedgersResultImpl parent;
    // Ids of the range currently being consumed; null until the first fetch.
    Iterator<Long> currentRange = null;
    public SyncLedgerIterator(LedgerRangeIterator iterator, ListLedgersResultImpl parent) {
        this.iterator = iterator;
        this.parent = parent;
    }
    @Override
    public boolean hasNext() throws IOException {
        parent.checkClosed();
        if (currentRange != null && currentRange.hasNext()) {
            return true;
        }
        // BUGFIX: when the current range is exhausted we must still consult the
        // underlying range iterator. The previous logic only checked it while
        // currentRange was null, so iteration stopped after the first range.
        return iterator.hasNext();
    }
    @Override
    public long next() throws IOException {
        parent.checkClosed();
        if (currentRange == null || !currentRange.hasNext()) {
            // Advance to the next range of ledger ids.
            currentRange = iterator.next().getLedgers().iterator();
        }
        return currentRange.next();
    }
}
/**
 * Result handle for a list-ledgers operation. Hands out a single
 * {@link LedgersIterator} (or an {@link Iterable} view of it); once closed,
 * any further use fails with {@link IllegalStateException}.
 */
private static final class ListLedgersResultImpl implements ListLedgersResult {
    private final LedgerRangeIterator iterator;
    private boolean closed = false;
    // Lazily created; at most one iterator may be requested per result.
    private LedgersIterator ledgersIterator;
    public ListLedgersResultImpl(LedgerRangeIterator iterator) {
        this.iterator = iterator;
    }
    void checkClosed() {
        if (closed) {
            throw new IllegalStateException("ListLedgersResult is closed");
        }
    }
    private void initLedgersIterator() {
        if (ledgersIterator != null) {
            throw new IllegalStateException("LedgersIterator must be requested once");
        }
        ledgersIterator = new SyncLedgerIterator(iterator, this);
    }
    @Override
    public LedgersIterator iterator() {
        checkClosed();
        initLedgersIterator();
        return ledgersIterator;
    }
    @Override
    public Iterable<Long> toIterable() {
        checkClosed();
        initLedgersIterator();
        // Adapts the checked-IOException iterator to java.util.Iterator by
        // wrapping IOExceptions in RuntimeException.
        return () -> new Iterator<Long>() {
            @Override
            public boolean hasNext() {
                try {
                    return ledgersIterator.hasNext();
                } catch (IOException ex) {
                    throw new RuntimeException(ex);
                }
            }
            @Override
            public Long next() {
                try {
                    return ledgersIterator.next();
                } catch (IOException ex) {
                    throw new RuntimeException(ex);
                }
            }
        };
    }
    @Override
    public void close() throws Exception {
        closed = true;
    }
}
/**
 * Fluent-API entry point: lists all ledgers known to the ledger manager.
 */
@Override
public ListLedgersResultBuilder newListLedgersOp() {
    return () -> {
        // zkOpTimeoutMs of 0: use the ledger manager's default behavior.
        final LedgerRangeIterator iterator = getLedgerManager().getLedgerRanges(0);
        return CompletableFuture.completedFuture(new ListLedgersResultImpl(iterator));
    };
}
/**
 * Reads the metadata of the given ledger, stripping the version wrapper so the
 * caller receives just the {@link LedgerMetadata} payload.
 *
 * @param ledgerId id of the ledger whose metadata is requested
 * @return future completing with the ledger metadata
 */
@Override
public CompletableFuture<LedgerMetadata> getLedgerMetadata(long ledgerId) {
    return getLedgerManager()
            .readLedgerMetadata(ledgerId)
            .thenApply(Versioned::getValue);
}
// Facade exposing this client's internals to ledger operations via the
// ClientContext interface; every accessor delegates to the enclosing BookKeeper.
private final ClientContext clientCtx = new ClientContext() {
    @Override
    public ClientInternalConf getConf() {
        return internalConf;
    }
    @Override
    public LedgerManager getLedgerManager() {
        return BookKeeper.this.getLedgerManager();
    }
    @Override
    public BookieWatcher getBookieWatcher() {
        return BookKeeper.this.getBookieWatcher();
    }
    @Override
    public EnsemblePlacementPolicy getPlacementPolicy() {
        return BookKeeper.this.getPlacementPolicy();
    }
    @Override
    public BookieClient getBookieClient() {
        return BookKeeper.this.getBookieClient();
    }
    @Override
    public OrderedExecutor getMainWorkerPool() {
        return BookKeeper.this.getMainWorkerPool();
    }
    @Override
    public OrderedScheduler getScheduler() {
        return BookKeeper.this.getScheduler();
    }
    @Override
    public BookKeeperClientStats getClientStats() {
        return clientStats;
    }
    @Override
    public boolean isClientClosed() {
        return BookKeeper.this.isClosed();
    }
    @Override
    public ByteBufAllocator getByteBufAllocator() {
        return allocator;
    }
};
/**
 * Returns the {@link ClientContext} facade over this client.
 */
public ClientContext getClientCtx() {
    return clientCtx;
}
}
| 337 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/EnsemblePlacementPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import io.netty.util.HashedWheelTimer;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException;
import org.apache.bookkeeper.client.BookieInfoReader.BookieInfo;
import org.apache.bookkeeper.client.DistributionSchedule.WriteSet;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.common.annotation.InterfaceAudience;
import org.apache.bookkeeper.common.annotation.InterfaceStability;
import org.apache.bookkeeper.common.util.MathUtils;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.feature.FeatureProvider;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.DNSToSwitchMapping;
import org.apache.bookkeeper.proto.BookieAddressResolver;
import org.apache.bookkeeper.stats.StatsLogger;
/**
* {@link EnsemblePlacementPolicy} encapsulates the algorithm that bookkeeper client uses to select a number of bookies
* from the cluster as an ensemble for storing entries.
*
* <p>The algorithm is typically implemented based on the data input as well as the network topology properties.
*
* <h2>How does it work?</h2>
*
* <p>This interface basically covers three parts:</p>
* <ul>
* <li>Initialization and uninitialization</li>
* <li>How to choose bookies to place data</li>
* <li>How to choose bookies to do speculative reads</li>
* </ul>
*
* <h3>Initialization and uninitialization</h3>
*
* <p>The ensemble placement policy is constructed by jvm reflection during constructing bookkeeper client.
* After the {@code EnsemblePlacementPolicy} is constructed, bookkeeper client will call
* {@link #initialize(ClientConfiguration, Optional, HashedWheelTimer, FeatureProvider, StatsLogger,
* BookieAddressResolver)} to initialize the placement policy.
*
* <p>The {@link #initialize(ClientConfiguration, Optional, HashedWheelTimer, FeatureProvider, StatsLogger,
* BookieAddressResolver)} method takes a few resources from bookkeeper for instantiating itself.
* These resources include:
*
* <ul>
* <li>`ClientConfiguration` : The client configuration that used for constructing the bookkeeper client.
* The implementation of the placement policy could obtain its settings from this
* configuration.
* <li>`DNSToSwitchMapping`: The DNS resolver for the ensemble policy to build the network topology of the bookies
* cluster. It is optional.
* <li>`HashedWheelTimer`: A hashed wheel timer that could be used for timing related work.
* For example, a stabilize network topology could use it to delay network topology changes to
* reduce impacts of flapping bookie registrations due to zk session expires.
* <li>`FeatureProvider`: A {@link FeatureProvider} that the policy could use for enabling or disabling its offered
* features. For example, a {@link RegionAwareEnsemblePlacementPolicy} could offer features
* to disable placing data to a specific region at runtime.
* <li>`StatsLogger`: A {@link StatsLogger} for exposing stats.
* </ul>
*
* <p>The ensemble placement policy is a single instance per bookkeeper client. The instance will
* be {@link #uninitalize()} when closing the bookkeeper client. The implementation of a placement policy should be
* responsible for releasing all the resources that allocated during
* {@link #initialize(ClientConfiguration, Optional, HashedWheelTimer, FeatureProvider, StatsLogger,
* BookieAddressResolver)}.
*
* <h3>How to choose bookies to place data</h3>
*
* <p>The bookkeeper client discovers list of bookies from zookeeper via {@code BookieWatcher} - whenever there are
* bookie changes, the ensemble placement policy will be notified with new list of bookies via
* {@link #onClusterChanged(Set, Set)}. The implementation of the ensemble placement policy will react on those
* changes to build new network topology. Subsequent operations like {@link #newEnsemble(int, int, int, Map, Set)} or
* {@link #replaceBookie(int, int, int, java.util.Map, java.util.List, BookieId, java.util.Set)}
* hence can operate on the new
* network topology.
*
* <p>Both {@link RackawareEnsemblePlacementPolicy} and {@link RegionAwareEnsemblePlacementPolicy} are
* {@link TopologyAwareEnsemblePlacementPolicy}s. They build a {@link org.apache.bookkeeper.net.NetworkTopology} on
* bookie changes, use it for ensemble placement and ensure {@code rack/region} coverage for write quorums.
*
* <h4>Network Topology</h4>
*
* <p>The network topology is presenting a cluster of bookies in a tree hierarchical structure. For example,
* a bookie cluster may be consists of many data centers (aka regions) filled with racks of machines.
* In this tree structure, leaves represent bookies and inner nodes represent switches/routes that manage
* traffic in/out of regions or racks.
*
* <p>For example, there are 3 bookies in region `A`. They are `bk1`, `bk2` and `bk3`. And their network locations are
* {@code /region-a/rack-1/bk1}, {@code /region-a/rack-1/bk2} and {@code /region-a/rack-2/bk3}. So the network topology
* will look like below:
*
* <pre>
* root
* |
* region-a
* / \
* rack-1 rack-2
* / \ \
* bk1 bk2 bk3
* </pre>
*
* <p>Another example, there are 4 bookies spanning in two regions `A` and `B`. They are `bk1`, `bk2`, `bk3` and `bk4`.
* And their network locations are {@code /region-a/rack-1/bk1}, {@code /region-a/rack-1/bk2},
* {@code /region-b/rack-2/bk3} and {@code /region-b/rack-2/bk4}. The network topology will look like below:
*
* <pre>
* root
* / \
* region-a region-b
* | |
* rack-1 rack-2
* / \ / \
* bk1 bk2 bk3 bk4
* </pre>
*
* <p>The network location of each bookie is resolved by a {@link DNSToSwitchMapping}. The {@link DNSToSwitchMapping}
* resolves a list of DNS-names or IP-addresses into a list of network locations. The network location that is returned
* must be a network path of the form `/region/rack`, where `/` is the root, and `region` is the region id representing
 * the data center where `rack` is located. The network topology of the bookie cluster would determine the number of
 * fault domains (regions and racks) available to a placement policy for spreading an ensemble.
 *
* <h4>RackAware and RegionAware</h4>
*
* <p>{@link RackawareEnsemblePlacementPolicy} basically just chooses bookies from different racks in the built
* network topology. It guarantees that a write quorum will cover at least two racks. It expects the network locations
* resolved by {@link DNSToSwitchMapping} have at least 2 levels. For example, network location paths like
* {@code /dc1/rack0} and {@code /dc1/row1/rack0} are okay, but {@code /rack0} is not acceptable.
*
* <p>{@link RegionAwareEnsemblePlacementPolicy} is a hierarchical placement policy, which it chooses
* equal-sized bookies from regions, and within each region it uses {@link RackawareEnsemblePlacementPolicy} to choose
* bookies from racks. For example, if there is 3 regions - {@code region-a}, {@code region-b} and {@code region-c},
* an application want to allocate a {@code 15-bookies} ensemble. First, it would figure out there are 3 regions and
* it should allocate 5 bookies from each region. Second, for each region, it would use
* {@link RackawareEnsemblePlacementPolicy} to choose <i>5</i> bookies.
*
* <p>Since {@link RegionAwareEnsemblePlacementPolicy} is based on {@link RackawareEnsemblePlacementPolicy}, it expects
* the network locations resolved by {@link DNSToSwitchMapping} have at least <b>3</b> levels.
*
* <h3>How to choose bookies to do speculative reads?</h3>
*
* <p>{@link #reorderReadSequence(List, BookiesHealthInfo, WriteSet)} and
* {@link #reorderReadLACSequence(List, BookiesHealthInfo, WriteSet)} are
* two methods exposed by the placement policy, to help client determine a better read sequence according to the
* network topology and the bookie failure history.
*
* <p>For example, in {@link RackawareEnsemblePlacementPolicy}, the reads will be attempted in following sequence:
*
* <ul>
* <li>bookies are writable and didn't experience failures before
* <li>bookies are writable and experienced failures before
* <li>bookies are readonly
* <li>bookies already disappeared from network topology
* </ul>
*
* <p>In {@link RegionAwareEnsemblePlacementPolicy}, the reads will be tried in similar following sequence
* as `RackAware` placement policy. There is a slight different on trying writable bookies: after trying every 2
* bookies from local region, it would try a bookie from remote region. Hence it would achieve low latency even
* there is network issues within local region.
*
* <h2>How to configure the placement policy?</h2>
*
* <p>Currently there are 3 implementations available by default. They are:
* <ul>
* <li>{@link DefaultEnsemblePlacementPolicy}</li>
* <li>{@link RackawareEnsemblePlacementPolicy}</li>
* <li>{@link RegionAwareEnsemblePlacementPolicy}</li>
* </ul>
*
* <p>You can configure the ensemble policy by specifying the placement policy class in
* {@link ClientConfiguration#setEnsemblePlacementPolicy(Class)}.
*
* <p>{@link DefaultEnsemblePlacementPolicy} randomly pickups bookies from the cluster, while both
* {@link RackawareEnsemblePlacementPolicy} and {@link RegionAwareEnsemblePlacementPolicy} choose bookies based on
* network locations. So you might also consider configuring a proper {@link DNSToSwitchMapping} in
* {@link BookKeeper.Builder} to resolve the correct network locations for your cluster.
*
* @see TopologyAwareEnsemblePlacementPolicy
* @see DefaultEnsemblePlacementPolicy
* @see RackawareEnsemblePlacementPolicy
* @see RegionAwareEnsemblePlacementPolicy
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface EnsemblePlacementPolicy {

    /**
     * Initialize the policy.
     *
     * @param conf client configuration
     * @param optionalDnsResolver dns resolver
     * @param hashedWheelTimer timer
     * @param featureProvider feature provider
     * @param statsLogger stats logger
     * @param bookieAddressResolver resolver used to map a {@link BookieId} to its network address
     * @return the initialized ensemble placement policy
     *
     * @since 4.5
     */
    EnsemblePlacementPolicy initialize(ClientConfiguration conf,
                                       Optional<DNSToSwitchMapping> optionalDnsResolver,
                                       HashedWheelTimer hashedWheelTimer,
                                       FeatureProvider featureProvider,
                                       StatsLogger statsLogger,
                                       BookieAddressResolver bookieAddressResolver);

    /**
     * Uninitialize the policy.
     */
    void uninitalize();

    /**
     * A consistent view of the cluster (what bookies are available as writable, what bookies are available as
     * readonly) is updated when any changes happen in the cluster.
     *
     * <p>The implementation should take actions when the cluster view is changed. So subsequent
     * {@link #newEnsemble(int, int, int, Map, Set)} and
     * {@link #replaceBookie(int, int, int, java.util.Map, java.util.List, BookieId, java.util.Set) }
     * can choose proper bookies.
     *
     * @param writableBookies
     *          All the bookies in the cluster available for write/read.
     * @param readOnlyBookies
     *          All the bookies in the cluster available for readonly.
     * @return the dead bookies during this cluster change.
     */
    Set<BookieId> onClusterChanged(Set<BookieId> writableBookies,
                                   Set<BookieId> readOnlyBookies);

    /**
     * Choose <i>numBookies</i> bookies for ensemble. If the count is more than the number of available
     * nodes, {@link BKNotEnoughBookiesException} is thrown.
     *
     * <p>The implementation should respect to the replace settings. The size of the returned bookie list
     * should be equal to the provide {@code ensembleSize}.
     *
     * <p>{@code customMetadata} is the same user defined data that user provides
     * when {@link BookKeeper#createLedger(int, int, int, BookKeeper.DigestType, byte[], Map)}.
     *
     * <p>If 'enforceMinNumRacksPerWriteQuorum' config is enabled then the bookies belonging to default
     * faultzone (rack) will be excluded while selecting bookies.
     *
     * @param ensembleSize
     *          Ensemble Size
     * @param writeQuorumSize
     *          Write Quorum Size
     * @param ackQuorumSize
     *          the value of ackQuorumSize (added since 4.5)
     * @param customMetadata the value of customMetadata. it is the same user defined metadata that user
     *                       provides in {@link BookKeeper#createLedger(int, int, int, BookKeeper.DigestType, byte[])}
     * @param excludeBookies Bookies that should not be considered as targets.
     * @throws BKNotEnoughBookiesException if not enough bookies available.
     * @return a placement result containing list of bookie addresses for the ensemble.
     */
    PlacementResult<List<BookieId>> newEnsemble(int ensembleSize,
                                                int writeQuorumSize,
                                                int ackQuorumSize,
                                                Map<String, byte[]> customMetadata,
                                                Set<BookieId> excludeBookies)
            throws BKNotEnoughBookiesException;

    /**
     * Choose a new bookie to replace <i>bookieToReplace</i>. If no bookie available in the cluster,
     * {@link BKNotEnoughBookiesException} is thrown.
     *
     * <p>If 'enforceMinNumRacksPerWriteQuorum' config is enabled then the bookies belonging to default
     * faultzone (rack) will be excluded while selecting bookies.
     *
     * @param ensembleSize
     *          the value of ensembleSize
     * @param writeQuorumSize
     *          the value of writeQuorumSize
     * @param ackQuorumSize the value of ackQuorumSize (added since 4.5)
     * @param customMetadata the value of customMetadata. it is the same user defined metadata that user
     *                       provides in {@link BookKeeper#createLedger(int, int, int, BookKeeper.DigestType, byte[])}
     * @param currentEnsemble the value of currentEnsemble
     * @param bookieToReplace bookie to replace
     * @param excludeBookies bookies that should not be considered as candidate.
     * @throws BKNotEnoughBookiesException if no suitable replacement bookie is available.
     * @return a placement result containing the new bookie address.
     */
    PlacementResult<BookieId> replaceBookie(int ensembleSize,
                                            int writeQuorumSize,
                                            int ackQuorumSize,
                                            Map<String, byte[]> customMetadata,
                                            List<BookieId> currentEnsemble,
                                            BookieId bookieToReplace,
                                            Set<BookieId> excludeBookies)
            throws BKNotEnoughBookiesException;

    /**
     * Register a bookie as slow so that it is tried after available and read-only bookies.
     *
     * @param bookieSocketAddress
     *          Address of bookie host
     * @param entryId
     *          Entry ID that caused a speculative timeout on the bookie.
     */
    void registerSlowBookie(BookieId bookieSocketAddress, long entryId);

    /**
     * Reorder the read sequence of a given write quorum <i>writeSet</i>.
     *
     * @param ensemble
     *          Ensemble to read entries.
     * @param bookiesHealthInfo
     *          Health info for bookies
     * @param writeSet
     *          Write quorum to read entries. This will be modified, rather than
     *          allocating a new WriteSet.
     * @return The read sequence. This will be the same object as the passed in
     *         writeSet.
     * @since 4.5
     */
    DistributionSchedule.WriteSet reorderReadSequence(
            List<BookieId> ensemble,
            BookiesHealthInfo bookiesHealthInfo,
            DistributionSchedule.WriteSet writeSet);

    /**
     * Reorder the read last add confirmed sequence of a given write quorum <i>writeSet</i>.
     *
     * @param ensemble
     *          Ensemble to read entries.
     * @param bookiesHealthInfo
     *          Health info for bookies
     * @param writeSet
     *          Write quorum to read entries. This will be modified, rather than
     *          allocating a new WriteSet.
     * @return The read sequence. This will be the same object as the passed in
     *         writeSet.
     * @since 4.5
     */
    DistributionSchedule.WriteSet reorderReadLACSequence(
            List<BookieId> ensemble,
            BookiesHealthInfo bookiesHealthInfo,
            DistributionSchedule.WriteSet writeSet);

    /**
     * Send the bookie info details.
     *
     * <p>The default implementation is a no-op; policies that use disk-usage or
     * other per-bookie statistics override this.
     *
     * @param bookieInfoMap
     *          A map that has the bookie to BookieInfo
     * @since 4.5
     */
    default void updateBookieInfo(Map<BookieId, BookieInfo> bookieInfoMap) {
    }

    /**
     * Select one bookie to the "sticky" bookie where all reads for a particular
     * ledger will be directed to.
     *
     * <p>The default implementation will pick a bookie randomly from the ensemble.
     * Other placement policies will be able to do better decisions based on
     * additional informations (eg: rack or region awareness).
     *
     * @param metadata
     *            the {@link LedgerMetadata} object
     * @param currentStickyBookieIndex
     *            if we are changing the sticky bookie after a read failure, the
     *            current sticky bookie is passed in so that we will avoid
     *            choosing it again
     * @return the index, within the ensemble of the bookie chosen as the sticky
     *         bookie
     *
     * @since 4.9
     */
    default int getStickyReadBookieIndex(LedgerMetadata metadata, Optional<Integer> currentStickyBookieIndex) {
        if (!currentStickyBookieIndex.isPresent()) {
            // Pick one bookie randomly from the current ensemble as the initial
            // "sticky bookie"
            return ThreadLocalRandom.current().nextInt(metadata.getEnsembleSize());
        } else {
            // When choosing a new sticky bookie index (eg: after the current
            // one has read failures), by default we pick the next one in the
            // ensemble, to avoid picking up the same one again.
            return MathUtils.signSafeMod(currentStickyBookieIndex.get() + 1, metadata.getEnsembleSize());
        }
    }

    /**
     * returns AdherenceLevel if the Ensemble is strictly/softly/fails adhering
     * to placement policy, like in the case of
     * RackawareEnsemblePlacementPolicy, bookies in the writeset are from
     * 'minNumRacksPerWriteQuorum' number of racks. And in the case of
     * RegionawareEnsemblePlacementPolicy, check for
     * minimumRegionsForDurability, reppRegionsToWrite, rack distribution within
     * a region and other parameters of RegionAwareEnsemblePlacementPolicy. In
     * ZoneAwareEnsemblePlacementPolicy if bookies in the writeset are from
     * 'desiredNumOfZones' then it is considered as MEETS_STRICT if they are
     * from 'minNumOfZones' then it is considered as MEETS_SOFT otherwise
     * considered as FAIL.
     *
     * <p>The default implementation reports {@link PlacementPolicyAdherence#FAIL}
     * unconditionally; topology-aware policies override it.
     *
     * @param ensembleList
     *            list of BookieId of bookies in the ensemble
     * @param writeQuorumSize
     *            writeQuorumSize of the ensemble
     * @param ackQuorumSize
     *            ackQuorumSize of the ensemble
     * @return the adherence level of the given ensemble
     */
    default PlacementPolicyAdherence isEnsembleAdheringToPlacementPolicy(List<BookieId> ensembleList,
            int writeQuorumSize, int ackQuorumSize) {
        return PlacementPolicyAdherence.FAIL;
    }

    /**
     * Returns true if the bookies that have acknowledged a write adhere to the minimum fault domains as defined in the
     * placement policy in use. Ex: In the case of RackawareEnsemblePlacementPolicy, bookies belong to at least
     * 'minNumRacksPerWriteQuorum' number of racks.
     *
     * <p>The default implementation always reports {@code true}.
     *
     * @param ackedBookies
     *            list of BookieId of bookies that have acknowledged a write.
     * @param writeQuorumSize
     *            writeQuorumSize of the ensemble
     * @param ackQuorumSize
     *            ackQuorumSize of the ensemble
     * @return whether the acked bookies satisfy the policy's fault-domain requirements
     */
    default boolean areAckedBookiesAdheringToPlacementPolicy(Set<BookieId> ackedBookies,
                                                             int writeQuorumSize,
                                                             int ackQuorumSize) {
        return true;
    }

    /**
     * Returns placement result. If the currentEnsemble is not adhering placement policy, returns new ensemble that
     * adheres placement policy. It should be implemented so as to minify the number of bookies replaced.
     *
     * <p>The default implementation does not support this operation and throws
     * {@link UnsupportedOperationException}.
     *
     * @param ensembleSize
     *            ensemble size
     * @param writeQuorumSize
     *            writeQuorumSize of the ensemble
     * @param ackQuorumSize
     *            ackQuorumSize of the ensemble
     * @param excludeBookies
     *            bookies that should not be considered as targets
     * @param currentEnsemble
     *            current ensemble
     * @return a placement result
     */
    default PlacementResult<List<BookieId>> replaceToAdherePlacementPolicy(
            int ensembleSize,
            int writeQuorumSize,
            int ackQuorumSize,
            Set<BookieId> excludeBookies,
            List<BookieId> currentEnsemble) {
        throw new UnsupportedOperationException();
    }

    /**
     * enum for PlacementPolicyAdherence. Currently we are supporting tri-value
     * enum for PlacementPolicyAdherence. If placement policy is met strictly
     * then it is MEETS_STRICT, if it doesn't adhere to placement policy then it
     * is FAIL. But there are certain placement policies, like
     * ZoneAwareEnsemblePlacementPolicy which has definition of soft adherence
     * level to support zone down scenarios.
     */
    enum PlacementPolicyAdherence {
        FAIL(1), MEETS_SOFT(3), MEETS_STRICT(5);

        // numeric weight of the adherence level; higher means stricter adherence
        private int numVal;

        private PlacementPolicyAdherence(int numVal) {
            this.numVal = numVal;
        }

        /** Returns the numeric weight associated with this adherence level. */
        public int getNumVal() {
            return numVal;
        }
    }

    /**
     * Result of a placement calculation against a placement policy.
     */
    final class PlacementResult<T> {
        private final T result;
        private final PlacementPolicyAdherence policyAdherence;

        /** Static factory wrapping a computed placement and its adherence level. */
        public static <T> PlacementResult<T> of(T result, PlacementPolicyAdherence policyAdherence) {
            return new PlacementResult<>(result, policyAdherence);
        }

        private PlacementResult(T result, PlacementPolicyAdherence policyAdherence) {
            this.result = result;
            this.policyAdherence = policyAdherence;
        }

        /** Returns the computed placement (ensemble list or single bookie). */
        public T getResult() {
            return result;
        }

        /**
         * Use {@link #getAdheringToPolicy}.
         */
        @Deprecated
        public PlacementPolicyAdherence isAdheringToPolicy() {
            return policyAdherence;
        }

        /** Returns how well the computed placement adheres to the policy. */
        public PlacementPolicyAdherence getAdheringToPolicy() {
            return policyAdherence;
        }
    }
}
| 338 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/ListenerBasedPendingReadOp.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryListener;
import org.apache.bookkeeper.util.MathUtils;
@Slf4j
class ListenerBasedPendingReadOp extends PendingReadOp {

    final ReadEntryListener listener;
    final Object ctx;

    ListenerBasedPendingReadOp(LedgerHandle lh,
                               ClientContext clientCtx,
                               long startEntryId,
                               long endEntryId,
                               ReadEntryListener listener,
                               Object ctx,
                               boolean isRecoveryRead) {
        super(lh, clientCtx, startEntryId, endEntryId, isRecoveryRead);
        this.listener = listener;
        this.ctx = ctx;
    }

    /**
     * Drains every request at the head of the sequence that has already completed,
     * invoking the per-entry listener for each one in order. Stops as soon as an
     * incomplete request is reached so that entries are delivered in sequence.
     */
    @Override
    protected void submitCallback(int code) {
        for (;;) {
            LedgerEntryRequest head = seq.isEmpty() ? null : seq.getFirst();
            if (head == null) {
                break;
            }
            if (!head.isComplete()) {
                // the head of the queue is still outstanding; deliver nothing further yet
                return;
            }
            seq.removeFirst();
            long elapsedNanos = MathUtils.elapsedNanos(requestTimeNanos);
            LedgerEntry entry = null;
            if (head.getRc() == BKException.Code.OK) {
                clientCtx.getClientStats().getReadOpLogger()
                        .registerSuccessfulEvent(elapsedNanos, TimeUnit.NANOSECONDS);
                // callback with completed entry
                entry = new LedgerEntry(head.entryImpl);
            } else {
                clientCtx.getClientStats().getReadOpLogger()
                        .registerFailedEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            }
            head.close();
            listener.onEntryComplete(head.getRc(), lh, entry, ctx);
        }
        // all entries are already completed
        cancelSpeculativeTask(true);
    }
}
| 339 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerChecker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import io.netty.buffer.ByteBuf;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.bookkeeper.client.BKException.Code;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieClient;
import org.apache.bookkeeper.proto.BookieProtocol;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryCallback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A utility class to check the complete ledger and finds the UnderReplicated fragments if any.
*
* <p>NOTE: This class is tended to be used by this project only. External users should not rely on it directly.
*/
public class LedgerChecker {
private static final Logger LOG = LoggerFactory.getLogger(LedgerChecker.class);
public final BookieClient bookieClient;
public final BookieWatcher bookieWatcher;
final Semaphore semaphore;
    /**
     * Thrown when a fragment's stored-entry bounds are inconsistent, e.g. a
     * bookie has no address in the fragment, or reports a last stored entry
     * without a first stored entry.
     */
    static class InvalidFragmentException extends Exception {
        private static final long serialVersionUID = 1467201276417062353L;
    }
/**
* This will collect all the entry read call backs and finally it will give
* call back to previous call back API which is waiting for it once it meets
* the expected call backs from down.
*/
private class ReadManyEntriesCallback implements ReadEntryCallback {
AtomicBoolean completed = new AtomicBoolean(false);
final AtomicLong numEntries;
final LedgerFragment fragment;
final GenericCallback<LedgerFragment> cb;
ReadManyEntriesCallback(long numEntries, LedgerFragment fragment,
GenericCallback<LedgerFragment> cb) {
this.numEntries = new AtomicLong(numEntries);
this.fragment = fragment;
this.cb = cb;
}
@Override
public void readEntryComplete(int rc, long ledgerId, long entryId,
ByteBuf buffer, Object ctx) {
releasePermit();
if (rc == BKException.Code.OK) {
if (numEntries.decrementAndGet() == 0
&& !completed.getAndSet(true)) {
cb.operationComplete(rc, fragment);
}
} else if (!completed.getAndSet(true)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Read {}:{} from {} failed, the error code: {}", ledgerId, entryId, ctx, rc);
}
cb.operationComplete(rc, fragment);
}
}
}
/**
* This will collect the bad bookies inside a ledger fragment.
*/
private static class LedgerFragmentCallback implements GenericCallback<LedgerFragment> {
private final LedgerFragment fragment;
private final int bookieIndex;
// bookie index -> return code
private final Map<Integer, Integer> badBookies;
private final AtomicInteger numBookies;
private final GenericCallback<LedgerFragment> cb;
LedgerFragmentCallback(LedgerFragment lf,
int bookieIndex,
GenericCallback<LedgerFragment> cb,
Map<Integer, Integer> badBookies,
AtomicInteger numBookies) {
this.fragment = lf;
this.bookieIndex = bookieIndex;
this.cb = cb;
this.badBookies = badBookies;
this.numBookies = numBookies;
}
@Override
public void operationComplete(int rc, LedgerFragment lf) {
if (BKException.Code.OK != rc) {
synchronized (badBookies) {
badBookies.put(bookieIndex, rc);
}
}
if (numBookies.decrementAndGet() == 0) {
if (badBookies.isEmpty()) {
cb.operationComplete(BKException.Code.OK, fragment);
} else {
int rcToReturn = BKException.Code.NoBookieAvailableException;
for (Map.Entry<Integer, Integer> entry : badBookies.entrySet()) {
rcToReturn = entry.getValue();
if (entry.getValue() == BKException.Code.ClientClosedException) {
break;
}
}
cb.operationComplete(rcToReturn,
fragment.subset(badBookies.keySet()));
}
}
}
}
    /**
     * Create a checker from a {@link BookKeeper} client, with read throttling disabled.
     */
    public LedgerChecker(BookKeeper bkc) {
        this(bkc.getBookieClient(), bkc.getBookieWatcher());
    }
    /**
     * Create a checker from a bookie client and watcher, with read throttling
     * disabled (negative in-flight limit).
     */
    public LedgerChecker(BookieClient client, BookieWatcher watcher) {
        this(client, watcher, -1);
    }
    /**
     * Create a checker from a {@link BookKeeper} client, limiting the number of
     * concurrently outstanding entry reads to {@code inFlightReadEntryNum}
     * (non-positive disables the limit).
     */
    public LedgerChecker(BookKeeper bkc, int inFlightReadEntryNum) {
        this(bkc.getBookieClient(), bkc.getBookieWatcher(), inFlightReadEntryNum);
    }
public LedgerChecker(BookieClient client, BookieWatcher watcher, int inFlightReadEntryNum) {
bookieClient = client;
bookieWatcher = watcher;
if (inFlightReadEntryNum > 0) {
semaphore = new Semaphore(inFlightReadEntryNum);
} else {
semaphore = null;
}
}
/**
* Acquires a permit from permit manager,
* blocking until all are available.
*/
public void acquirePermit() throws InterruptedException {
if (null != semaphore) {
semaphore.acquire(1);
}
}
/**
* Release a given permit.
*/
public void releasePermit() {
if (null != semaphore) {
semaphore.release();
}
}
/**
* Verify a ledger fragment to collect bad bookies.
*
* @param fragment
* fragment to verify
* @param cb
* callback
* @throws InvalidFragmentException
*/
private void verifyLedgerFragment(LedgerFragment fragment,
GenericCallback<LedgerFragment> cb,
Long percentageOfLedgerFragmentToBeVerified)
throws InvalidFragmentException, BKException, InterruptedException {
Set<Integer> bookiesToCheck = fragment.getBookiesIndexes();
if (bookiesToCheck.isEmpty()) {
cb.operationComplete(BKException.Code.OK, fragment);
return;
}
AtomicInteger numBookies = new AtomicInteger(bookiesToCheck.size());
Map<Integer, Integer> badBookies = new HashMap<Integer, Integer>();
for (Integer bookieIndex : bookiesToCheck) {
LedgerFragmentCallback lfCb = new LedgerFragmentCallback(
fragment, bookieIndex, cb, badBookies, numBookies);
verifyLedgerFragment(fragment, bookieIndex, lfCb, percentageOfLedgerFragmentToBeVerified);
}
}
    /**
     * Verify a bookie inside a ledger fragment by sampling and reading entries
     * it is supposed to store.
     *
     * <p>Verification reads up to {@code percentageOfLedgerFragmentToBeVerified}
     * percent of the fragment's entries from the bookie (always including the
     * first and last stored entry); at 100% or above, every stored entry is read.
     * Each read acquires a permit from the checker's throttle, released in the
     * read callback.
     *
     * @param fragment
     *          ledger fragment
     * @param bookieIndex
     *          bookie index in the fragment
     * @param cb
     *          callback invoked with OK, or an error code if any sampled read fails
     * @param percentageOfLedgerFragmentToBeVerified
     *          percentage (0-100) of the fragment's entries to sample
     * @throws InvalidFragmentException if the fragment has no address for this
     *         bookie, or has a last stored entry without a first stored entry
     * @throws InterruptedException if interrupted while waiting for a read permit
     */
    private void verifyLedgerFragment(LedgerFragment fragment,
                                      int bookieIndex,
                                      GenericCallback<LedgerFragment> cb,
                                      long percentageOfLedgerFragmentToBeVerified)
            throws InvalidFragmentException, InterruptedException {
        long firstStored = fragment.getFirstStoredEntryId(bookieIndex);
        long lastStored = fragment.getLastStoredEntryId(bookieIndex);
        BookieId bookie = fragment.getAddress(bookieIndex);
        if (null == bookie) {
            throw new InvalidFragmentException();
        }
        if (firstStored == LedgerHandle.INVALID_ENTRY_ID) {
            // this fragment is not on this bookie
            if (lastStored != LedgerHandle.INVALID_ENTRY_ID) {
                // a last entry without a first entry is inconsistent metadata
                throw new InvalidFragmentException();
            }
            if (bookieWatcher.isBookieUnavailable(fragment.getAddress(bookieIndex))) {
                // fragment is on this bookie, but already know it's unavailable, so skip the call
                cb.operationComplete(BKException.Code.BookieHandleNotAvailableException, fragment);
            } else {
                cb.operationComplete(BKException.Code.OK, fragment);
            }
        } else if (bookieWatcher.isBookieUnavailable(fragment.getAddress(bookieIndex))) {
            // fragment is on this bookie, but already know it's unavailable, so skip the call
            cb.operationComplete(BKException.Code.BookieHandleNotAvailableException, fragment);
        } else if (firstStored == lastStored) {
            // single stored entry: just read it
            acquirePermit();
            ReadManyEntriesCallback manycb = new ReadManyEntriesCallback(1,
                    fragment, cb);
            bookieClient.readEntry(bookie, fragment.getLedgerId(), firstStored,
                    manycb, bookie, BookieProtocol.FLAG_NONE);
        } else {
            if (lastStored <= firstStored) {
                cb.operationComplete(Code.IncorrectParameterException, null);
                return;
            }
            long lengthOfLedgerFragment = lastStored - firstStored + 1;
            // number of entries to sample, derived from the requested percentage
            int numberOfEntriesToBeVerified =
                (int) (lengthOfLedgerFragment * (percentageOfLedgerFragmentToBeVerified / 100.0));
            TreeSet<Long> entriesToBeVerified = new TreeSet<Long>();
            if (numberOfEntriesToBeVerified < lengthOfLedgerFragment) {
                // Evenly pick random entries over the length of the fragment
                if (numberOfEntriesToBeVerified > 0) {
                    // divide the fragment into equal-sized buckets and pick one
                    // random stored entry from each bucket
                    int lengthOfBucket = (int) (lengthOfLedgerFragment / numberOfEntriesToBeVerified);
                    for (long index = firstStored;
                         index < (lastStored - lengthOfBucket - 1);
                         index += lengthOfBucket) {
                        long potentialEntryId = ThreadLocalRandom.current().nextInt((lengthOfBucket)) + index;
                        if (fragment.isStoredEntryId(potentialEntryId, bookieIndex)) {
                            entriesToBeVerified.add(potentialEntryId);
                        }
                    }
                }
                // always verify the fragment boundaries
                entriesToBeVerified.add(firstStored);
                entriesToBeVerified.add(lastStored);
            } else {
                // Verify the entire fragment
                while (firstStored <= lastStored) {
                    if (fragment.isStoredEntryId(firstStored, bookieIndex)) {
                        entriesToBeVerified.add(firstStored);
                    }
                    firstStored++;
                }
            }
            ReadManyEntriesCallback manycb = new ReadManyEntriesCallback(entriesToBeVerified.size(),
                    fragment, cb);
            for (Long entryID: entriesToBeVerified) {
                // throttle each outstanding read; the callback releases the permit
                acquirePermit();
                bookieClient.readEntry(bookie, fragment.getLedgerId(), entryID, manycb, bookie,
                        BookieProtocol.FLAG_NONE);
            }
        }
    }
/**
 * Callback used to decide whether an entry exists at all, as opposed to
 * existing but being currently unreadable. Fired once per read attempt;
 * after the last response it reports whether any bookie may hold the entry.
 */
private class EntryExistsCallback implements ReadEntryCallback {
    AtomicBoolean entryMayExist = new AtomicBoolean(false);
    final AtomicInteger numReads;
    final GenericCallback<Boolean> cb;

    EntryExistsCallback(int numReads, GenericCallback<Boolean> cb) {
        this.numReads = new AtomicInteger(numReads);
        this.cb = cb;
    }

    @Override
    public void readEntryComplete(int rc, long ledgerId, long entryId,
                                  ByteBuf buffer, Object ctx) {
        releasePermit();
        // Only a definite "no such entry/ledger" response proves absence;
        // any other return code (success, timeouts, errors) means the entry
        // may have been written.
        boolean definitelyMissing = rc == BKException.Code.NoSuchEntryException
                || rc == BKException.Code.NoSuchLedgerExistsException
                || rc == BKException.Code.NoSuchLedgerExistsOnMetadataServerException;
        if (!definitelyMissing) {
            entryMayExist.set(true);
        }
        // Complete the parent callback once every outstanding read responded.
        if (numReads.decrementAndGet() == 0) {
            cb.operationComplete(rc, entryMayExist.get());
        }
    }
}
/**
 * Aggregates the per-fragment verification callbacks and, once every
 * fragment has reported, invokes the parent callback with the set of
 * fragments that failed verification.
 */
private static class FullLedgerCallback implements GenericCallback<LedgerFragment> {
    final Set<LedgerFragment> badFragments = new LinkedHashSet<>();
    final AtomicLong numFragments;
    final GenericCallback<Set<LedgerFragment>> cb;

    FullLedgerCallback(long numFragments, GenericCallback<Set<LedgerFragment>> cb) {
        this.numFragments = new AtomicLong(numFragments);
        this.cb = cb;
    }

    @Override
    public void operationComplete(int rc, LedgerFragment result) {
        // Client shutdown: report whatever was collected and stop waiting.
        if (rc == BKException.Code.ClientClosedException) {
            cb.operationComplete(BKException.Code.ClientClosedException, badFragments);
            return;
        }
        if (rc != BKException.Code.OK) {
            badFragments.add(result);
        }
        // Fire the parent callback after the last fragment reports.
        if (numFragments.decrementAndGet() == 0) {
            cb.operationComplete(BKException.Code.OK, badFragments);
        }
    }
}
/**
 * Checks every fragment of the given ledger and reports, via the callback,
 * the fragments whose entries are missing.
 */
public void checkLedger(final LedgerHandle lh,
                        final GenericCallback<Set<LedgerFragment>> cb) {
    // 0% sampling: only the fragment boundary entries are verified.
    this.checkLedger(lh, cb, 0L);
}
/**
 * Check all the fragments of the given ledger, sampling the given percentage
 * of each fragment's entries, and report the bad fragments via the callback.
 *
 * @param lh handle of the ledger to check
 * @param cb callback receiving the set of fragments that failed verification
 * @param percentageOfLedgerFragmentToBeVerified percentage of each fragment's
 *        entries to verify (0 means boundary entries only)
 */
public void checkLedger(final LedgerHandle lh,
                        final GenericCallback<Set<LedgerFragment>> cb,
                        long percentageOfLedgerFragmentToBeVerified) {
    // build a set of all fragment replicas
    final Set<LedgerFragment> fragments = new LinkedHashSet<>();
    Long curEntryId = null;
    List<BookieId> curEnsemble = null;
    // Each metadata ensemble entry starts a new fragment; the previous
    // fragment spans up to (but not including) the next ensemble's first entry.
    for (Map.Entry<Long, ? extends List<BookieId>> e : lh
            .getLedgerMetadata().getAllEnsembles().entrySet()) {
        if (curEntryId != null) {
            Set<Integer> bookieIndexes = new HashSet<Integer>();
            for (int i = 0; i < curEnsemble.size(); i++) {
                bookieIndexes.add(i);
            }
            fragments.add(new LedgerFragment(lh, curEntryId,
                    e.getKey() - 1, bookieIndexes));
        }
        curEntryId = e.getKey();
        curEnsemble = e.getValue();
    }
    /* Checking the last segment of the ledger can be complicated in some cases.
     * In the case that the ledger is closed, we can just check the fragments of
     * the segment as normal even if no data has ever been written to.
     * In the case that the ledger is open, but enough entries have been written,
     * for lastAddConfirmed to be set above the start entry of the segment, we
     * can also check as normal.
     * However, if ledger is open, sometimes lastAddConfirmed cannot be trusted,
     * such as when it's lower than the first entry id, or not set at all,
     * we cannot be sure if there has been data written to the segment.
     * For this reason, we have to send a read request
     * to the bookies which should have the first entry. If they respond with
     * NoSuchEntry we can assume it was never written. If they respond with anything
     * else, we must assume the entry has been written, so we run the check.
     */
    if (curEntryId != null) {
        long lastEntry = lh.getLastAddConfirmed();
        // For an open ledger, clamp lastEntry up to the segment start when
        // lastAddConfirmed is below it (i.e. LAC cannot be trusted here).
        if (!lh.isClosed() && lastEntry < curEntryId) {
            lastEntry = curEntryId;
        }
        Set<Integer> bookieIndexes = new HashSet<Integer>();
        for (int i = 0; i < curEnsemble.size(); i++) {
            bookieIndexes.add(i);
        }
        final LedgerFragment lastLedgerFragment = new LedgerFragment(lh, curEntryId,
                lastEntry, bookieIndexes);
        // Check for the case that no last confirmed entry has been set
        if (curEntryId == lastEntry) {
            final long entryToRead = curEntryId;
            // Once the existence probe finishes (for any outcome), continue
            // with the fragment verification.
            final CompletableFuture<Void> future = new CompletableFuture<>();
            future.whenCompleteAsync((re, ex) -> {
                checkFragments(fragments, cb, percentageOfLedgerFragmentToBeVerified);
            });
            final EntryExistsCallback eecb = new EntryExistsCallback(lh.getLedgerMetadata().getWriteQuorumSize(),
                    new GenericCallback<Boolean>() {
                        @Override
                        public void operationComplete(int rc, Boolean result) {
                            // Only include the last fragment if some bookie
                            // may actually hold its first entry.
                            if (result) {
                                fragments.add(lastLedgerFragment);
                            }
                            future.complete(null);
                        }
                    });
            DistributionSchedule ds = lh.getDistributionSchedule();
            // Probe every bookie in the first entry's write set.
            for (int i = 0; i < ds.getWriteQuorumSize(); i++) {
                try {
                    acquirePermit();
                    BookieId addr = curEnsemble.get(ds.getWriteSetBookieIndex(entryToRead, i));
                    bookieClient.readEntry(addr, lh.getId(), entryToRead,
                            eecb, null, BookieProtocol.FLAG_NONE);
                } catch (InterruptedException e) {
                    LOG.error("InterruptedException when checking entry : {}", entryToRead, e);
                }
            }
            // checkFragments runs asynchronously after the probe completes.
            return;
        } else {
            fragments.add(lastLedgerFragment);
        }
    }
    checkFragments(fragments, cb, percentageOfLedgerFragmentToBeVerified);
}
/**
 * Verifies each of the collected fragment replicas and completes {@code cb}
 * with the set of fragments that failed verification once every fragment has
 * been checked.
 *
 * @param fragments fragment replicas to verify (may be empty)
 * @param cb callback receiving the set of bad fragments
 * @param percentageOfLedgerFragmentToBeVerified percentage of each fragment's
 *        entries to verify (0 means boundary entries only)
 */
private void checkFragments(Set<LedgerFragment> fragments,
                            GenericCallback<Set<LedgerFragment>> cb,
                            long percentageOfLedgerFragmentToBeVerified) {
    if (fragments.size() == 0) { // no fragments to verify
        cb.operationComplete(BKException.Code.OK, fragments);
        return;
    }
    // verify all the collected fragment replicas
    FullLedgerCallback allFragmentsCb = new FullLedgerCallback(fragments
            .size(), cb);
    for (LedgerFragment r : fragments) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Checking fragment {}", r);
        }
        try {
            verifyLedgerFragment(r, allFragmentsCb, percentageOfLedgerFragmentToBeVerified);
        } catch (InvalidFragmentException ife) {
            LOG.error("Invalid fragment found : {}", r);
            allFragmentsCb.operationComplete(
                    BKException.Code.IncorrectParameterException, r);
        } catch (BKException e) {
            // NOTE(review): this path does not decrement the fragment counter
            // in allFragmentsCb, so cb may never complete if it is hit —
            // confirm whether that is intended.
            LOG.error("BKException when checking fragment : {}", r, e);
        } catch (InterruptedException e) {
            LOG.error("InterruptedException when checking fragment : {}", r, e);
            // Restore the interrupt status so callers can observe the
            // interruption instead of it being silently swallowed.
            Thread.currentThread().interrupt();
        }
    }
}
}
| 340 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/TopologyAwareEnsemblePlacementPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIES_JOINED;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIES_LEFT;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.FAILED_TO_RESOLVE_NETWORK_LOCATION_COUNT;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Supplier;
import org.apache.bookkeeper.client.BookieInfoReader.BookieInfo;
import org.apache.bookkeeper.client.WeightedRandomSelection.WeightedObject;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.BookieNode;
import org.apache.bookkeeper.net.BookieSocketAddress;
import org.apache.bookkeeper.net.DNSToSwitchMapping;
import org.apache.bookkeeper.net.NetUtils;
import org.apache.bookkeeper.net.NetworkTopology;
import org.apache.bookkeeper.net.NetworkTopologyImpl;
import org.apache.bookkeeper.net.Node;
import org.apache.bookkeeper.net.NodeBase;
import org.apache.bookkeeper.proto.BookieAddressResolver;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
import org.apache.commons.collections4.CollectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
abstract class TopologyAwareEnsemblePlacementPolicy implements
ITopologyAwareEnsemblePlacementPolicy<BookieNode> {
static final Logger LOG = LoggerFactory.getLogger(TopologyAwareEnsemblePlacementPolicy.class);
public static final String REPP_DNS_RESOLVER_CLASS = "reppDnsResolverClass";
protected final Map<BookieId, BookieNode> knownBookies = new HashMap<BookieId, BookieNode>();
protected final Map<BookieId, BookieNode> historyBookies = new HashMap<BookieId, BookieNode>();
protected final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
protected Map<BookieNode, WeightedObject> bookieInfoMap = new HashMap<BookieNode, WeightedObject>();
// Initialize to empty set
protected ImmutableSet<BookieId> readOnlyBookies = ImmutableSet.of();
boolean isWeighted;
protected WeightedRandomSelection<BookieNode> weightedSelection;
// for now, we just maintain the writable bookies' topology
protected NetworkTopology topology;
protected DNSToSwitchMapping dnsResolver;
protected BookieAddressResolver bookieAddressResolver;
@StatsDoc(
name = BOOKIES_JOINED,
help = "The distribution of number of bookies joined the cluster on each network topology change"
)
protected OpStatsLogger bookiesJoinedCounter = null;
@StatsDoc(
name = BOOKIES_LEFT,
help = "The distribution of number of bookies left the cluster on each network topology change"
)
protected OpStatsLogger bookiesLeftCounter = null;
protected static class TruePredicate implements Predicate<BookieNode> {
public static final TruePredicate INSTANCE = new TruePredicate();
@Override
public boolean apply(BookieNode candidate, Ensemble chosenNodes) {
return true;
}
}
protected static class EnsembleForReplacementWithNoConstraints implements Ensemble<BookieNode> {
public static final EnsembleForReplacementWithNoConstraints INSTANCE =
new EnsembleForReplacementWithNoConstraints();
static final List<BookieId> EMPTY_LIST = new ArrayList<BookieId>(0);
@Override
public boolean addNode(BookieNode node) {
// do nothing
return true;
}
@Override
public List<BookieId> toList() {
return EMPTY_LIST;
}
/**
* Validates if an ensemble is valid.
*
* @return true if the ensemble is valid; false otherwise
*/
@Override
public boolean validate() {
return true;
}
}
/**
* A predicate checking the rack coverage for write quorum in {@link RoundRobinDistributionSchedule},
* which ensures that a write quorum should be covered by at least two racks.
*/
protected static class RRTopologyAwareCoverageEnsemble implements Predicate<BookieNode>, Ensemble<BookieNode> {
protected interface CoverageSet {
boolean apply(BookieNode candidate);
void addBookie(BookieNode candidate);
CoverageSet duplicate();
}
protected class RackQuorumCoverageSet implements CoverageSet {
HashSet<String> racksOrRegionsInQuorum = new HashSet<String>();
int seenBookies = 0;
private final int minNumRacksPerWriteQuorum;
protected RackQuorumCoverageSet(int minNumRacksPerWriteQuorum) {
this.minNumRacksPerWriteQuorum = Math.min(writeQuorumSize, minNumRacksPerWriteQuorum);
}
@Override
public boolean apply(BookieNode candidate) {
// If we don't have sufficient members in the write quorum; then we cant enforce
// rack/region diversity
if (writeQuorumSize < 2) {
return true;
}
/*
* allow the initial writeQuorumSize-minRacksToWriteTo+1 bookies
* to be placed on any rack(including on a single rack). But
* after that make sure that with each new bookie chosen, we
* will be able to satisfy the minRackToWriteTo condition
* eventually
*/
if (seenBookies + minNumRacksPerWriteQuorum - 1 >= writeQuorumSize) {
int numRacks = racksOrRegionsInQuorum.size();
if (!racksOrRegionsInQuorum.contains(candidate.getNetworkLocation(distanceFromLeaves))) {
numRacks++;
}
if (numRacks >= minNumRacksPerWriteQuorum
|| ((writeQuorumSize - seenBookies - 1) >= (minNumRacksPerWriteQuorum - numRacks))) {
/*
* either we have reached our goal or we still have a
* few bookies to be selected with which to catch up to
* the goal
*/
return true;
} else {
return false;
}
}
return true;
}
@Override
public void addBookie(BookieNode candidate) {
++seenBookies;
racksOrRegionsInQuorum.add(candidate.getNetworkLocation(distanceFromLeaves));
}
@Override
public RackQuorumCoverageSet duplicate() {
RackQuorumCoverageSet ret = new RackQuorumCoverageSet(this.minNumRacksPerWriteQuorum);
ret.racksOrRegionsInQuorum = Sets.newHashSet(this.racksOrRegionsInQuorum);
ret.seenBookies = this.seenBookies;
return ret;
}
}
protected class RackOrRegionDurabilityCoverageSet implements CoverageSet {
HashMap<String, Integer> allocationToRacksOrRegions = new HashMap<String, Integer>();
RackOrRegionDurabilityCoverageSet() {
for (String rackOrRegion: racksOrRegions) {
allocationToRacksOrRegions.put(rackOrRegion, 0);
}
}
@Override
public RackOrRegionDurabilityCoverageSet duplicate() {
RackOrRegionDurabilityCoverageSet ret = new RackOrRegionDurabilityCoverageSet();
ret.allocationToRacksOrRegions = Maps.newHashMap(this.allocationToRacksOrRegions);
return ret;
}
private boolean checkSumOfSubsetWithinLimit(final Set<String> includedRacksOrRegions,
final Set<String> remainingRacksOrRegions,
int subsetSize,
int maxAllowedSum) {
if (remainingRacksOrRegions.isEmpty() || (subsetSize <= 0)) {
if (maxAllowedSum < 0) {
if (LOG.isTraceEnabled()) {
LOG.trace(
"CHECK FAILED: RacksOrRegions Included {} Remaining {}, subsetSize {}, "
+ "maxAllowedSum {}",
includedRacksOrRegions, remainingRacksOrRegions, subsetSize, maxAllowedSum);
}
}
return (maxAllowedSum >= 0);
}
for (String rackOrRegion: remainingRacksOrRegions) {
Integer currentAllocation = allocationToRacksOrRegions.get(rackOrRegion);
if (currentAllocation == null) {
allocationToRacksOrRegions.put(rackOrRegion, 0);
currentAllocation = 0;
}
if (currentAllocation > maxAllowedSum) {
if (LOG.isTraceEnabled()) {
LOG.trace(
"CHECK FAILED: RacksOrRegions Included {} Candidate {}, subsetSize {}, "
+ "maxAllowedSum {}",
includedRacksOrRegions, rackOrRegion, subsetSize, maxAllowedSum);
}
return false;
} else {
Set<String> remainingElements = new HashSet<String>(remainingRacksOrRegions);
Set<String> includedElements = new HashSet<String>(includedRacksOrRegions);
includedElements.add(rackOrRegion);
remainingElements.remove(rackOrRegion);
if (!checkSumOfSubsetWithinLimit(includedElements,
remainingElements,
subsetSize - 1,
maxAllowedSum - currentAllocation)) {
return false;
}
}
}
return true;
}
@Override
public boolean apply(BookieNode candidate) {
if (minRacksOrRegionsForDurability <= 1) {
return true;
}
String candidateRackOrRegion = candidate.getNetworkLocation(distanceFromLeaves);
candidateRackOrRegion = candidateRackOrRegion.startsWith(NodeBase.PATH_SEPARATOR_STR)
? candidateRackOrRegion.substring(1) : candidateRackOrRegion;
final Set<String> remainingRacksOrRegions = new HashSet<String>(racksOrRegions);
remainingRacksOrRegions.remove(candidateRackOrRegion);
final Set<String> includedRacksOrRegions = new HashSet<String>();
includedRacksOrRegions.add(candidateRackOrRegion);
// If minRacksOrRegionsForDurability are required for durability; we must ensure that
// no subset of (minRacksOrRegionsForDurability - 1) regions have ackQuorumSize
// We are only modifying candidateRackOrRegion if we accept this bookie, so lets only
// find sets that contain this candidateRackOrRegion
Integer currentAllocation = allocationToRacksOrRegions.get(candidateRackOrRegion);
if (currentAllocation == null) {
LOG.info("Detected a region that was not initialized {}", candidateRackOrRegion);
if (candidateRackOrRegion.equals(NetworkTopology.DEFAULT_REGION)) {
LOG.error("Failed to resolve network location {}", candidate);
} else if (!racksOrRegions.contains(candidateRackOrRegion)) {
LOG.error("Unknown region detected {}", candidateRackOrRegion);
}
allocationToRacksOrRegions.put(candidateRackOrRegion, 0);
currentAllocation = 0;
}
int inclusiveLimit = (ackQuorumSize - 1) - (currentAllocation + 1);
return checkSumOfSubsetWithinLimit(includedRacksOrRegions,
remainingRacksOrRegions, minRacksOrRegionsForDurability - 2, inclusiveLimit);
}
@Override
public void addBookie(BookieNode candidate) {
String candidateRackOrRegion = candidate.getNetworkLocation(distanceFromLeaves);
candidateRackOrRegion = candidateRackOrRegion.startsWith(NodeBase.PATH_SEPARATOR_STR)
? candidateRackOrRegion.substring(1) : candidateRackOrRegion;
int oldCount = 0;
if (null != allocationToRacksOrRegions.get(candidateRackOrRegion)) {
oldCount = allocationToRacksOrRegions.get(candidateRackOrRegion);
}
allocationToRacksOrRegions.put(candidateRackOrRegion, oldCount + 1);
}
}
final int distanceFromLeaves;
final int ensembleSize;
final int writeQuorumSize;
final int ackQuorumSize;
final int minRacksOrRegionsForDurability;
final int minNumRacksPerWriteQuorum;
final List<BookieNode> chosenNodes;
final Set<String> racksOrRegions;
private final CoverageSet[] quorums;
final Predicate<BookieNode> parentPredicate;
final Ensemble<BookieNode> parentEnsemble;
protected RRTopologyAwareCoverageEnsemble(RRTopologyAwareCoverageEnsemble that) {
this.distanceFromLeaves = that.distanceFromLeaves;
this.ensembleSize = that.ensembleSize;
this.writeQuorumSize = that.writeQuorumSize;
this.ackQuorumSize = that.ackQuorumSize;
this.chosenNodes = Lists.newArrayList(that.chosenNodes);
this.quorums = new CoverageSet[that.quorums.length];
for (int i = 0; i < that.quorums.length; i++) {
if (null != that.quorums[i]) {
this.quorums[i] = that.quorums[i].duplicate();
} else {
this.quorums[i] = null;
}
}
this.parentPredicate = that.parentPredicate;
this.parentEnsemble = that.parentEnsemble;
if (null != that.racksOrRegions) {
this.racksOrRegions = new HashSet<String>(that.racksOrRegions);
} else {
this.racksOrRegions = null;
}
this.minRacksOrRegionsForDurability = that.minRacksOrRegionsForDurability;
this.minNumRacksPerWriteQuorum = that.minNumRacksPerWriteQuorum;
}
protected RRTopologyAwareCoverageEnsemble(int ensembleSize,
int writeQuorumSize,
int ackQuorumSize,
int distanceFromLeaves,
Set<String> racksOrRegions,
int minRacksOrRegionsForDurability,
int minNumRacksPerWriteQuorum) {
this(ensembleSize, writeQuorumSize, ackQuorumSize, distanceFromLeaves, null, null, racksOrRegions,
minRacksOrRegionsForDurability, minNumRacksPerWriteQuorum);
}
protected RRTopologyAwareCoverageEnsemble(int ensembleSize,
int writeQuorumSize,
int ackQuorumSize,
int distanceFromLeaves,
Ensemble<BookieNode> parentEnsemble,
Predicate<BookieNode> parentPredicate,
int minNumRacksPerWriteQuorum) {
this(ensembleSize, writeQuorumSize, ackQuorumSize, distanceFromLeaves, parentEnsemble, parentPredicate,
null, 0, minNumRacksPerWriteQuorum);
}
protected RRTopologyAwareCoverageEnsemble(int ensembleSize,
int writeQuorumSize,
int ackQuorumSize,
int distanceFromLeaves,
Ensemble<BookieNode> parentEnsemble,
Predicate<BookieNode> parentPredicate,
Set<String> racksOrRegions,
int minRacksOrRegionsForDurability,
int minNumRacksPerWriteQuorum) {
this.ensembleSize = ensembleSize;
this.writeQuorumSize = writeQuorumSize;
this.ackQuorumSize = ackQuorumSize;
this.distanceFromLeaves = distanceFromLeaves;
this.chosenNodes = new ArrayList<BookieNode>(ensembleSize);
if (minRacksOrRegionsForDurability > 0) {
this.quorums = new RackOrRegionDurabilityCoverageSet[ensembleSize];
} else {
this.quorums = new RackQuorumCoverageSet[ensembleSize];
}
this.parentEnsemble = parentEnsemble;
this.parentPredicate = parentPredicate;
this.racksOrRegions = racksOrRegions;
this.minRacksOrRegionsForDurability = minRacksOrRegionsForDurability;
this.minNumRacksPerWriteQuorum = minNumRacksPerWriteQuorum;
}
@Override
public boolean apply(BookieNode candidate, Ensemble<BookieNode> ensemble) {
if (ensemble != this) {
return false;
}
// An ensemble cannot contain the same node twice
if (chosenNodes.contains(candidate)) {
return false;
}
// candidate position
if ((ensembleSize == writeQuorumSize) && (minRacksOrRegionsForDurability > 0)) {
if (null == quorums[0]) {
quorums[0] = new RackOrRegionDurabilityCoverageSet();
}
if (!quorums[0].apply(candidate)) {
return false;
}
} else {
int candidatePos = chosenNodes.size();
int startPos = candidatePos - writeQuorumSize + 1;
for (int i = startPos; i <= candidatePos; i++) {
int idx = (i + ensembleSize) % ensembleSize;
if (null == quorums[idx]) {
if (minRacksOrRegionsForDurability > 0) {
quorums[idx] = new RackOrRegionDurabilityCoverageSet();
} else {
quorums[idx] = new RackQuorumCoverageSet(this.minNumRacksPerWriteQuorum);
}
}
if (!quorums[idx].apply(candidate)) {
return false;
}
}
}
return ((null == parentPredicate) || parentPredicate.apply(candidate, parentEnsemble));
}
@Override
public boolean addNode(BookieNode node) {
// An ensemble cannot contain the same node twice
if (chosenNodes.contains(node)) {
return false;
}
if ((ensembleSize == writeQuorumSize) && (minRacksOrRegionsForDurability > 0)) {
if (null == quorums[0]) {
quorums[0] = new RackOrRegionDurabilityCoverageSet();
}
quorums[0].addBookie(node);
} else {
int candidatePos = chosenNodes.size();
int startPos = candidatePos - writeQuorumSize + 1;
for (int i = startPos; i <= candidatePos; i++) {
int idx = (i + ensembleSize) % ensembleSize;
if (null == quorums[idx]) {
if (minRacksOrRegionsForDurability > 0) {
quorums[idx] = new RackOrRegionDurabilityCoverageSet();
} else {
quorums[idx] = new RackQuorumCoverageSet(this.minNumRacksPerWriteQuorum);
}
}
quorums[idx].addBookie(node);
}
}
chosenNodes.add(node);
return ((null == parentEnsemble) || parentEnsemble.addNode(node));
}
@Override
public List<BookieId> toList() {
ArrayList<BookieId> addresses = new ArrayList<BookieId>(ensembleSize);
for (BookieNode bn : chosenNodes) {
addresses.add(bn.getAddr());
}
return addresses;
}
/**
* Validates if an ensemble is valid.
*
* @return true if the ensemble is valid; false otherwise
*/
@Override
public boolean validate() {
HashSet<BookieId> addresses = new HashSet<BookieId>(ensembleSize);
HashSet<String> racksOrRegions = new HashSet<String>();
for (BookieNode bn : chosenNodes) {
if (addresses.contains(bn.getAddr())) {
return false;
}
addresses.add(bn.getAddr());
racksOrRegions.add(bn.getNetworkLocation(distanceFromLeaves));
}
return ((minRacksOrRegionsForDurability == 0)
|| (racksOrRegions.size() >= minRacksOrRegionsForDurability));
}
@Override
public String toString() {
return chosenNodes.toString();
}
}
static class DefaultResolver implements DNSToSwitchMapping {
final Supplier<String> defaultRackSupplier;
public DefaultResolver(Supplier<String> defaultRackSupplier) {
checkNotNull(defaultRackSupplier, "defaultRackSupplier should not be null");
this.defaultRackSupplier = defaultRackSupplier;
}
@Override
public List<String> resolve(List<String> names) {
List<String> rNames = new ArrayList<String>(names.size());
for (@SuppressWarnings("unused") String name : names) {
final String defaultRack = defaultRackSupplier.get();
checkNotNull(defaultRack, "defaultRack cannot be null");
rNames.add(defaultRack);
}
return rNames;
}
@Override
public void reloadCachedMappings() {
// nop
}
}
/**
* Decorator for any existing dsn resolver.
* Backfills returned data with appropriate default rack info.
*/
static class DNSResolverDecorator implements DNSToSwitchMapping {
final Supplier<String> defaultRackSupplier;
final DNSToSwitchMapping resolver;
@StatsDoc(
name = FAILED_TO_RESOLVE_NETWORK_LOCATION_COUNT,
help = "total number of times Resolver failed to resolve rack information of a node"
)
final Counter failedToResolveNetworkLocationCounter;
DNSResolverDecorator(DNSToSwitchMapping resolver, Supplier<String> defaultRackSupplier,
Counter failedToResolveNetworkLocationCounter) {
checkNotNull(resolver, "Resolver cannot be null");
checkNotNull(defaultRackSupplier, "defaultRackSupplier should not be null");
this.defaultRackSupplier = defaultRackSupplier;
this.resolver = resolver;
this.failedToResolveNetworkLocationCounter = failedToResolveNetworkLocationCounter;
}
@Override
public void setBookieAddressResolver(BookieAddressResolver bookieAddressResolver) {
this.resolver.setBookieAddressResolver(bookieAddressResolver);
}
@Override
public List<String> resolve(List<String> names) {
if (names == null) {
return Collections.emptyList();
}
final String defaultRack = defaultRackSupplier.get();
checkNotNull(defaultRack, "Default rack cannot be null");
List<String> rNames = resolver.resolve(names);
if (rNames != null && rNames.size() == names.size()) {
for (int i = 0; i < rNames.size(); ++i) {
if (rNames.get(i) == null) {
LOG.warn("Failed to resolve network location for {}, using default rack for it : {}.",
names.get(i), defaultRack);
failedToResolveNetworkLocationCounter.inc();
rNames.set(i, defaultRack);
}
}
return rNames;
}
LOG.warn("Failed to resolve network location for {}, using default rack for them : {}.", names,
defaultRack);
rNames = new ArrayList<>(names.size());
for (int i = 0; i < names.size(); ++i) {
failedToResolveNetworkLocationCounter.inc();
rNames.add(defaultRack);
}
return rNames;
}
@Override
public boolean useHostName() {
return resolver.useHostName();
}
@Override
public void reloadCachedMappings() {
resolver.reloadCachedMappings();
}
}
static Set<String> getNetworkLocations(Set<Node> bookieNodes) {
Set<String> networkLocs = new HashSet<>();
for (Node bookieNode : bookieNodes) {
networkLocs.add(bookieNode.getNetworkLocation());
}
return networkLocs;
}
/**
* Shuffle all the entries of an array that matches a mask.
* It assumes all entries with the same mask are contiguous in the array.
*/
static void shuffleWithMask(DistributionSchedule.WriteSet writeSet,
int mask, int bits) {
int first = -1;
int last = -1;
for (int i = 0; i < writeSet.size(); i++) {
if ((writeSet.get(i) & bits) == mask) {
if (first == -1) {
first = i;
}
last = i;
}
}
if (first != -1) {
for (int i = last + 1; i > first; i--) {
int swapWith = ThreadLocalRandom.current().nextInt(i);
writeSet.set(swapWith, writeSet.set(i, writeSet.get(swapWith)));
}
}
}
@Override
public DistributionSchedule.WriteSet reorderReadSequence(
List<BookieId> ensemble,
BookiesHealthInfo bookiesHealthInfo,
DistributionSchedule.WriteSet writeSet) {
return writeSet;
}
@Override
public DistributionSchedule.WriteSet reorderReadLACSequence(
List<BookieId> ensemble,
BookiesHealthInfo bookiesHealthInfo,
DistributionSchedule.WriteSet writeSet) {
DistributionSchedule.WriteSet retList = reorderReadSequence(
ensemble, bookiesHealthInfo, writeSet);
retList.addMissingIndices(ensemble.size());
return retList;
}
@Override
public Set<BookieId> onClusterChanged(Set<BookieId> writableBookies,
Set<BookieId> readOnlyBookies) {
rwLock.writeLock().lock();
try {
ImmutableSet<BookieId> joinedBookies, leftBookies, deadBookies;
Set<BookieId> oldBookieSet = knownBookies.keySet();
// left bookies : bookies in known bookies, but not in new writable bookie cluster.
leftBookies = Sets.difference(oldBookieSet, writableBookies).immutableCopy();
// joined bookies : bookies in new writable bookie cluster, but not in known bookies
joinedBookies = Sets.difference(writableBookies, oldBookieSet).immutableCopy();
// dead bookies.
deadBookies = Sets.difference(leftBookies, readOnlyBookies).immutableCopy();
if (LOG.isDebugEnabled()) {
LOG.debug("Cluster changed : left bookies are {}, joined bookies are {}, while dead bookies are {}.",
leftBookies, joinedBookies, deadBookies);
}
handleBookiesThatLeft(leftBookies);
handleBookiesThatJoined(joinedBookies);
if (this.isWeighted && (leftBookies.size() > 0 || joinedBookies.size() > 0)) {
this.weightedSelection.updateMap(this.bookieInfoMap);
}
if (!readOnlyBookies.isEmpty()) {
this.readOnlyBookies = ImmutableSet.copyOf(readOnlyBookies);
}
return deadBookies;
} finally {
rwLock.writeLock().unlock();
}
}
/*
* this method should be called in writelock scope of 'rwLock'
*/
@Override
public void handleBookiesThatLeft(Set<BookieId> leftBookies) {
for (BookieId addr : leftBookies) {
try {
BookieNode node = knownBookies.remove(addr);
if (null != node) {
topology.remove(node);
if (this.isWeighted) {
this.bookieInfoMap.remove(node);
}
bookiesLeftCounter.registerSuccessfulValue(1L);
if (LOG.isDebugEnabled()) {
LOG.debug("Cluster changed : bookie {} left from cluster.", addr);
}
}
} catch (Throwable t) {
LOG.error("Unexpected exception while handling leaving bookie {}", addr, t);
if (bookiesLeftCounter != null) {
bookiesLeftCounter.registerFailedValue(1L);
}
// no need to re-throw; we want to process the rest of the bookies
// exception anyways will be caught/logged/suppressed in the ZK's event handler
}
}
}
/*
* this method should be called in writelock scope of 'rwLock'
*/
@Override
public void handleBookiesThatJoined(Set<BookieId> joinedBookies) {
// node joined
for (BookieId addr : joinedBookies) {
try {
BookieNode node = createBookieNode(addr);
topology.add(node);
knownBookies.put(addr, node);
historyBookies.put(addr, node);
if (this.isWeighted) {
this.bookieInfoMap.putIfAbsent(node, new BookieInfo());
}
bookiesJoinedCounter.registerSuccessfulValue(1L);
if (LOG.isDebugEnabled()) {
LOG.debug("Cluster changed : bookie {} joined the cluster.", addr);
}
} catch (Throwable t) {
// topology.add() throws unchecked exception
LOG.error("Unexpected exception while handling joining bookie {}", addr, t);
bookiesJoinedCounter.registerFailedValue(1L);
// no need to re-throw; we want to process the rest of the bookies
// exception anyways will be caught/logged/suppressed in the ZK's event handler
}
}
}
@Override
public void onBookieRackChange(List<BookieId> bookieAddressList) {
rwLock.writeLock().lock();
try {
bookieAddressList.forEach(bookieAddress -> {
try {
BookieNode node = knownBookies.get(bookieAddress);
if (node != null) {
// refresh the rack info if its a known bookie
BookieNode newNode = createBookieNode(bookieAddress);
if (!newNode.getNetworkLocation().equals(node.getNetworkLocation())) {
topology.remove(node);
topology.add(newNode);
knownBookies.put(bookieAddress, newNode);
historyBookies.put(bookieAddress, newNode);
}
}
} catch (IllegalArgumentException | NetworkTopologyImpl.InvalidTopologyException e) {
LOG.error("Failed to update bookie rack info: {} ", bookieAddress, e);
}
});
} finally {
rwLock.writeLock().unlock();
}
}
public static int differBetweenBookies(List<BookieId> bookiesA, List<BookieId> bookiesB) {
if (CollectionUtils.isEmpty(bookiesA) || CollectionUtils.isEmpty(bookiesB)) {
return Integer.MAX_VALUE;
}
if (bookiesA.size() != bookiesB.size()) {
return Integer.MAX_VALUE;
}
int differ = 0;
for (int i = 0; i < bookiesA.size(); i++) {
if (!bookiesA.get(i).equals(bookiesB.get(i))) {
differ++;
}
}
return differ;
}
@Override
public void updateBookieInfo(Map<BookieId, BookieInfo> bookieInfoMap) {
if (!isWeighted) {
LOG.info("bookieFreeDiskInfo callback called even without weighted placement policy being used.");
return;
}
rwLock.writeLock().lock();
try {
List<BookieNode> allBookies = new ArrayList<BookieNode>(knownBookies.values());
// create a new map to reflect the new mapping
Map<BookieNode, WeightedObject> map = new HashMap<BookieNode, WeightedObject>();
for (BookieNode bookie : allBookies) {
if (bookieInfoMap.containsKey(bookie.getAddr())) {
map.put(bookie, bookieInfoMap.get(bookie.getAddr()));
} else {
map.put(bookie, new BookieInfo());
}
}
this.bookieInfoMap = map;
this.weightedSelection.updateMap(this.bookieInfoMap);
} finally {
rwLock.writeLock().unlock();
}
}
protected BookieNode createBookieNode(BookieId addr) {
return new BookieNode(addr, resolveNetworkLocation(addr));
}
protected BookieNode createDummyLocalBookieNode(String hostname) {
return new BookieNode(BookieSocketAddress.createDummyBookieIdForHostname(hostname),
NetUtils.resolveNetworkLocation(dnsResolver, new BookieSocketAddress(hostname, 0)));
}
/**
 * Resolves the network location (rack) of a bookie id.
 *
 * <p>If the id cannot be resolved to a network address, the last known
 * location from {@code historyBookies} is used; failing that, the default
 * region/rack is returned and an error is logged.
 */
protected String resolveNetworkLocation(BookieId addr) {
    try {
        return NetUtils.resolveNetworkLocation(dnsResolver, bookieAddressResolver.resolve(addr));
    } catch (BookieAddressResolver.BookieIdNotResolvedException err) {
        // Fall back to the last location we ever recorded for this bookie.
        BookieNode previouslySeen = historyBookies.get(addr);
        if (previouslySeen != null) {
            return previouslySeen.getNetworkLocation();
        }
        LOG.error("Cannot resolve bookieId {} to a network address, resolving as {}", addr,
                NetworkTopology.DEFAULT_REGION_AND_RACK, err);
        return NetworkTopology.DEFAULT_REGION_AND_RACK;
    }
}
/** Maps each bookie id in {@code bookies} to its topology {@link Node}. */
protected Set<Node> convertBookiesToNodes(Collection<BookieId> bookies) {
    Set<Node> result = new HashSet<Node>();
    for (BookieId bookie : bookies) {
        result.add(convertBookieToNode(bookie));
    }
    return result;
}
/**
 * Returns the cached {@link BookieNode} for {@code addr} if we already know
 * the bookie, otherwise creates a fresh node (without caching it).
 */
protected BookieNode convertBookieToNode(BookieId addr) {
    BookieNode known = knownBookies.get(addr);
    return known != null ? known : createBookieNode(addr);
}
}
| 341 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookiesListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import org.apache.bookkeeper.common.annotation.InterfaceAudience;
import org.apache.bookkeeper.common.annotation.InterfaceStability;
/**
 * Listener for the available bookies changes.
 *
 * <p>Implementations registered with the client are notified whenever the
 * set of available bookies changes; the callback carries no payload, so
 * listeners are expected to re-read the current bookie list themselves.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface BookiesListener {

    /**
     * Callback method triggered when the list of available bookies are changed.
     */
    void availableBookiesChanged();
}
| 342 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandleAdv.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.io.Serializable;
import java.security.GeneralSecurityException;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
import org.apache.bookkeeper.client.AsyncCallback.AddCallbackWithLatency;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncAddCallback;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.client.api.WriteAdvHandle;
import org.apache.bookkeeper.client.api.WriteFlag;
import org.apache.bookkeeper.versioning.Versioned;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Ledger Advanced handle extends {@link LedgerHandle} to provide API to add entries with
* user supplied entryIds. Through this interface Ledger Length may not be accurate while the
* ledger being written.
*/
public class LedgerHandleAdv extends LedgerHandle implements WriteAdvHandle {
    static final Logger LOG = LoggerFactory.getLogger(LedgerHandleAdv.class);

    /**
     * Orders {@link PendingAddOp}s by entry id so the pending-op queue releases
     * operations in entry-id order even when callers submit ids out of order.
     */
    static class PendingOpsComparator implements Comparator<PendingAddOp>, Serializable {
        @Override
        public int compare(PendingAddOp o1, PendingAddOp o2) {
            return Long.compare(o1.entryId, o2.entryId);
        }
    }

    LedgerHandleAdv(ClientContext clientCtx,
                    long ledgerId, Versioned<LedgerMetadata> metadata,
                    BookKeeper.DigestType digestType, byte[] password, EnumSet<WriteFlag> writeFlags)
            throws GeneralSecurityException, NumberFormatException {
        super(clientCtx, ledgerId, metadata, digestType, password, writeFlags);
        // Replace the base class's FIFO pending-op queue with a priority queue
        // keyed on entry id, since callers supply their own entry ids.
        pendingAddOps = new PriorityBlockingQueue<PendingAddOp>(10, new PendingOpsComparator());
    }

    /**
     * Add entry synchronously to an open ledger.
     *
     * @param entryId
     *            entryId of the entry to add
     * @param data
     *            array of bytes to be written to the ledger
     *            do not reuse the buffer, bk-client will release it appropriately
     * @return
     *            entryId that is just created.
     */
    @Override
    public long addEntry(final long entryId, byte[] data) throws InterruptedException, BKException {
        return addEntry(entryId, data, 0, data.length);
    }

    /**
     * Add entry synchronously to an open ledger.
     *
     * @param entryId
     *            entryId of the entry to add
     * @param data
     *            array of bytes to be written to the ledger
     *            do not reuse the buffer, bk-client will release it appropriately
     * @param offset
     *            offset from which to take bytes from data
     * @param length
     *            number of bytes to take from data
     * @return The entryId of newly inserted entry.
     */
    @Override
    public long addEntry(final long entryId, byte[] data, int offset, int length) throws InterruptedException,
            BKException {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Adding entry {}", data);
        }
        SyncAddCallback callback = new SyncAddCallback();
        asyncAddEntry(entryId, data, offset, length, callback, null);
        try {
            return callback.get();
        } catch (ExecutionException err) {
            // The async path completes the callback with a BKException cause;
            // unwrap it so the synchronous API throws the original error.
            throw (BKException) err.getCause();
        }
    }

    /**
     * Add entry asynchronously to an open ledger.
     *
     * @param entryId
     *            entryId of the entry to add
     * @param data
     *            array of bytes to be written
     *            do not reuse the buffer, bk-client will release it appropriately
     * @param cb
     *            object implementing callbackinterface
     * @param ctx
     *            some control object
     */
    @Override
    public void asyncAddEntry(long entryId, byte[] data, AddCallback cb, Object ctx) {
        asyncAddEntry(entryId, data, 0, data.length, cb, ctx);
    }

    /**
     * Add entry asynchronously to an open ledger, using an offset and range.
     *
     * @param entryId
     *            entryId of the entry to add
     * @param data
     *            array of bytes to be written
     *            do not reuse the buffer, bk-client will release it appropriately
     * @param offset
     *            offset from which to take bytes from data
     * @param length
     *            number of bytes to take from data
     * @param cb
     *            object implementing callbackinterface
     * @param ctx
     *            some control object
     * @throws ArrayIndexOutOfBoundsException
     *             if offset or length is negative or offset and length sum to a
     *             value higher than the length of data.
     */
    @Override
    public void asyncAddEntry(final long entryId, final byte[] data, final int offset, final int length,
                              final AddCallback cb, final Object ctx) {
        asyncAddEntry(entryId, Unpooled.wrappedBuffer(data, offset, length), cb, ctx);
    }

    /**
     * Add entry asynchronously to an open ledger, using an offset and range.
     *
     * @param entryId
     *            entryId of the entry to add
     * @param data
     *            array of bytes to be written
     *            do not reuse the buffer, bk-client will release it appropriately
     * @param offset
     *            offset from which to take bytes from data
     * @param length
     *            number of bytes to take from data
     * @param cb
     *            object implementing callbackinterface
     * @param ctx
     *            some control object
     * @throws ArrayIndexOutOfBoundsException
     *             if offset or length is negative or offset and length sum to a
     *             value higher than the length of data.
     */
    @Override
    public void asyncAddEntry(final long entryId, final byte[] data, final int offset, final int length,
                              final AddCallbackWithLatency cb, final Object ctx) {
        asyncAddEntry(entryId, Unpooled.wrappedBuffer(data, offset, length), cb, ctx);
    }

    /**
     * Add entry asynchronously to an open ledger, using an offset and range.
     * This can be used only with {@link LedgerHandleAdv} returned through
     * ledgers created with {@link BookKeeper#createLedgerAdv(int, int, int, BookKeeper.DigestType, byte[])}.
     *
     * @param entryId
     *            entryId of the entry to add.
     * @param data
     *            io.netty.buffer.ByteBuf of bytes to be written
     *            do not reuse the buffer, bk-client will release it appropriately
     * @param cb
     *            object implementing callbackinterface
     * @param ctx
     *            some control object
     */
    @Override
    public void asyncAddEntry(final long entryId, ByteBuf data,
                              final AddCallbackWithLatency cb, final Object ctx) {
        PendingAddOp op = PendingAddOp.create(this, clientCtx, getCurrentEnsemble(), data, writeFlags, cb, ctx);
        op.setEntryId(entryId);
        // Reject ids already confirmed (<= lastAddConfirmed) or already pending;
        // duplicates fail fast with DuplicateEntryIdException.
        if ((entryId <= this.lastAddConfirmed) || pendingAddOps.contains(op)) {
            LOG.error("Trying to re-add duplicate entryid:{}", entryId);
            op.submitCallback(BKException.Code.DuplicateEntryIdException);
            return;
        }
        doAsyncAddEntry(op);
    }

    /**
     * Overriding part is mostly around setting entryId.
     * Though there may be some code duplication, Choose to have the override routine so the control flow is
     * unaltered in the base class.
     */
    @Override
    protected void doAsyncAddEntry(final PendingAddOp op) {
        if (throttler != null) {
            throttler.acquire();
        }
        boolean wasClosed = false;
        synchronized (this) {
            // synchronized on this to ensure that
            // the ledger isn't closed between checking and
            // updating lastAddPushed
            if (isHandleWritable()) {
                long currentLength = addToLength(op.payload.readableBytes());
                op.setLedgerLength(currentLength);
                pendingAddOps.add(op);
            } else {
                wasClosed = true;
            }
        }
        if (wasClosed) {
            // make sure the callback is triggered in main worker pool
            try {
                clientCtx.getMainWorkerPool().submit(new Runnable() {
                    @Override
                    public void run() {
                        LOG.warn("Attempt to add to closed ledger: {}", ledgerId);
                        op.cb.addCompleteWithLatency(BKException.Code.LedgerClosedException,
                                LedgerHandleAdv.this, op.getEntryId(), 0, op.ctx);
                        // The op never entered the pending queue; recycle it here.
                        op.recyclePendAddOpObject();
                    }

                    @Override
                    public String toString() {
                        return String.format("AsyncAddEntryToClosedLedger(lid=%d)", ledgerId);
                    }
                });
            } catch (RejectedExecutionException e) {
                // Worker pool is shutting down; complete the callback inline.
                op.cb.addCompleteWithLatency(BookKeeper.getReturnRc(clientCtx.getBookieClient(),
                        BKException.Code.InterruptedException),
                        LedgerHandleAdv.this, op.getEntryId(), 0, op.ctx);
                op.recyclePendAddOpObject();
            }
            return;
        }
        if (clientCtx.getConf().waitForWriteSetMs >= 0) {
            // Optionally wait for the write set's channels to be writable;
            // if they are not, allow the op to fail fast instead of queueing.
            DistributionSchedule.WriteSet ws = distributionSchedule.getWriteSet(op.getEntryId());
            try {
                if (!waitForWritable(ws, 0, clientCtx.getConf().waitForWriteSetMs)) {
                    op.allowFailFastOnUnwritableChannel();
                }
            } finally {
                ws.recycle();
            }
        }
        op.initiate();
    }

    @Override
    public CompletableFuture<Long> writeAsync(long entryId, ByteBuf data) {
        SyncAddCallback callback = new SyncAddCallback();
        // NOTE: the data buffer is deliberately passed as the callback context.
        asyncAddEntry(entryId, data, callback, data);
        return callback;
    }

    /**
     * LedgerHandleAdv will not allow addEntry without providing an entryId.
     */
    @Override
    public void asyncAddEntry(ByteBuf data, AddCallback cb, Object ctx) {
        cb.addCompleteWithLatency(BKException.Code.IllegalOpException, this, LedgerHandle.INVALID_ENTRY_ID, 0, ctx);
    }

    /**
     * LedgerHandleAdv will not allow addEntry without providing an entryId.
     */
    @Override
    public void asyncAddEntry(final byte[] data, final int offset, final int length,
                              final AddCallback cb, final Object ctx) {
        cb.addComplete(BKException.Code.IllegalOpException, this, LedgerHandle.INVALID_ENTRY_ID, ctx);
    }
}
| 343 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/SpeculativeRequestExecutionPolicy.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
/**
 * Define a policy for speculative request execution.
 *
 * <p>The implementation can define its execution policy. For example, when to issue speculative requests
 * and how many speculative requests to issue.
 *
 * @since 4.5
 */
public interface SpeculativeRequestExecutionPolicy {

    /**
     * Initialize the speculative request execution policy and initiate requests.
     *
     * @param scheduler The scheduler service to issue the speculative request
     * @param requestExecutor The executor is used to issue the actual speculative requests
     * @return a {@link ScheduledFuture} for the scheduled speculative task,
     *         in case the caller needs to cancel it.
     */
    ScheduledFuture<?> initiateSpeculativeRequest(ScheduledExecutorService scheduler,
                                                  SpeculativeRequestExecutor requestExecutor);
}
| 344 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookieWatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieAddressResolver;
/**
 * Watch for Bookkeeper cluster status.
 *
 * <p>Exposes the current view of the cluster (writable, read-only and all
 * bookies) and ensemble selection/replacement operations used when creating
 * or repairing ledgers.
 */
public interface BookieWatcher {

    // Presumably the bookies currently available for writes — verify against
    // the implementation before relying on this distinction.
    Set<BookieId> getBookies() throws BKException;

    // All bookies known to the cluster, regardless of state.
    Set<BookieId> getAllBookies() throws BKException;

    // Bookies currently in read-only mode.
    Set<BookieId> getReadOnlyBookies() throws BKException;

    // Resolver used to map BookieId values to network addresses.
    BookieAddressResolver getBookieAddressResolver();

    /**
     * Determine if a bookie should be considered unavailable.
     *
     * @param id
     *          Bookie to check
     * @return whether or not the given bookie is unavailable
     */
    boolean isBookieUnavailable(BookieId id);

    /**
     * Create an ensemble with given <i>ensembleSize</i> and <i>writeQuorumSize</i>.
     *
     * @param ensembleSize
     *          Ensemble Size
     * @param writeQuorumSize
     *          Write Quorum Size
     * @return list of bookies for new ensemble.
     * @throws BKNotEnoughBookiesException
     */
    List<BookieId> newEnsemble(int ensembleSize, int writeQuorumSize,
                               int ackQuorumSize, Map<String, byte[]> customMetadata)
            throws BKNotEnoughBookiesException;

    /**
     * Choose a bookie to replace bookie <i>bookieIdx</i> in <i>existingBookies</i>.
     * @param existingBookies
     *          list of existing bookies.
     * @param bookieIdx
     *          index of the bookie in the list to be replaced.
     * @return the bookie to replace.
     * @throws BKNotEnoughBookiesException
     */
    BookieId replaceBookie(int ensembleSize, int writeQuorumSize, int ackQuorumSize,
                           Map<String, byte[]> customMetadata,
                           List<BookieId> existingBookies, int bookieIdx,
                           Set<BookieId> excludeBookies)
            throws BKNotEnoughBookiesException;

    /**
     * Quarantine <i>bookie</i> so it will not be preferred to be chosen for new ensembles.
     * @param bookie
     */
    void quarantineBookie(BookieId bookie);

    /**
     * Release all quarantined bookies, let it can be chosen for new ensembles.
     */
    void releaseAllQuarantinedBookies();
}
| 345 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookiesHealthInfo.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import org.apache.bookkeeper.net.BookieId;
/**
 * This interface returns heuristics used to determine the health of a Bookkeeper server for read
 * ordering.
 */
public interface BookiesHealthInfo {

    /**
     * Return the failure history for a bookie.
     *
     * @param bookieSocketAddress
     *          bookie to look up
     * @return failed entries on a bookie, -1 if there have been no failures
     */
    long getBookieFailureHistory(BookieId bookieSocketAddress);

    /**
     * Returns pending requests to a bookie.
     *
     * @param bookieSocketAddress
     *          bookie to look up
     * @return number of pending requests
     */
    long getBookiePendingRequests(BookieId bookieSocketAddress);
}
| 346 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/EnsembleUtils.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkArgument;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.net.BookieId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Static helpers for replacing failed bookies in a ledger ensemble and for
 * comparing two ensembles of the same ledger.
 */
class EnsembleUtils {
    private static final Logger LOG = LoggerFactory.getLogger(EnsembleUtils.class);

    /**
     * Returns a copy of {@code oldEnsemble} with each failed bookie swapped for
     * a replacement chosen by {@code bookieWatcher}.
     *
     * <p>Slots already rewritten by an earlier replacement are skipped. If no
     * replacement at all could be found, the not-enough-bookies exception is
     * rethrown; if at least one succeeded, the partial result is returned.
     */
    static List<BookieId> replaceBookiesInEnsemble(BookieWatcher bookieWatcher,
                                                   LedgerMetadata metadata,
                                                   List<BookieId> oldEnsemble,
                                                   Map<Integer, BookieId> failedBookies,
                                                   String logContext)
            throws BKException.BKNotEnoughBookiesException {
        List<BookieId> updated = new ArrayList<>(oldEnsemble);

        int ensembleSize = metadata.getEnsembleSize();
        int writeQuorum = metadata.getWriteQuorumSize();
        int ackQuorum = metadata.getAckQuorumSize();
        Map<String, byte[]> customMetadata = metadata.getCustomMetadata();
        // Never pick one of the failed bookies as a replacement.
        Set<BookieId> toExclude = new HashSet<>(failedBookies.values());

        int replacements = 0;
        for (Map.Entry<Integer, BookieId> failed : failedBookies.entrySet()) {
            int idx = failed.getKey();
            BookieId addr = failed.getValue();
            if (LOG.isDebugEnabled()) {
                LOG.debug("{} replacing bookie: {} index: {}", logContext, addr, idx);
            }
            // Skip slots that some earlier replacement already rewrote.
            if (!updated.get(idx).equals(addr)) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("{} Not changing failed bookie {} at index {}, already changed to {}",
                            logContext, addr, idx, updated.get(idx));
                }
                continue;
            }
            try {
                BookieId replacement = bookieWatcher.replaceBookie(
                        ensembleSize, writeQuorum, ackQuorum, customMetadata, updated, idx, toExclude);
                updated.set(idx, replacement);
                replacements++;
            } catch (BKException.BKNotEnoughBookiesException e) {
                // if there is no bookie replaced, we throw not enough bookie exception
                if (replacements <= 0) {
                    throw e;
                }
                break;
            }
        }
        return updated;
    }

    /**
     * Returns the set of indices at which two equally-sized ensembles differ.
     *
     * @throws IllegalArgumentException if the ensembles differ in size
     */
    static Set<Integer> diffEnsemble(List<BookieId> e1,
                                     List<BookieId> e2) {
        checkArgument(e1.size() == e2.size(), "Ensembles must be of same size");
        Set<Integer> changed = new HashSet<>();
        final int size = e1.size();
        for (int idx = 0; idx < size; idx++) {
            if (!e1.get(idx).equals(e2.get(idx))) {
                changed.add(idx);
            }
        }
        return changed;
    }
}
| 347 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerCreateOp.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.security.GeneralSecurityException;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.bookkeeper.client.AsyncCallback.CreateCallback;
import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException;
import org.apache.bookkeeper.client.BookKeeper.DigestType;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncCreateAdvCallback;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncCreateCallback;
import org.apache.bookkeeper.client.api.CreateAdvBuilder;
import org.apache.bookkeeper.client.api.CreateBuilder;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.client.api.WriteAdvHandle;
import org.apache.bookkeeper.client.api.WriteFlag;
import org.apache.bookkeeper.client.api.WriteHandle;
import org.apache.bookkeeper.meta.LedgerIdGenerator;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.util.MathUtils;
import org.apache.bookkeeper.versioning.Versioned;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Encapsulates asynchronous ledger create operation.
*
*/
class LedgerCreateOp {
static final Logger LOG = LoggerFactory.getLogger(LedgerCreateOp.class);

// User callback invoked once ledger creation completes (or fails).
final CreateCallback cb;
// Metadata built for the new ledger; assigned during initiate().
LedgerMetadata metadata;
// Handle returned to the caller on success.
LedgerHandle lh;
// -1 means "not assigned yet": either generated later or supplied via initiateAdv().
long ledgerId = -1L;
final Object ctx;
final int ensembleSize;
final int writeQuorumSize;
final int ackQuorumSize;
final Map<String, byte[]> customMetadata;
final int metadataFormatVersion;
final byte[] passwd;
final BookKeeper bk;
final DigestType digestType;
final EnumSet<WriteFlag> writeFlags;
// Start time (nanos) used to record create-op latency statistics.
final long startTime;
final OpStatsLogger createOpLogger;
final BookKeeperClientStats clientStats;
// true when the handle to return is a LedgerHandleAdv (caller-supplied entry ids).
boolean adv = false;
// false when the caller supplied an explicit ledger id via initiateAdv(ledgerId).
boolean generateLedgerId = true;
/**
 * Constructor.
 *
 * @param bk
 *       BookKeeper object
 * @param ensembleSize
 *       ensemble size
 * @param writeQuorumSize
 *       write quorum size
 * @param ackQuorumSize
 *       ack quorum size
 * @param digestType
 *       digest type, either MAC or CRC32
 * @param passwd
 *       password
 * @param cb
 *       callback implementation
 * @param ctx
 *       optional control object
 * @param customMetadata
 *       A map of user specified custom metadata about the ledger to be persisted; will not try to
 *       preserve the order(e.g. sortedMap) upon later retrieval.
 */
LedgerCreateOp(
        BookKeeper bk, int ensembleSize, int writeQuorumSize, int ackQuorumSize, DigestType digestType,
        byte[] passwd, CreateCallback cb, Object ctx, final Map<String, byte[]> customMetadata,
        EnumSet<WriteFlag> writeFlags,
        BookKeeperClientStats clientStats) {
    this.bk = bk;
    this.metadataFormatVersion = bk.getConf().getLedgerMetadataFormatVersion();
    this.ensembleSize = ensembleSize;
    this.writeQuorumSize = writeQuorumSize;
    this.ackQuorumSize = ackQuorumSize;
    this.digestType = digestType;
    this.customMetadata = customMetadata;
    this.writeFlags = writeFlags;
    this.passwd = passwd;
    this.cb = cb;
    this.ctx = ctx;
    // Capture the start time up front so latency stats cover the whole op.
    this.startTime = MathUtils.nowInNano();
    this.createOpLogger = clientStats.getCreateOpLogger();
    this.clientStats = clientStats;
}
/**
 * Initiates the operation: selects the first ensemble, builds the ledger
 * metadata and writes it to the metadata store.
 *
 * <p>With opportunistic striping enabled, the ensemble size is reduced one
 * bookie at a time (down to writeQuorumSize) until an ensemble can be formed;
 * only if no size works is the failure reported.
 */
public void initiate() {
    int actualEnsembleSize = ensembleSize;
    List<BookieId> ensemble = null;
    // select bookies for first ensemble
    if (bk.getConf().getOpportunisticStriping()) {
        BKNotEnoughBookiesException lastError = null;
        // we would like to select ensembleSize bookies, but
        // we can settle to writeQuorumSize
        while (actualEnsembleSize >= writeQuorumSize) {
            try {
                ensemble = bk.getBookieWatcher()
                        .newEnsemble(actualEnsembleSize, writeQuorumSize, ackQuorumSize, customMetadata);
                // Success: clear the recorded error so we don't fail below.
                lastError = null;
                break;
            } catch (BKNotEnoughBookiesException e) {
                if (actualEnsembleSize >= writeQuorumSize + 1) {
                    LOG.info("Not enough bookies to create ledger with ensembleSize={},"
                            + " writeQuorumSize={} and ackQuorumSize={}, opportusticStriping enabled, try again",
                            actualEnsembleSize, writeQuorumSize, ackQuorumSize);
                }
                lastError = e;
                actualEnsembleSize--;
            }
        }
        if (lastError != null) {
            // Every ensemble size down to writeQuorumSize failed.
            LOG.error("Not enough bookies to create ledger with ensembleSize={},"
                    + " writeQuorumSize={} and ackQuorumSize={}",
                    actualEnsembleSize, writeQuorumSize, ackQuorumSize);
            createComplete(lastError.getCode(), null);
            return;
        }
    } else {
        try {
            ensemble = bk.getBookieWatcher()
                    .newEnsemble(actualEnsembleSize, writeQuorumSize, ackQuorumSize, customMetadata);
        } catch (BKNotEnoughBookiesException e) {
            LOG.error("Not enough bookies to create ledger with ensembleSize={},"
                    + " writeQuorumSize={} and ackQuorumSize={}",
                    actualEnsembleSize, writeQuorumSize, ackQuorumSize);
            createComplete(e.getCode(), null);
            return;
        }
    }
    LedgerMetadataBuilder metadataBuilder = LedgerMetadataBuilder.create()
            .withEnsembleSize(actualEnsembleSize).withWriteQuorumSize(writeQuorumSize).withAckQuorumSize(ackQuorumSize)
            .withDigestType(digestType.toApiDigestType()).withPassword(passwd);
    // The first ensemble covers the ledger from entry 0.
    metadataBuilder.newEnsembleEntry(0L, ensemble);
    if (customMetadata != null) {
        metadataBuilder.withCustomMetadata(customMetadata);
    }
    metadataBuilder.withMetadataFormatVersion(metadataFormatVersion);
    if (bk.getConf().getStoreSystemtimeAsLedgerCreationTime()) {
        metadataBuilder.withCreationTime(System.currentTimeMillis()).storingCreationTime(true);
    }
    if (this.generateLedgerId) {
        generateLedgerIdAndCreateLedger(metadataBuilder);
    } else {
        this.metadata = metadataBuilder.withId(ledgerId).build();
        // Create ledger with supplied ledgerId
        bk.getLedgerManager().createLedgerMetadata(ledgerId, metadata)
                .whenComplete((written, exception) -> metadataCallback(written, exception, metadataBuilder));
    }
}
/**
 * Asks the ledger id generator for a fresh id, then writes the metadata for
 * that id to the metadata store. The builder is kept so the caller can retry
 * with a new id on a LedgerExistException (see metadataCallback).
 */
void generateLedgerIdAndCreateLedger(LedgerMetadataBuilder metadataBuilder) {
    // generate a ledgerId
    final LedgerIdGenerator ledgerIdGenerator = bk.getLedgerIdGenerator();
    ledgerIdGenerator.generateLedgerId(new GenericCallback<Long>() {
        @Override
        public void operationComplete(int rc, Long ledgerId) {
            if (BKException.Code.OK != rc) {
                createComplete(rc, null);
                return;
            }
            LedgerCreateOp.this.ledgerId = ledgerId;
            LedgerCreateOp.this.metadata = metadataBuilder.withId(ledgerId).build();
            // create a ledger with metadata
            bk.getLedgerManager().createLedgerMetadata(ledgerId, metadata)
                    .whenComplete((written, exception) -> metadataCallback(written, exception, metadataBuilder));
        }
    });
}
/**
 * Initiates the operation to return LedgerHandleAdv.
 *
 * @param ledgerId ledger id to use, or -1 to have one generated
 */
public void initiateAdv(final long ledgerId) {
    this.adv = true;
    this.ledgerId = ledgerId;
    // A caller-supplied id of -1 means "generate one for me".
    this.generateLedgerId = (ledgerId == -1L);
    initiate();
}
/**
 * Callback when metadata store has responded.
 *
 * <p>On a LedgerExistException with a generated id, a new id is generated and
 * the create retried; any other error completes the op with that error code.
 * On success the appropriate handle type (adv or plain) is constructed and
 * returned to the caller.
 */
private void metadataCallback(Versioned<LedgerMetadata> writtenMetadata,
                              Throwable exception, LedgerMetadataBuilder metadataBuilder) {
    if (exception != null) {
        if (this.generateLedgerId
                && (BKException.getExceptionCode(exception) == BKException.Code.LedgerExistException)) {
            // retry to generate a new ledger id
            generateLedgerIdAndCreateLedger(metadataBuilder);
        } else {
            createComplete(BKException.getExceptionCode(exception), null);
        }
    } else {
        try {
            if (adv) {
                lh = new LedgerHandleAdv(bk.getClientCtx(), ledgerId, writtenMetadata,
                        digestType, passwd, writeFlags);
            } else {
                lh = new LedgerHandle(bk.getClientCtx(), ledgerId, writtenMetadata, digestType, passwd, writeFlags);
            }
        } catch (GeneralSecurityException e) {
            LOG.error("Security exception while creating ledger: " + ledgerId, e);
            createComplete(BKException.Code.DigestNotInitializedException, null);
            return;
        } catch (NumberFormatException e) {
            LOG.error("Incorrectly entered parameter throttle: " + bk.getConf().getThrottleValue(), e);
            createComplete(BKException.Code.IncorrectParameterException, null);
            return;
        }
        List<BookieId> curEns = lh.getLedgerMetadata().getEnsembleAt(0L);
        LOG.info("Ensemble: {} for ledger: {}", curEns, lh.getId());
        for (BookieId bsa : curEns) {
            clientStats.getEnsembleBookieDistributionCounter(bsa.toString()).inc();
        }
        // return the ledger handle back
        createComplete(BKException.Code.OK, lh);
    }
}
/**
 * Records latency stats for the create operation and delivers the result to
 * the user callback (on the handle's ordered executor when a handle exists).
 */
private void createComplete(int rc, LedgerHandle lh) {
    // Opened a new ledger
    long elapsed = MathUtils.elapsedNanos(startTime);
    if (rc == BKException.Code.OK) {
        createOpLogger.registerSuccessfulEvent(elapsed, TimeUnit.NANOSECONDS);
    } else {
        createOpLogger.registerFailedEvent(elapsed, TimeUnit.NANOSECONDS);
    }
    if (lh == null) {
        // lh is null in case of errors
        cb.createComplete(rc, null, ctx);
    } else {
        lh.executeOrdered(() -> cb.createComplete(rc, lh, ctx));
    }
}
public static class CreateBuilderImpl implements CreateBuilder {
private final BookKeeper bk;
// Builder defaults: ensemble of 3, write/ack quorum of 2, CRC32 digest,
// no custom metadata; password has no default and must be supplied.
private int builderEnsembleSize = 3;
private int builderAckQuorumSize = 2;
private int builderWriteQuorumSize = 2;
private byte[] builderPassword;
private EnumSet<WriteFlag> builderWriteFlags = WriteFlag.NONE;
private org.apache.bookkeeper.client.api.DigestType builderDigestType =
        org.apache.bookkeeper.client.api.DigestType.CRC32;
private Map<String, byte[]> builderCustomMetadata = Collections.emptyMap();

CreateBuilderImpl(BookKeeper bk) {
    this.bk = bk;
}

/** Sets the ensemble size for the ledger to create. */
@Override
public CreateBuilder withEnsembleSize(int ensembleSize) {
    this.builderEnsembleSize = ensembleSize;
    return this;
}

/** Sets the write flags for the ledger to create. */
@Override
public CreateBuilder withWriteFlags(EnumSet<WriteFlag> writeFlags) {
    this.builderWriteFlags = writeFlags;
    return this;
}

/** Sets the write quorum size for the ledger to create. */
@Override
public CreateBuilder withWriteQuorumSize(int writeQuorumSize) {
    this.builderWriteQuorumSize = writeQuorumSize;
    return this;
}

/** Sets the ack quorum size for the ledger to create. */
@Override
public CreateBuilder withAckQuorumSize(int ackQuorumSize) {
    this.builderAckQuorumSize = ackQuorumSize;
    return this;
}

/** Sets the ledger password (the caller-supplied array is kept, not copied). */
@SuppressFBWarnings("EI_EXPOSE_REP2")
@Override
public CreateBuilder withPassword(byte[] password) {
    this.builderPassword = password;
    return this;
}

/** Sets the user custom metadata to persist with the ledger. */
@Override
public CreateBuilder withCustomMetadata(Map<String, byte[]> customMetadata) {
    this.builderCustomMetadata = customMetadata;
    return this;
}

/** Sets the digest type used to checksum entries. */
@Override
public CreateBuilder withDigestType(org.apache.bookkeeper.client.api.DigestType digestType) {
    this.builderDigestType = digestType;
    return this;
}

/** Switches to the advanced builder (caller-supplied entry ids). */
@Override
public CreateAdvBuilder makeAdv() {
    return new CreateAdvBuilderImpl(this);
}
private boolean validate() {
if (builderWriteFlags == null) {
LOG.error("invalid null writeFlags");
return false;
}
if (builderWriteQuorumSize > builderEnsembleSize) {
LOG.error("invalid writeQuorumSize {} > ensembleSize {}", builderWriteQuorumSize, builderEnsembleSize);
return false;
}
if (builderAckQuorumSize > builderWriteQuorumSize) {
LOG.error("invalid ackQuorumSize {} > writeQuorumSize {}", builderAckQuorumSize,
builderWriteQuorumSize);
return false;
}
if (builderAckQuorumSize <= 0) {
LOG.error("invalid ackQuorumSize {} <= 0", builderAckQuorumSize);
return false;
}
if (builderPassword == null) {
LOG.error("invalid null password");
return false;
}
if (builderDigestType == null) {
LOG.error("invalid null digestType");
return false;
}
if (builderCustomMetadata == null) {
LOG.error("invalid null customMetadata");
return false;
}
return true;
}
@Override
public CompletableFuture<WriteHandle> execute() {
CompletableFuture<WriteHandle> future = new CompletableFuture<>();
SyncCreateCallback callback = new SyncCreateCallback(future);
create(callback);
return future;
}
private void create(CreateCallback cb) {
if (!validate()) {
cb.createComplete(BKException.Code.IncorrectParameterException, null, null);
return;
}
LedgerCreateOp op = new LedgerCreateOp(bk, builderEnsembleSize,
builderWriteQuorumSize, builderAckQuorumSize, DigestType.fromApiDigestType(builderDigestType),
builderPassword, cb, null, builderCustomMetadata, builderWriteFlags,
bk.getClientCtx().getClientStats());
ReentrantReadWriteLock closeLock = bk.getCloseLock();
closeLock.readLock().lock();
try {
if (bk.isClosed()) {
cb.createComplete(BKException.Code.ClientClosedException, null, null);
return;
}
op.initiate();
} finally {
closeLock.readLock().unlock();
}
}
}
private static class CreateAdvBuilderImpl implements CreateAdvBuilder {
private Long builderLedgerId;
private final CreateBuilderImpl parent;
private CreateAdvBuilderImpl(CreateBuilderImpl parent) {
this.parent = parent;
}
@Override
public CreateAdvBuilder withLedgerId(long ledgerId) {
builderLedgerId = ledgerId;
return this;
}
@Override
public CompletableFuture<WriteAdvHandle> execute() {
CompletableFuture<WriteAdvHandle> future = new CompletableFuture<>();
SyncCreateAdvCallback callback = new SyncCreateAdvCallback(future);
create(callback);
return future;
}
private boolean validate() {
if (!parent.validate()) {
return false;
}
if (builderLedgerId != null && builderLedgerId < 0) {
LOG.error("invalid ledgerId {} < 0. Do not set en explicit value if you want automatic generation",
builderLedgerId);
return false;
}
return true;
}
private void create(CreateCallback cb) {
if (!validate()) {
cb.createComplete(BKException.Code.IncorrectParameterException, null, null);
return;
}
LedgerCreateOp op = new LedgerCreateOp(parent.bk, parent.builderEnsembleSize,
parent.builderWriteQuorumSize, parent.builderAckQuorumSize,
DigestType.fromApiDigestType(parent.builderDigestType),
parent.builderPassword, cb, null, parent.builderCustomMetadata,
parent.builderWriteFlags,
parent.bk.getClientCtx().getClientStats());
ReentrantReadWriteLock closeLock = parent.bk.getCloseLock();
closeLock.readLock().lock();
try {
if (parent.bk.isClosed()) {
cb.createComplete(BKException.Code.ClientClosedException, null, null);
return;
}
op.initiateAdv(builderLedgerId == null ? -1L : builderLedgerId);
} finally {
closeLock.readLock().unlock();
}
}
}
}
| 348 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/PendingWriteLacOp.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import java.util.BitSet;
import java.util.List;
import org.apache.bookkeeper.client.AsyncCallback.AddLacCallback;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteLacCallback;
import org.apache.bookkeeper.util.ByteBufList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This represents a pending WriteLac operation. When it has got
* success from Ack Quorum bookies, sends success back to the application,
* otherwise failure is sent back to the caller.
*
* <p>This is an optional protocol operations to facilitate tailing readers
* to be up to date with the writer. This is best effort to get latest LAC
* from bookies, and doesn't affect the correctness of the protocol.
*/
class PendingWriteLacOp implements WriteLacCallback {
    private static final Logger LOG = LoggerFactory.getLogger(PendingWriteLacOp.class);

    // Serialized explicit-LAC payload sent to each write-set bookie.
    ByteBufList toSend;
    // Application callback invoked once the operation completes.
    AddLacCallback cb;
    long lac;
    // Caller-supplied opaque context, handed back through cb.
    Object ctx;
    // Bits still set = bookies we are waiting on a response from.
    BitSet receivedResponseSet;
    DistributionSchedule.AckSet ackSet;
    boolean completed = false;
    int lastSeenError = BKException.Code.WriteException;
    LedgerHandle lh;
    ClientContext clientCtx;
    final List<BookieId> currentEnsemble;

    PendingWriteLacOp(LedgerHandle lh, ClientContext clientCtx, List<BookieId> ensemble,
                      AddLacCallback cb, Object ctx) {
        this.lh = lh;
        this.clientCtx = clientCtx;
        this.cb = cb;
        this.ctx = ctx;
        this.lac = LedgerHandle.INVALID_ENTRY_ID;
        ackSet = lh.distributionSchedule.getAckSet();
        currentEnsemble = ensemble;
    }

    void setLac(long lac) {
        this.lac = lac;
        // NOTE(review): the response set is sized and pre-set with the write
        // quorum size, but writeLacComplete() clears it using ensemble-wide
        // bookie indices from getWriteSetBookieIndex(); confirm those indices
        // always fall within [0, writeQuorumSize) on the writeLac path.
        this.receivedResponseSet = new BitSet(
                lh.getLedgerMetadata().getWriteQuorumSize());
        this.receivedResponseSet.set(0,
                lh.getLedgerMetadata().getWriteQuorumSize());
    }

    void sendWriteLacRequest(int bookieIndex) {
        // The bookie index rides along as the per-request ctx of this op's
        // WriteLacCallback, so the response can be matched to its bookie.
        clientCtx.getBookieClient().writeLac(currentEnsemble.get(bookieIndex),
                lh.ledgerId, lh.ledgerKey, lac, toSend, this, bookieIndex);
    }

    void initiate(ByteBufList toSend) {
        this.toSend = toSend;
        for (int i = 0; i < lh.distributionSchedule.getWriteQuorumSize(); i++) {
            sendWriteLacRequest(lh.distributionSchedule.getWriteSetBookieIndex(lac, i));
        }
    }

    @Override
    public void writeLacComplete(int rc, long ledgerId, BookieId addr, Object ctx) {
        // This 'ctx' parameter is the bookie index attached in
        // sendWriteLacRequest(); it shadows the caller context in this.ctx.
        int bookieIndex = (Integer) ctx;

        if (completed) {
            return;
        }

        if (BKException.Code.OK != rc) {
            lastSeenError = rc;
        }

        // We got response.
        receivedResponseSet.clear(bookieIndex);

        if (rc == BKException.Code.OK) {
            if (ackSet.completeBookieAndCheck(bookieIndex) && !completed) {
                completed = true;
                // BUGFIX: hand the caller back its own context (this.ctx),
                // not the bookie index that arrived as this callback's ctx.
                cb.addLacComplete(rc, lh, this.ctx);
                return;
            }
        } else {
            LOG.warn("WriteLac did not succeed: Ledger {} on {}", ledgerId, addr);
        }

        // Every bookie has answered but the ack quorum was never satisfied:
        // report the last error we observed.
        if (receivedResponseSet.isEmpty()) {
            completed = true;
            cb.addLacComplete(lastSeenError, lh, this.ctx);
        }
    }
}
| 349 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerDeleteOp.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.bookkeeper.client.AsyncCallback.DeleteCallback;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncDeleteCallback;
import org.apache.bookkeeper.client.api.DeleteBuilder;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.util.MathUtils;
import org.apache.bookkeeper.versioning.Version;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Encapsulates asynchronous ledger delete operation.
*
*/
class LedgerDeleteOp {

    static final Logger LOG = LoggerFactory.getLogger(LedgerDeleteOp.class);

    final BookKeeper bk;
    final long ledgerId;
    final DeleteCallback cb;
    final Object ctx;
    // Start timestamp (nanos) used for latency accounting.
    final long startTime;
    final OpStatsLogger deleteOpLogger;

    /**
     * Constructor.
     *
     * @param bk BookKeeper client instance
     * @param clientStats client stats supplying the delete-op latency logger
     * @param ledgerId id of the ledger to delete
     * @param cb callback notified with the outcome
     * @param ctx optional opaque context passed back to the callback
     */
    LedgerDeleteOp(BookKeeper bk, BookKeeperClientStats clientStats,
                   long ledgerId, DeleteCallback cb, Object ctx) {
        this.bk = bk;
        this.ledgerId = ledgerId;
        this.cb = cb;
        this.ctx = ctx;
        this.startTime = MathUtils.nowInNano();
        this.deleteOpLogger = clientStats.getDeleteOpLogger();
    }

    /**
     * Kicks off the asynchronous removal of the ledger's metadata; the
     * callback supplied at construction time receives the result.
     */
    public void initiate() {
        bk.getLedgerManager().removeLedgerMetadata(ledgerId, Version.ANY)
                .whenCompleteAsync((ignore, exception) -> {
                    // Record latency, bucketed by outcome.
                    long elapsed = MathUtils.elapsedNanos(startTime);
                    if (exception == null) {
                        deleteOpLogger.registerSuccessfulEvent(elapsed, TimeUnit.NANOSECONDS);
                    } else {
                        deleteOpLogger.registerFailedEvent(elapsed, TimeUnit.NANOSECONDS);
                    }
                    cb.deleteComplete(BKException.getExceptionCode(exception), this.ctx);
                }, bk.getMainWorkerPool().chooseThread(ledgerId));
    }

    @Override
    public String toString() {
        return String.format("LedgerDeleteOp(%d)", ledgerId);
    }

    /**
     * Fluent builder for deleting a ledger; see {@link DeleteBuilder}.
     */
    static class DeleteBuilderImpl implements DeleteBuilder {

        private final BookKeeper bk;
        // Ledger id to delete; must be set and non-negative before execute().
        private Long builderLedgerId;

        DeleteBuilderImpl(BookKeeper bk) {
            this.bk = bk;
        }

        @Override
        public DeleteBuilder withLedgerId(long ledgerId) {
            this.builderLedgerId = ledgerId;
            return this;
        }

        @Override
        public CompletableFuture<Void> execute() {
            CompletableFuture<Void> future = new CompletableFuture<>();
            delete(builderLedgerId, new SyncDeleteCallback(future));
            return future;
        }

        /** A ledger id must have been supplied and must be non-negative. */
        private boolean validate() {
            if (builderLedgerId == null || builderLedgerId < 0) {
                LOG.error("invalid ledgerId {} < 0", builderLedgerId);
                return false;
            }
            return true;
        }

        private void delete(Long ledgerId, AsyncCallback.DeleteCallback cb) {
            if (!validate()) {
                cb.deleteComplete(BKException.Code.IncorrectParameterException, null);
                return;
            }
            LedgerDeleteOp op = new LedgerDeleteOp(bk, bk.getClientCtx().getClientStats(), ledgerId, cb, null);
            // Hold the client's close-lock while initiating so the client
            // cannot shut down underneath the in-flight operation.
            ReentrantReadWriteLock closeLock = bk.getCloseLock();
            closeLock.readLock().lock();
            try {
                if (bk.isClosed()) {
                    cb.deleteComplete(BKException.Code.ClientClosedException, null);
                    return;
                }
                op.initiate();
            } finally {
                closeLock.readLock().unlock();
            }
        }
    }
}
| 350 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerEntry.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkState;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
import java.io.InputStream;
import org.apache.bookkeeper.client.impl.LedgerEntryImpl;
import org.apache.bookkeeper.conf.ClientConfiguration;
/**
* Ledger entry. Its a simple tuple containing the ledger id, the entry-id, and
* the entry content.
*
*/
public class LedgerEntry {
    // Id of the ledger this entry belongs to.
    final long ledgerId;
    // Id of this entry within the ledger.
    final long entryId;
    // Length in bytes, as reported by the underlying LedgerEntryImpl.
    final long length;
    // Entry payload; nulled out once consumed by getEntry()/getEntryInputStream().
    ByteBuf data;

    LedgerEntry(LedgerEntryImpl entry) {
        this.ledgerId = entry.getLedgerId();
        this.entryId = entry.getEntryId();
        this.length = entry.getLength();
        // Retain the buffer: this object now owns one reference to the payload.
        this.data = entry.getEntryBuffer().retain();
    }

    public long getLedgerId() {
        return ledgerId;
    }

    public long getEntryId() {
        return entryId;
    }

    public long getLength() {
        return length;
    }

    /**
     * Returns the content of the entry.
     * This method can be called only once. While using v2 wire protocol this method will automatically release
     * the internal ByteBuf
     *
     * @return the content of the entry
     * @throws IllegalStateException if this method is called twice
     */
    public byte[] getEntry() {
        checkState(null != data, "entry content can be accessed only once");
        byte[] entry = new byte[data.readableBytes()];
        data.readBytes(entry);
        // Release our reference and drop the field so any further access
        // of the payload fails the checkState above.
        data.release();
        data = null;
        return entry;
    }

    /**
     * Returns the content of the entry.
     * This method can be called only once. While using v2 wire protocol this method will automatically release
     * the internal ByteBuf when calling the close
     * method of the returned InputStream
     *
     * @return an InputStream which gives access to the content of the entry
     * @throws IllegalStateException if this method is called twice
     */
    public InputStream getEntryInputStream() {
        checkState(null != data, "entry content can be accessed only once");
        // NOTE(review): the single-argument ByteBufInputStream constructor does
        // NOT release the wrapped buffer on close(); the javadoc above promises
        // release-on-close, which would require new ByteBufInputStream(data, true).
        // Confirm which behavior callers rely on before changing either side.
        ByteBufInputStream res = new ByteBufInputStream(data);
        data = null;
        return res;
    }

    /**
     * Return the internal buffer that contains the entry payload.
     *
     * <p>Note: Using v2 wire protocol it is responsibility of the caller to ensure to release the buffer after usage.
     *
     * @return a ByteBuf which contains the data
     *
     * @see ClientConfiguration#setNettyUsePooledBuffers(boolean)
     * @throws IllegalStateException if the entry has been retrieved by {@link #getEntry()}
     * or {@link #getEntryInputStream()}.
     */
    public ByteBuf getEntryBuffer() {
        checkState(null != data, "entry content has been retrieved by #getEntry or #getEntryInputStream");
        return data;
    }
}
| 351 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/DefaultSpeculativeRequestExecutionPolicy.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import static com.google.common.util.concurrent.MoreExecutors.directExecutor;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A default implementation of {@link SpeculativeRequestExecutionPolicy}.
*
* <p>The policy issues speculative requests in a backoff way. The time between two speculative requests
* are between {@code firstSpeculativeRequestTimeout} and {@code maxSpeculativeRequestTimeout}.
*/
public class DefaultSpeculativeRequestExecutionPolicy implements SpeculativeRequestExecutionPolicy {
    private static final Logger LOG = LoggerFactory.getLogger(DefaultSpeculativeRequestExecutionPolicy.class);

    // Delay (ms) before the first speculative request is issued.
    final int firstSpeculativeRequestTimeout;
    // Upper bound (ms) on the delay between consecutive speculative requests.
    final int maxSpeculativeRequestTimeout;
    // Factor by which the delay grows after each speculative request; > 0.
    final float backoffMultiplier;

    /**
     * Creates a backoff-based speculative request policy.
     *
     * @param firstSpeculativeRequestTimeout delay in milliseconds before the first speculative request
     * @param maxSpeculativeRequestTimeout maximum delay in milliseconds between speculative requests
     * @param backoffMultiplier factor applied to the delay after each request; must be positive
     * @throws IllegalArgumentException if {@code backoffMultiplier} is not positive, or if
     *         {@code maxSpeculativeRequestTimeout * backoffMultiplier} would overflow an int
     */
    public DefaultSpeculativeRequestExecutionPolicy(int firstSpeculativeRequestTimeout,
                                                   int maxSpeculativeRequestTimeout, float backoffMultiplier) {
        // Fail fast: validate arguments before assigning any state.
        if (backoffMultiplier <= 0) {
            throw new IllegalArgumentException("Invalid value provided for backoffMultiplier");
        }
        // Prevent potential over flow
        if (Math.round((double) maxSpeculativeRequestTimeout * (double) backoffMultiplier) > Integer.MAX_VALUE) {
            throw new IllegalArgumentException("Invalid values for maxSpeculativeRequestTimeout and backoffMultiplier");
        }
        this.firstSpeculativeRequestTimeout = firstSpeculativeRequestTimeout;
        this.maxSpeculativeRequestTimeout = maxSpeculativeRequestTimeout;
        this.backoffMultiplier = backoffMultiplier;
    }

    /**
     * Initialize the speculative request execution policy.
     *
     * @param scheduler The scheduler service to issue the speculative request
     * @param requestExecutor The executor is used to issue the actual speculative requests
     * @return ScheduledFuture, in case caller needs to cancel it.
     */
    @Override
    public ScheduledFuture<?> initiateSpeculativeRequest(final ScheduledExecutorService scheduler,
                                                         final SpeculativeRequestExecutor requestExecutor) {
        return scheduleSpeculativeRead(scheduler, requestExecutor, firstSpeculativeRequestTimeout);
    }

    /**
     * Schedules the next speculative request after {@code speculativeRequestTimeout}
     * milliseconds. If the executor reports more requests should follow, re-schedules
     * itself with the backed-off (capped) timeout.
     *
     * @return the scheduled future, or {@code null} if the scheduler rejected the task
     *         (e.g. because it is shutting down)
     */
    private ScheduledFuture<?> scheduleSpeculativeRead(final ScheduledExecutorService scheduler,
                                                       final SpeculativeRequestExecutor requestExecutor,
                                                       final int speculativeRequestTimeout) {
        try {
            return scheduler.schedule(new Runnable() {
                @Override
                public void run() {
                    ListenableFuture<Boolean> issueNextRequest = requestExecutor.issueSpeculativeRequest();
                    Futures.addCallback(issueNextRequest, new FutureCallback<Boolean>() {
                        // we want this handler to run immediately after we push the big red button!
                        @Override
                        public void onSuccess(Boolean issueNextRequest) {
                            if (issueNextRequest) {
                                // Back off the next delay, capped at maxSpeculativeRequestTimeout.
                                scheduleSpeculativeRead(scheduler, requestExecutor,
                                        Math.min(maxSpeculativeRequestTimeout,
                                                Math.round((float) speculativeRequestTimeout * backoffMultiplier)));
                            } else {
                                if (LOG.isTraceEnabled()) {
                                    LOG.trace("Stopped issuing speculative requests for {}, "
                                            + "speculativeReadTimeout = {}", requestExecutor, speculativeRequestTimeout);
                                }
                            }
                        }

                        @Override
                        public void onFailure(Throwable thrown) {
                            LOG.warn("Failed to issue speculative request for {}, speculativeReadTimeout = {} : ",
                                    requestExecutor, speculativeRequestTimeout, thrown);
                        }
                    }, directExecutor());
                }
            }, speculativeRequestTimeout, TimeUnit.MILLISECONDS);
        } catch (RejectedExecutionException re) {
            // Expected during shutdown; only warn if the scheduler is still live.
            if (!scheduler.isShutdown()) {
                LOG.warn("Failed to schedule speculative request for {}, speculativeReadTimeout = {} : ",
                        requestExecutor, speculativeRequestTimeout, re);
            }
        }
        return null;
    }
}
| 352 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerRecoveryOp.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import com.google.common.annotations.VisibleForTesting;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadEntryListener;
import org.apache.bookkeeper.proto.checksum.DigestManager.RecoveryData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class encapsulated the ledger recovery operation. It first does a read
* with entry-id of -1 (BookieProtocol.LAST_ADD_CONFIRMED) to all bookies. Then
* starting from the last confirmed entry (from hints in the ledger entries),
* it reads forward until it is not able to find a particular entry.
*/
class LedgerRecoveryOp implements ReadEntryListener, AddCallback {
    static final Logger LOG = LoggerFactory.getLogger(LedgerRecoveryOp.class);

    final LedgerHandle lh;
    final ClientContext clientCtx;
    // Completed with lh on successful recovery, or exceptionally on failure.
    final CompletableFuture<LedgerHandle> promise;
    // Entries read back vs. re-written during recovery; the op only finishes
    // once every read entry has also been re-added (readCount == writeCount).
    final AtomicLong readCount, writeCount;
    // Set once a recovery read reports "no such entry/ledger", i.e. the tail
    // of the ledger has been found and no further reads are needed.
    volatile boolean readDone;
    // Inclusive bounds of the current batch of recovery reads.
    volatile long startEntryToRead;
    volatile long endEntryToRead;
    // keep a copy of metadata for recovery.
    LedgerMetadata metadataForRecovery;

    // EntryListener Hook
    @VisibleForTesting
    ReadEntryListener entryListener = null;

    // Read op pinned to the metadata snapshot taken at the start of recovery,
    // via the getLedgerMetadata() override below.
    class RecoveryReadOp extends ListenerBasedPendingReadOp {

        RecoveryReadOp(LedgerHandle lh,
                       ClientContext clientCtx,
                       long startEntryId, long endEntryId,
                       ReadEntryListener cb, Object ctx) {
            super(lh, clientCtx, startEntryId, endEntryId, cb, ctx, true);
        }

        @Override
        protected LedgerMetadata getLedgerMetadata() {
            return metadataForRecovery;
        }
    }

    public LedgerRecoveryOp(LedgerHandle lh, ClientContext clientCtx) {
        readCount = new AtomicLong(0);
        writeCount = new AtomicLong(0);
        readDone = false;
        this.promise = new CompletableFuture<>();
        this.lh = lh;
        this.clientCtx = clientCtx;
    }

    /**
     * Set an entry listener to listen on individual recovery reads during recovery procedure.
     *
     * @param entryListener
     *          entry listener
     * @return ledger recovery operation
     */
    @VisibleForTesting
    LedgerRecoveryOp setEntryListener(ReadEntryListener entryListener) {
        this.entryListener = entryListener;
        return this;
    }

    /**
     * Starts recovery: fences the ledger, determines the starting LAC, then
     * reads forward (re-adding each entry) until the tail is found.
     *
     * @return a future completed with the ledger handle once recovery is done
     */
    public CompletableFuture<LedgerHandle> initiate() {
        ReadLastConfirmedOp rlcop = new ReadLastConfirmedOp(clientCtx.getBookieClient(),
                lh.distributionSchedule,
                lh.macManager,
                lh.ledgerId,
                lh.getCurrentEnsemble(),
                lh.ledgerKey,
                new ReadLastConfirmedOp.LastConfirmedDataCallback() {
                    @Override
                    public void readLastConfirmedDataComplete(int rc, RecoveryData data) {
                        if (rc == BKException.Code.OK) {
                            synchronized (lh) {
                                /**
                                 The lowest an LAC can be for use in recovery is the first entry id
                                 of the current ensemble - 1.
                                 All ensembles prior to the current one, if any, are confirmed and
                                 immutable (so are not part of the recovery process).
                                 So we take the highest of:
                                 - the LAC returned by the current bookie ensemble (could be -1)
                                 - the first entry id of the current ensemble - 1.
                                 */
                                Long lastEnsembleEntryId = lh.getVersionedLedgerMetadata()
                                        .getValue()
                                        .getAllEnsembles()
                                        .lastEntry()
                                        .getKey();
                                lh.lastAddPushed = lh.lastAddConfirmed = Math.max(data.getLastAddConfirmed(),
                                        (lastEnsembleEntryId - 1));
                                lh.length = data.getLength();
                                lh.pendingAddsSequenceHead = lh.lastAddConfirmed;
                                startEntryToRead = endEntryToRead = lh.lastAddConfirmed;
                            }
                            // keep a copy of ledger metadata before proceeding
                            // ledger recovery
                            metadataForRecovery = lh.getLedgerMetadata();
                            doRecoveryRead();
                        } else if (rc == BKException.Code.TimeoutException) {
                            submitCallback(rc);
                        } else if (rc == BKException.Code.UnauthorizedAccessException) {
                            submitCallback(rc);
                        } else {
                            submitCallback(BKException.Code.ReadException);
                        }
                    }
                });

        /**
         * Enable fencing on this op. When the read request reaches the bookies
         * server it will fence off the ledger, stopping any subsequent operation
         * from writing to it.
         */
        rlcop.initiateWithFencing();

        return promise;
    }

    // Records recovery read/add counts in stats and settles the promise.
    private void submitCallback(int rc) {
        if (BKException.Code.OK == rc) {
            clientCtx.getClientStats().getRecoverAddCountLogger().registerSuccessfulValue(writeCount.get());
            clientCtx.getClientStats().getRecoverReadCountLogger().registerSuccessfulValue(readCount.get());
            promise.complete(lh);
        } else {
            clientCtx.getClientStats().getRecoverAddCountLogger().registerFailedValue(writeCount.get());
            clientCtx.getClientStats().getRecoverReadCountLogger().registerFailedValue(readCount.get());
            promise.completeExceptionally(BKException.create(rc));
        }
    }

    /**
     * Try to read past the last confirmed.
     */
    private void doRecoveryRead() {
        if (!promise.isDone()) {
            // Advance the window by one batch of recoveryReadBatchSize entries.
            startEntryToRead = endEntryToRead + 1;
            endEntryToRead = endEntryToRead + clientCtx.getConf().recoveryReadBatchSize;
            new RecoveryReadOp(lh, clientCtx, startEntryToRead, endEntryToRead, this, null)
                    .initiate();
        }
    }

    @Override
    public void onEntryComplete(int rc, LedgerHandle lh, LedgerEntry entry, Object ctx) {
        // notify entry listener on individual entries being read during ledger recovery.
        ReadEntryListener listener = entryListener;
        if (null != listener) {
            listener.onEntryComplete(rc, lh, entry, ctx);
        }

        // we only trigger recovery add an entry when readDone == false && callbackDone == false
        if (!promise.isDone() && !readDone && rc == BKException.Code.OK) {
            readCount.incrementAndGet();
            byte[] data = entry.getEntry();

            /*
             * We will add this entry again to make sure it is written to enough
             * replicas. We subtract the length of the data itself, since it will
             * be added again when processing the call to add it.
             */
            synchronized (lh) {
                lh.length = entry.getLength() - (long) data.length;
                // check whether entry id is expected, so we won't overwritten any entries by mistake
                if (entry.getEntryId() != lh.lastAddPushed + 1) {
                    LOG.error("Unexpected to recovery add entry {} as entry {} for ledger {}.",
                            entry.getEntryId(), (lh.lastAddPushed + 1), lh.getId());
                    rc = BKException.Code.UnexpectedConditionException;
                }
            }
            if (BKException.Code.OK == rc) {
                lh.asyncRecoveryAddEntry(data, 0, data.length, this, null);
                if (entry.getEntryId() == endEntryToRead) {
                    // trigger next batch read
                    doRecoveryRead();
                }
                return;
            }
        }

        // no entry found. stop recovery procedure but wait until recovery add finished.
        if (rc == BKException.Code.NoSuchEntryException || rc == BKException.Code.NoSuchLedgerExistsException) {
            readDone = true;
            // All reads already matched by completed adds: recovery is done.
            if (readCount.get() == writeCount.get()) {
                submitCallback(BKException.Code.OK);
            }
            return;
        }

        // otherwise, some other error, we can't handle
        if (BKException.Code.OK != rc && !promise.isDone()) {
            LOG.error("Failure {} while reading entries: ({} - {}), ledger: {} while recovering ledger",
                    BKException.getMessage(rc), startEntryToRead, endEntryToRead, lh.getId());
            submitCallback(rc);
        } else if (BKException.Code.OK == rc) {
            // we are here is because we successfully read an entry but readDone was already set to true.
            // this would happen on recovery a ledger than has gaps in the tail.
            LOG.warn("Successfully read entry {} for ledger {}, but readDone is already {}",
                    entry.getEntryId(), lh.getId(), readDone);
        }
        return;
    }

    @Override
    public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
        // A recovery re-add failed: abort the whole recovery with its error.
        if (rc != BKException.Code.OK) {
            LOG.error("Failure {} while writing entry: {} while recovering ledger: {}",
                    BKException.codeLogger(rc), entryId + 1, lh.ledgerId);
            submitCallback(rc);
            return;
        }
        long numAdd = writeCount.incrementAndGet();
        // Reads are finished and the last outstanding add just completed.
        if (readDone && readCount.get() == numAdd) {
            submitCallback(rc);
        }
    }
}
| 353 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/PendingReadLacOp.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import io.netty.buffer.ByteBuf;
import java.util.List;
import org.apache.bookkeeper.client.BKException.BKDigestMatchException;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieClient;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ReadLacCallback;
import org.apache.bookkeeper.proto.checksum.DigestManager.RecoveryData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This represents a pending ReadLac operation.
*
* <p>LAC is stored in two places on bookies.
* 1. WriteLac operation sends Explicit LAC and is stored in memory on each bookie.
* 2. Each AddEntry operation piggy-backs LAC which is stored on bookie's disk.
*
* <p>This operation returns both of those entries and we pick the latest LAC out of
* available answers.
*
* <p>This is an optional protocol operations to facilitate tailing readers
* to be up to date with the writer. This is best effort to get latest LAC
* from bookies, and doesn't affect the correctness of the protocol.
*/
class PendingReadLacOp implements ReadLacCallback {
    static final Logger LOG = LoggerFactory.getLogger(PendingReadLacOp.class);

    // Ledger being queried; supplies the digest manager and distribution schedule.
    LedgerHandle lh;
    BookieClient bookieClient;
    // Completion callback; invoked at most once with the final result.
    LacCallback cb;
    // Number of ensemble members that have not yet responded.
    int numResponsesPending;
    // Set once cb has fired; any responses arriving afterwards are ignored.
    volatile boolean completed = false;
    // Most recent non-valid response code; reported if coverage is never reached.
    int lastSeenError = BKException.Code.ReadException;
    // Records which bookies answered (and how) so we can tell when enough of
    // the ensemble has responded — see QuorumCoverageSet.checkCovered().
    final DistributionSchedule.QuorumCoverageSet coverageSet;
    // Largest LAC observed so far across explicit and piggy-backed values.
    long maxLac = LedgerHandle.INVALID_ENTRY_ID;
    final List<BookieId> currentEnsemble;

    /*
     * Wrapper to get Lac from the request
     */
    interface LacCallback {
        void getLacComplete(int rc, long lac);
    }

    PendingReadLacOp(LedgerHandle lh, BookieClient bookieClient, List<BookieId> ensemble, LacCallback cb) {
        this.lh = lh;
        this.bookieClient = bookieClient;
        this.cb = cb;
        this.numResponsesPending = ensemble.size();
        this.coverageSet = lh.distributionSchedule.getCoverageSet();
        this.currentEnsemble = ensemble;
    }

    /**
     * Sends a readLac request to every bookie in the current ensemble. The
     * bookie's index within the ensemble is passed as the callback context.
     */
    public void initiate() {
        for (int i = 0; i < currentEnsemble.size(); i++) {
            bookieClient.readLac(currentEnsemble.get(i), lh.ledgerId, this, i);
        }
    }

    // NOTE(review): mutable state (numResponsesPending, maxLac, lastSeenError)
    // is updated here without synchronization; presumably all callbacks for a
    // given ledger run on the same ordered executor thread — confirm against
    // the BookieClient threading model.
    @Override
    public void readLacComplete(int rc, long ledgerId, final ByteBuf lacBuffer, final ByteBuf lastEntryBuffer,
            Object ctx) {
        int bookieIndex = (Integer) ctx;

        // add the response to coverage set
        coverageSet.addBookie(bookieIndex, rc);

        numResponsesPending--;
        boolean heardValidResponse = false;
        if (completed) {
            return;
        }
        if (rc == BKException.Code.OK) {
            try {
                // Each bookie may have two store LAC in two places.
                // One is in-memory copy in FileInfo and other is
                // piggy-backed LAC on the last entry.
                // This routine picks both of them and compares to return
                // the latest Lac.

                // lacBuffer and lastEntryBuffer are optional in the protocol.
                // So check if they exist before processing them.

                // Extract lac from FileInfo on the ledger.
                if (lacBuffer != null && lacBuffer.readableBytes() > 0) {
                    long lac = lh.macManager.verifyDigestAndReturnLac(lacBuffer);
                    if (lac > maxLac) {
                        maxLac = lac;
                    }
                }
                // Extract lac from last entry on the disk
                if (lastEntryBuffer != null && lastEntryBuffer.readableBytes() > 0) {
                    RecoveryData recoveryData = lh.macManager.verifyDigestAndReturnLastConfirmed(lastEntryBuffer);
                    long recoveredLac = recoveryData.getLastAddConfirmed();
                    if (recoveredLac > maxLac) {
                        maxLac = recoveredLac;
                    }
                }
                heardValidResponse = true;
            } catch (BKDigestMatchException e) {
                // Too bad, this bookie did not give us a valid answer, we
                // still might be able to recover. So, continue
                LOG.error("Mac mismatch while reading ledger: " + ledgerId + " LAC from bookie: "
                        + currentEnsemble.get(bookieIndex));
                rc = BKException.Code.DigestMatchException;
            }
        }

        // "No such ledger/entry" is still a definite answer from the bookie,
        // so it counts as a valid response for coverage purposes.
        if (rc == BKException.Code.NoSuchLedgerExistsException || rc == BKException.Code.NoSuchEntryException) {
            heardValidResponse = true;
        }

        // Unauthorized access will not improve with more responses: fail now.
        if (rc == BKException.Code.UnauthorizedAccessException && !completed) {
            cb.getLacComplete(rc, maxLac);
            completed = true;
            return;
        }

        if (!heardValidResponse && BKException.Code.OK != rc) {
            lastSeenError = rc;
        }

        // We don't consider a success until we have coverage set responses.
        if (heardValidResponse
                && coverageSet.checkCovered()
                && !completed) {
            completed = true;
            if (LOG.isDebugEnabled()) {
                LOG.debug("Read LAC complete with enough validResponse for ledger: {} LAC: {}", ledgerId, maxLac);
            }
            cb.getLacComplete(BKException.Code.OK, maxLac);
            return;
        }

        // Every bookie has responded but coverage was never achieved: report
        // the last error observed.
        if (numResponsesPending == 0 && !completed) {
            LOG.error(
                    "While readLac ledger: {} did not hear success responses from all of ensemble, coverageSet is: {}",
                    ledgerId, coverageSet);
            cb.getLacComplete(lastSeenError, maxLac);
        }
    }
}
| 354 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/DynamicWeightedRandomSelectionImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import com.google.common.math.Quantiles;
import com.google.common.math.Quantiles.ScaleAndIndex;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* DynamicWeightedRandomSelectionImpl class implements both getNextRandom
* overloaded methods. Where getNextRandom() method considers all bookies it
* knows of as candidates, but getNextRandom(Collection selectedNodes) method
* considers only 'selectedNodes' as candidates.
*/
class DynamicWeightedRandomSelectionImpl<T> implements WeightedRandomSelection<T> {
    static final Logger LOG = LoggerFactory.getLogger(DynamicWeightedRandomSelectionImpl.class);

    // Ceiling on any single node's effective weight, expressed as a multiple
    // of the median weight; a non-positive value disables the ceiling.
    int maxProbabilityMultiplier;
    // Latest weights supplied through updateMap(); guarded by rwLock.
    final Map<T, WeightedObject> weightMap;
    final ReadWriteLock rwLock = new ReentrantReadWriteLock(true);
    Random rand;

    DynamicWeightedRandomSelectionImpl() {
        this(-1);
    }

    DynamicWeightedRandomSelectionImpl(int maxMultiplier) {
        this.maxProbabilityMultiplier = maxMultiplier;
        this.weightMap = new HashMap<T, WeightedObject>();
        rand = new Random(System.currentTimeMillis());
    }

    @Override
    public void updateMap(Map<T, WeightedObject> updatedMap) {
        rwLock.writeLock().lock();
        try {
            weightMap.clear();
            weightMap.putAll(updatedMap);
        } finally {
            rwLock.writeLock().unlock();
        }
    }

    @Override
    public T getNextRandom() {
        rwLock.readLock().lock();
        try {
            // The read lock is reentrant, so delegating to the collection
            // overload (which re-acquires it) is safe.
            return getNextRandom(weightMap.keySet());
        } finally {
            rwLock.readLock().unlock();
        }
    }

    /** Weight recorded for {@code node}, or 0 when the node is unknown. */
    private long recordedWeight(T node) {
        long w = 0;
        if (weightMap.containsKey(node)) {
            w = weightMap.get(node).getWeight();
        }
        return w;
    }

    @Override
    public T getNextRandom(Collection<T> selectedNodes) {
        rwLock.readLock().lock();
        try {
            // First pass: accumulate the true total weight and remember the
            // smallest positive weight. Nodes with no (or zero) weight are
            // later treated as if they carried that smallest positive weight,
            // so they are never starved of selection.
            long smallestPositiveWeight = Long.MAX_VALUE;
            long totalWeight = 0;
            for (T candidate : selectedNodes) {
                long w = recordedWeight(candidate);
                totalWeight += w;
                if (w > 0 && smallestPositiveWeight > w) {
                    smallestPositiveWeight = w;
                }
            }

            long medianWeight;
            if (totalWeight == 0) {
                // Nobody carries any weight: fall back to a uniform draw.
                smallestPositiveWeight = 1L;
                medianWeight = 1L;
            } else {
                ArrayList<Long> weights = selectedNodes.stream()
                        .map(this::recordedWeight)
                        .collect(Collectors.toCollection(ArrayList::new));
                medianWeight = (long) Quantiles.median().compute(weights);
            }

            // Effective per-node cap; non-positive multiplier disables it.
            long weightCeiling = maxProbabilityMultiplier * medianWeight;

            // Single pass over the candidates: each one displaces the current
            // pick with probability (its weight / cumulative weight so far),
            // which yields a selection proportional to the clamped weights.
            long cumulativeWeight = 0;
            T pick = null;
            for (T candidate : selectedNodes) {
                long w = recordedWeight(candidate);
                if (w <= 0) {
                    w = smallestPositiveWeight;
                } else if (weightCeiling > 0 && w > weightCeiling) {
                    w = weightCeiling;
                }
                long draw = rand.nextLong();
                if (draw == Long.MIN_VALUE) {
                    // Math.abs(Long.MIN_VALUE) is still negative; nudge it
                    // into representable range before taking the modulus.
                    draw++;
                }
                long randValue = Math.abs(draw) % (cumulativeWeight + w);
                if (randValue >= cumulativeWeight) {
                    pick = candidate;
                }
                cumulativeWeight += w;
            }
            return pick;
        } finally {
            rwLock.readLock().unlock();
        }
    }

    @Override
    public void setMaxProbabilityMultiplier(int max) {
        this.maxProbabilityMultiplier = max;
    }
}
| 355 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/UpdateLedgerOp.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import com.google.common.util.concurrent.RateLimiter;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import org.apache.bookkeeper.bookie.BookieShell.UpdateLedgerNotifier;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.versioning.Versioned;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Encapsulates updating the ledger metadata operation.
*/
public class UpdateLedgerOp {

    private static final Logger LOG = LoggerFactory.getLogger(UpdateLedgerOp.class);
    private final LedgerManager lm;
    private final BookKeeperAdmin admin;

    public UpdateLedgerOp(final BookKeeper bkc, final BookKeeperAdmin admin) {
        this.lm = bkc.getLedgerManager();
        this.admin = admin;
    }

    /**
     * Update the bookie id present in the ledger metadata.
     *
     * @param oldBookieId
     *            current bookie id
     * @param newBookieId
     *            new bookie id
     * @param rate
     *            number of ledgers updating per second (default 5 per sec)
     * @param maxOutstandingReads
     *            maximum number of ledger metadata reads allowed in flight at
     *            any one time
     * @param limit
     *            maximum number of ledgers to update (default: no limit). Stop
     *            update if reaching limit
     * @param progressable
     *            report progress of the ledger updates
     * @throws IOException
     *             if there is an error when updating bookie id in ledger
     *             metadata
     * @throws InterruptedException
     *             if interrupted while throttling or waiting for completion
     */
    public void updateBookieIdInLedgers(final BookieId oldBookieId, final BookieId newBookieId,
                                        final int rate, int maxOutstandingReads, final int limit,
                                        final UpdateLedgerNotifier progressable)
            throws IOException, InterruptedException {
        // Ledgers for which an update was started vs. successfully applied.
        final AtomicInteger issuedLedgerCnt = new AtomicInteger();
        final AtomicInteger updatedLedgerCnt = new AtomicInteger();
        // Settles (possibly exceptionally) once all issued updates finish, or
        // as soon as the first unexpected failure occurs.
        final CompletableFuture<Void> finalPromise = new CompletableFuture<>();
        // Updates currently in flight; entries remove themselves on completion.
        final Set<CompletableFuture<?>> outstanding =
                Collections.newSetFromMap(new ConcurrentHashMap<CompletableFuture<?>, Boolean>());
        final RateLimiter throttler = RateLimiter.create(rate);
        final Semaphore outstandingReads = new Semaphore(maxOutstandingReads);
        final Iterator<Long> ledgerItr = admin.listLedgers().iterator();

        // iterate through all the ledgers
        // (Integer.MIN_VALUE for limit acts as the "no limit" sentinel)
        while (ledgerItr.hasNext() && !finalPromise.isDone()
                && (limit == Integer.MIN_VALUE || issuedLedgerCnt.get() < limit)) {
            // semaphore to control reads according to update throttling
            outstandingReads.acquire();
            final long ledgerId = ledgerItr.next();
            issuedLedgerCnt.incrementAndGet();

            CompletableFuture<Versioned<LedgerMetadata>> writePromise = lm.readLedgerMetadata(ledgerId)
                    .thenCompose((readMetadata) -> {
                        AtomicReference<Versioned<LedgerMetadata>> ref = new AtomicReference<>(readMetadata);
                        return new MetadataUpdateLoop(
                                lm, ledgerId,
                                ref::get,
                                (metadata) -> {
                                    // Only rewrite metadata that actually
                                    // references the old bookie.
                                    return metadata.getAllEnsembles().values().stream()
                                            .flatMap(Collection::stream)
                                            .anyMatch(b -> b.equals(oldBookieId));
                                },
                                (metadata) -> {
                                    return replaceBookieInEnsembles(metadata, oldBookieId, newBookieId);
                                },
                                ref::compareAndSet, throttler).run();
                    });
            outstanding.add(writePromise);
            writePromise.whenComplete((metadata, ex) -> {
                // A ledger deleted while we iterate is not treated as failure.
                if (ex != null
                        && !(ex instanceof BKException.BKNoSuchLedgerExistsOnMetadataServerException)) {
                    String error = String.format("Failed to update ledger metadata %s, replacing %s with %s",
                            ledgerId, oldBookieId, newBookieId);
                    LOG.error(error, ex);
                    finalPromise.completeExceptionally(new IOException(error, ex));
                } else {
                    LOG.info("Updated ledger {} metadata, replacing {} with {}",
                            ledgerId, oldBookieId, newBookieId);
                    updatedLedgerCnt.incrementAndGet();
                    progressable.progress(updatedLedgerCnt.get(), issuedLedgerCnt.get());
                }
                outstandingReads.release();
                outstanding.remove(writePromise);
            });
        }

        // Wait for whatever is still in flight, then settle finalPromise
        // (allOf over an already-emptied snapshot completes immediately).
        CompletableFuture.allOf(outstanding.stream().toArray(CompletableFuture[]::new))
                .whenComplete((res, ex) -> {
                    if (ex != null) {
                        finalPromise.completeExceptionally(ex);
                    } else {
                        finalPromise.complete(null);
                    }
                });

        try {
            finalPromise.get();
            LOG.info("Total number of ledgers issued={} updated={}",
                    issuedLedgerCnt.get(), updatedLedgerCnt.get());
        } catch (ExecutionException e) {
            String error = String.format("Error waiting for ledger metadata updates to complete (replacing %s with %s)",
                    oldBookieId, newBookieId);
            LOG.info(error, e);
            // Unwrap IOException so callers see the documented exception type.
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause();
            } else {
                throw new IOException(error, e);
            }
        }
    }

    /**
     * Returns a copy of {@code metadata} in which every occurrence of
     * {@code oldBookieId} in every ensemble is replaced by {@code newBookieId}.
     */
    private static LedgerMetadata replaceBookieInEnsembles(LedgerMetadata metadata,
                                                           BookieId oldBookieId,
                                                           BookieId newBookieId) {
        LedgerMetadataBuilder builder = LedgerMetadataBuilder.from(metadata);
        for (Map.Entry<Long, ? extends List<BookieId>> e : metadata.getAllEnsembles().entrySet()) {
            List<BookieId> newEnsemble = e.getValue().stream()
                    .map(b -> b.equals(oldBookieId) ? newBookieId : b)
                    .collect(Collectors.toList());
            builder.replaceEnsembleEntry(e.getKey(), newEnsemble);
        }

        return builder.build();
    }
}
| 356 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/ExplicitLacFlushPolicy.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.client.SyncCallbackUtils.LastAddConfirmedCallback;
import org.apache.bookkeeper.util.ByteBufList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
interface ExplicitLacFlushPolicy {

    /**
     * Stops the periodic explicit-LAC flush task, if one was ever scheduled.
     */
    void stopExplicitLacFlush();

    /**
     * Records the LAC most recently piggy-backed on an add operation, so the
     * periodic task can tell whether an explicit LAC update is still needed.
     */
    void updatePiggyBackedLac(long piggyBackedLac);

    /** No-op policy used when explicit LAC flushing is not enabled. */
    ExplicitLacFlushPolicy VOID_EXPLICITLAC_FLUSH_POLICY = new ExplicitLacFlushPolicy() {
        @Override
        public void stopExplicitLacFlush() {
            // void method
        }

        @Override
        public void updatePiggyBackedLac(long piggyBackedLac) {
            // void method
        }
    };

    /**
     * Periodically pushes an explicit LAC update to the ensemble whenever the
     * LAC has not advanced on its own via piggy-backing on add operations.
     */
    class ExplicitLacFlushPolicyImpl implements ExplicitLacFlushPolicy {
        static final Logger LOG = LoggerFactory.getLogger(ExplicitLacFlushPolicyImpl.class);

        // Highest LAC that reached the bookies piggy-backed on adds.
        volatile long piggyBackedLac = LedgerHandle.INVALID_ENTRY_ID;
        // Highest LAC known to have been communicated, by either mechanism.
        volatile long explicitLac = LedgerHandle.INVALID_ENTRY_ID;
        final LedgerHandle lh;
        final ClientContext clientCtx;
        // Remains null if the scheduler rejected the periodic task; see
        // scheduleExplictLacFlush() and the guard in stopExplicitLacFlush().
        ScheduledFuture<?> scheduledFuture;

        ExplicitLacFlushPolicyImpl(LedgerHandle lh,
                                   ClientContext clientCtx) {
            this.lh = lh;
            this.clientCtx = clientCtx;
            scheduleExplictLacFlush();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Scheduled Explicit Last Add Confirmed Update");
            }
        }

        private long getExplicitLac() {
            return explicitLac;
        }

        private void setExplicitLac(long explicitLac) {
            this.explicitLac = explicitLac;
        }

        private long getPiggyBackedLac() {
            return piggyBackedLac;
        }

        public void setPiggyBackedLac(long piggyBackedLac) {
            this.piggyBackedLac = piggyBackedLac;
        }

        private void scheduleExplictLacFlush() {
            final Runnable updateLacTask = new Runnable() {
                @Override
                public void run() {
                    // Made progress since previous explicitLAC through
                    // Piggyback, so no need to send an explicit LAC update to
                    // bookies.
                    if (getExplicitLac() < getPiggyBackedLac()) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("ledgerid: {}", lh.getId());
                            LOG.debug("explicitLac:{} piggybackLac:{}", getExplicitLac(), getPiggyBackedLac());
                        }
                        setExplicitLac(getPiggyBackedLac());
                        return;
                    }

                    if (lh.getLastAddConfirmed() > getExplicitLac()) {
                        // Send Explicit LAC
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("ledgerid: {}", lh.getId());
                        }
                        asyncExplicitLacFlush(lh.getLastAddConfirmed());
                        setExplicitLac(lh.getLastAddConfirmed());
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("After sending explict LAC lac: {} explicitLac:{}", lh.getLastAddConfirmed(),
                                    getExplicitLac());
                        }
                    }
                }

                @Override
                public String toString() {
                    return String.format("UpdateLacTask ledgerId - (%d)", lh.getId());
                }
            };
            try {
                long explicitLacIntervalInMs = clientCtx.getConf().explicitLacInterval;
                scheduledFuture = clientCtx.getScheduler().scheduleAtFixedRateOrdered(lh.getId(), updateLacTask,
                        explicitLacIntervalInMs, explicitLacIntervalInMs, TimeUnit.MILLISECONDS);
            } catch (RejectedExecutionException re) {
                // scheduledFuture stays null; stopExplicitLacFlush() tolerates
                // that, so the ledger simply runs without explicit flushes.
                LOG.error("Scheduling of ExplictLastAddConfirmedFlush for ledger: {} has failed.", lh.getId(), re);
            }
        }

        /**
         * Make a LastAddUpdate request.
         */
        void asyncExplicitLacFlush(final long explicitLac) {
            final LastAddConfirmedCallback cb = LastAddConfirmedCallback.INSTANCE;
            final PendingWriteLacOp op = new PendingWriteLacOp(lh, clientCtx, lh.getCurrentEnsemble(), cb, null);
            op.setLac(explicitLac);
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Sending Explicit LAC: {}", explicitLac);
                }
                clientCtx.getMainWorkerPool().submit(() -> {
                    // NOTE(review): the packaged LAC is re-read from the handle
                    // rather than using the explicitLac parameter; the two may
                    // differ if adds complete in between — confirm intended.
                    ByteBufList toSend = lh.macManager
                            .computeDigestAndPackageForSendingLac(lh.getLastAddConfirmed());
                    op.initiate(toSend);
                });
            } catch (RejectedExecutionException e) {
                cb.addLacComplete(BookKeeper.getReturnRc(clientCtx.getBookieClient(),
                                BKException.Code.InterruptedException),
                        lh, null);
            }
        }

        @Override
        public void stopExplicitLacFlush() {
            // Scheduling may have been rejected at construction time (see
            // scheduleExplictLacFlush), leaving scheduledFuture null; guard
            // against an NPE in that case — there is nothing to cancel.
            if (scheduledFuture != null) {
                scheduledFuture.cancel(true);
            }
        }

        @Override
        public void updatePiggyBackedLac(long piggyBackedLac) {
            setPiggyBackedLac(piggyBackedLac);
        }
    }
}
| 357 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/RackawareEnsemblePlacementPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import io.netty.util.HashedWheelTimer;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.BookieNode;
import org.apache.bookkeeper.net.DNSToSwitchMapping;
import org.apache.bookkeeper.net.Node;
import org.apache.bookkeeper.proto.BookieAddressResolver;
import org.apache.bookkeeper.stats.StatsLogger;
/**
* A placement policy implementation use rack information for placing ensembles.
*
* @see EnsemblePlacementPolicy
*/
public class RackawareEnsemblePlacementPolicy extends RackawareEnsemblePlacementPolicyImpl
implements ITopologyAwareEnsemblePlacementPolicy<BookieNode> {
RackawareEnsemblePlacementPolicyImpl slave = null;
public RackawareEnsemblePlacementPolicy() {
super();
}
public RackawareEnsemblePlacementPolicy(boolean enforceDurability) {
super(enforceDurability);
}
@Override
protected RackawareEnsemblePlacementPolicy initialize(DNSToSwitchMapping dnsResolver,
HashedWheelTimer timer,
boolean reorderReadsRandom,
int stabilizePeriodSeconds,
int reorderThresholdPendingRequests,
boolean isWeighted,
int maxWeightMultiple,
int minNumRacksPerWriteQuorum,
boolean enforceMinNumRacksPerWriteQuorum,
boolean ignoreLocalNodeInPlacementPolicy,
StatsLogger statsLogger, BookieAddressResolver bookieAddressResolver) {
return initialize(dnsResolver, timer, reorderReadsRandom, stabilizePeriodSeconds,
reorderThresholdPendingRequests, isWeighted, maxWeightMultiple, minNumRacksPerWriteQuorum,
enforceMinNumRacksPerWriteQuorum, ignoreLocalNodeInPlacementPolicy, false,
statsLogger, bookieAddressResolver);
}
@Override
protected RackawareEnsemblePlacementPolicy initialize(DNSToSwitchMapping dnsResolver,
HashedWheelTimer timer,
boolean reorderReadsRandom,
int stabilizePeriodSeconds,
int reorderThresholdPendingRequests,
boolean isWeighted,
int maxWeightMultiple,
int minNumRacksPerWriteQuorum,
boolean enforceMinNumRacksPerWriteQuorum,
boolean ignoreLocalNodeInPlacementPolicy,
boolean useHostnameResolveLocalNodePlacementPolicy,
StatsLogger statsLogger, BookieAddressResolver bookieAddressResolver) {
if (stabilizePeriodSeconds > 0) {
super.initialize(dnsResolver, timer, reorderReadsRandom, 0, reorderThresholdPendingRequests, isWeighted,
maxWeightMultiple, minNumRacksPerWriteQuorum, enforceMinNumRacksPerWriteQuorum,
ignoreLocalNodeInPlacementPolicy, useHostnameResolveLocalNodePlacementPolicy,
statsLogger, bookieAddressResolver);
slave = new RackawareEnsemblePlacementPolicyImpl(enforceDurability);
slave.initialize(dnsResolver, timer, reorderReadsRandom, stabilizePeriodSeconds,
reorderThresholdPendingRequests, isWeighted, maxWeightMultiple, minNumRacksPerWriteQuorum,
enforceMinNumRacksPerWriteQuorum, ignoreLocalNodeInPlacementPolicy,
useHostnameResolveLocalNodePlacementPolicy, statsLogger, bookieAddressResolver);
} else {
super.initialize(dnsResolver, timer, reorderReadsRandom, stabilizePeriodSeconds,
reorderThresholdPendingRequests, isWeighted, maxWeightMultiple, minNumRacksPerWriteQuorum,
enforceMinNumRacksPerWriteQuorum, ignoreLocalNodeInPlacementPolicy,
useHostnameResolveLocalNodePlacementPolicy, statsLogger, bookieAddressResolver);
slave = null;
}
return this;
}
@Override
public void uninitalize() {
super.uninitalize();
if (null != slave) {
slave.uninitalize();
}
}
@Override
public Set<BookieId> onClusterChanged(Set<BookieId> writableBookies,
Set<BookieId> readOnlyBookies) {
Set<BookieId> deadBookies = super.onClusterChanged(writableBookies, readOnlyBookies);
if (null != slave) {
deadBookies = slave.onClusterChanged(writableBookies, readOnlyBookies);
}
return deadBookies;
}
@Override
public PlacementResult<List<BookieId>> newEnsemble(int ensembleSize, int writeQuorumSize,
int ackQuorumSize, Map<String, byte[]> customMetadata, Set<BookieId> excludeBookies)
throws BKException.BKNotEnoughBookiesException {
try {
return super.newEnsemble(ensembleSize, writeQuorumSize, ackQuorumSize, customMetadata, excludeBookies);
} catch (BKException.BKNotEnoughBookiesException bnebe) {
if (slave == null) {
throw bnebe;
} else {
return slave.newEnsemble(ensembleSize, writeQuorumSize, ackQuorumSize, customMetadata, excludeBookies);
}
}
}
@Override
public PlacementResult<BookieId> replaceBookie(int ensembleSize, int writeQuorumSize, int ackQuorumSize,
Map<String, byte[]> customMetadata, List<BookieId> currentEnsemble,
BookieId bookieToReplace, Set<BookieId> excludeBookies)
throws BKException.BKNotEnoughBookiesException {
try {
return super.replaceBookie(ensembleSize, writeQuorumSize, ackQuorumSize, customMetadata,
currentEnsemble, bookieToReplace, excludeBookies);
} catch (BKException.BKNotEnoughBookiesException bnebe) {
if (slave == null) {
throw bnebe;
} else {
return slave.replaceBookie(ensembleSize, writeQuorumSize, ackQuorumSize, customMetadata,
currentEnsemble, bookieToReplace, excludeBookies);
}
}
}
@Override
public DistributionSchedule.WriteSet reorderReadSequence(
List<BookieId> ensemble,
BookiesHealthInfo bookiesHealthInfo,
DistributionSchedule.WriteSet writeSet) {
return super.reorderReadSequence(ensemble, bookiesHealthInfo,
writeSet);
}
@Override
public DistributionSchedule.WriteSet reorderReadLACSequence(
List<BookieId> ensemble,
BookiesHealthInfo bookiesHealthInfo,
DistributionSchedule.WriteSet writeSet) {
return super.reorderReadLACSequence(ensemble, bookiesHealthInfo,
writeSet);
}
@Override
public PlacementResult<List<BookieId>> newEnsemble(int ensembleSize,
int writeQuorumSize,
int ackQuorumSize,
Set<BookieId> excludeBookies,
Ensemble<BookieNode> parentEnsemble,
Predicate<BookieNode> parentPredicate)
throws BKException.BKNotEnoughBookiesException {
try {
return super.newEnsemble(
ensembleSize,
writeQuorumSize,
ackQuorumSize,
excludeBookies,
parentEnsemble,
parentPredicate);
} catch (BKException.BKNotEnoughBookiesException bnebe) {
if (slave == null) {
throw bnebe;
} else {
return slave.newEnsemble(ensembleSize, writeQuorumSize, ackQuorumSize,
excludeBookies, parentEnsemble, parentPredicate);
}
}
}
@Override
public BookieNode selectFromNetworkLocation(
String networkLoc,
Set<Node> excludeBookies,
Predicate<BookieNode> predicate,
Ensemble<BookieNode> ensemble,
boolean fallbackToRandom)
throws BKException.BKNotEnoughBookiesException {
try {
return super.selectFromNetworkLocation(networkLoc, excludeBookies, predicate, ensemble,
fallbackToRandom);
} catch (BKException.BKNotEnoughBookiesException bnebe) {
if (slave == null) {
throw bnebe;
} else {
return slave.selectFromNetworkLocation(networkLoc, excludeBookies, predicate, ensemble,
fallbackToRandom);
}
}
}
@Override
public BookieNode selectFromNetworkLocation(
Set<String> excludeRacks,
Set<Node> excludeBookies,
Predicate<BookieNode> predicate,
Ensemble<BookieNode> ensemble,
boolean fallbackToRandom)
throws BKException.BKNotEnoughBookiesException {
try {
return super.selectFromNetworkLocation(excludeRacks, excludeBookies, predicate, ensemble, fallbackToRandom);
} catch (BKException.BKNotEnoughBookiesException bnebe) {
if (slave == null) {
throw bnebe;
} else {
return slave.selectFromNetworkLocation(excludeRacks, excludeBookies, predicate, ensemble,
fallbackToRandom);
}
}
}
@Override
public BookieNode selectFromNetworkLocation(
String networkLoc,
Set<String> excludeRacks,
Set<Node> excludeBookies,
Predicate<BookieNode> predicate,
Ensemble<BookieNode> ensemble,
boolean fallbackToRandom)
throws BKNotEnoughBookiesException {
try {
return super.selectFromNetworkLocation(networkLoc, excludeRacks, excludeBookies, predicate, ensemble,
fallbackToRandom);
} catch (BKException.BKNotEnoughBookiesException bnebe) {
if (slave == null) {
throw bnebe;
} else {
return slave.selectFromNetworkLocation(networkLoc, excludeRacks, excludeBookies, predicate, ensemble,
fallbackToRandom);
}
}
}
@Override
public PlacementResult<List<BookieId>> replaceToAdherePlacementPolicy(
int ensembleSize,
int writeQuorumSize,
int ackQuorumSize,
Set<BookieId> excludeBookies,
List<BookieId> currentEnsemble) {
final PlacementResult<List<BookieId>> placementResult =
super.replaceToAdherePlacementPolicy(ensembleSize, writeQuorumSize, ackQuorumSize,
excludeBookies, currentEnsemble);
if (placementResult.getAdheringToPolicy() != PlacementPolicyAdherence.FAIL) {
return placementResult;
} else {
if (slave == null) {
return placementResult;
} else {
return slave.replaceToAdherePlacementPolicy(ensembleSize, writeQuorumSize, ackQuorumSize,
excludeBookies, currentEnsemble);
}
}
}
@Override
public void handleBookiesThatLeft(Set<BookieId> leftBookies) {
super.handleBookiesThatLeft(leftBookies);
if (null != slave) {
slave.handleBookiesThatLeft(leftBookies);
}
}
@Override
public void handleBookiesThatJoined(Set<BookieId> joinedBookies) {
super.handleBookiesThatJoined(joinedBookies);
if (null != slave) {
slave.handleBookiesThatJoined(joinedBookies);
}
}
}
| 358 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/RackawareEnsemblePlacementPolicyImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIES_JOINED;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIES_LEFT;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.FAILED_TO_RESOLVE_NETWORK_LOCATION_COUNT;
import static org.apache.bookkeeper.client.BookKeeperClientStats.CLIENT_SCOPE;
import static org.apache.bookkeeper.client.BookKeeperClientStats.NUM_WRITABLE_BOOKIES_IN_DEFAULT_RACK;
import static org.apache.bookkeeper.client.BookKeeperClientStats.READ_REQUESTS_REORDERED;
import static org.apache.bookkeeper.client.RegionAwareEnsemblePlacementPolicy.UNKNOWN_REGION;
import com.beust.jcommander.internal.Lists;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import io.netty.util.HashedWheelTimer;
import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException;
import org.apache.bookkeeper.client.BookieInfoReader.BookieInfo;
import org.apache.bookkeeper.client.WeightedRandomSelection.WeightedObject;
import org.apache.bookkeeper.common.util.ReflectionUtils;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.conf.Configurable;
import org.apache.bookkeeper.feature.FeatureProvider;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.BookieNode;
import org.apache.bookkeeper.net.DNSToSwitchMapping;
import org.apache.bookkeeper.net.NetworkTopology;
import org.apache.bookkeeper.net.NetworkTopologyImpl;
import org.apache.bookkeeper.net.Node;
import org.apache.bookkeeper.net.NodeBase;
import org.apache.bookkeeper.net.ScriptBasedMapping;
import org.apache.bookkeeper.net.StabilizeNetworkTopology;
import org.apache.bookkeeper.proto.BookieAddressResolver;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Simple rackware ensemble placement policy.
*
* <p>Make most of the class and methods as protected, so it could be extended to implement other algorithms.
*/
@StatsDoc(
name = CLIENT_SCOPE,
help = "BookKeeper client stats"
)
public class RackawareEnsemblePlacementPolicyImpl extends TopologyAwareEnsemblePlacementPolicy {
static final Logger LOG = LoggerFactory.getLogger(RackawareEnsemblePlacementPolicyImpl.class);
int maxWeightMultiple;
protected int minNumRacksPerWriteQuorum;
protected boolean enforceMinNumRacksPerWriteQuorum;
protected boolean ignoreLocalNodeInPlacementPolicy;
protected boolean useHostnameResolveLocalNodePlacementPolicy;
public static final String REPP_RANDOM_READ_REORDERING = "ensembleRandomReadReordering";
static final int RACKNAME_DISTANCE_FROM_LEAVES = 1;
// masks for reordering
static final int LOCAL_MASK = 0x01 << 24;
static final int LOCAL_FAIL_MASK = 0x02 << 24;
static final int REMOTE_MASK = 0x04 << 24;
static final int REMOTE_FAIL_MASK = 0x08 << 24;
static final int READ_ONLY_MASK = 0x10 << 24;
static final int SLOW_MASK = 0x20 << 24;
static final int UNAVAIL_MASK = 0x40 << 24;
static final int MASK_BITS = 0xFFF << 20;
protected HashedWheelTimer timer;
// Use a loading cache so slow bookies are expired. Use entryId as values.
protected Cache<BookieId, Long> slowBookies;
protected BookieNode localNode;
protected boolean reorderReadsRandom = false;
protected boolean enforceDurability = false;
protected int stabilizePeriodSeconds = 0;
protected int reorderThresholdPendingRequests = 0;
// looks like these only assigned in the same thread as constructor, immediately after constructor;
// no need to make volatile
protected StatsLogger statsLogger = null;
@StatsDoc(
name = READ_REQUESTS_REORDERED,
help = "The distribution of number of bookies reordered on each read request"
)
protected OpStatsLogger readReorderedCounter = null;
@StatsDoc(
name = FAILED_TO_RESOLVE_NETWORK_LOCATION_COUNT,
help = "Counter for number of times DNSResolverDecorator failed to resolve Network Location"
)
protected Counter failedToResolveNetworkLocationCounter = null;
@StatsDoc(
name = NUM_WRITABLE_BOOKIES_IN_DEFAULT_RACK,
help = "Gauge for the number of writable Bookies in default rack"
)
protected Gauge<Integer> numWritableBookiesInDefaultRack;
private String defaultRack = NetworkTopology.DEFAULT_RACK;
    /**
     * Creates a rackaware placement policy that does not enforce durability
     * when selecting bookies at random.
     */
    RackawareEnsemblePlacementPolicyImpl() {
        this(false);
    }
    /**
     * Creates a rackaware placement policy.
     *
     * @param enforceDurability when true, random selection must also satisfy the
     *            supplied ensemble predicate (see {@code selectRandomInternal})
     */
    RackawareEnsemblePlacementPolicyImpl(boolean enforceDurability) {
        this.enforceDurability = enforceDurability;
        // a default topology; initialize(...) replaces it (possibly with a stabilized one)
        topology = new NetworkTopologyImpl();
    }
/**
* Initialize the policy.
*
* @param dnsResolver
* @param timer
* @param reorderReadsRandom
* @param stabilizePeriodSeconds
* @param reorderThresholdPendingRequests
* @param isWeighted
* @param maxWeightMultiple
* @param minNumRacksPerWriteQuorum
* @param enforceMinNumRacksPerWriteQuorum
* @param ignoreLocalNodeInPlacementPolicy
* @param statsLogger
* @param bookieAddressResolver
* @return initialized ensemble placement policy
*/
    // Backward-compatible overload: delegates to the full initialize(...) with
    // useHostnameResolveLocalNodePlacementPolicy = false.
    protected RackawareEnsemblePlacementPolicyImpl initialize(DNSToSwitchMapping dnsResolver,
                                                              HashedWheelTimer timer,
                                                              boolean reorderReadsRandom,
                                                              int stabilizePeriodSeconds,
                                                              int reorderThresholdPendingRequests,
                                                              boolean isWeighted,
                                                              int maxWeightMultiple,
                                                              int minNumRacksPerWriteQuorum,
                                                              boolean enforceMinNumRacksPerWriteQuorum,
                                                              boolean ignoreLocalNodeInPlacementPolicy,
                                                              StatsLogger statsLogger,
                                                              BookieAddressResolver bookieAddressResolver) {
        return initialize(dnsResolver, timer, reorderReadsRandom, stabilizePeriodSeconds,
                reorderThresholdPendingRequests, isWeighted, maxWeightMultiple, minNumRacksPerWriteQuorum,
                enforceMinNumRacksPerWriteQuorum, ignoreLocalNodeInPlacementPolicy,
                false, statsLogger, bookieAddressResolver);
    }
/**
* Initialize the policy.
*
* @param dnsResolver the object used to resolve addresses to their network address
* @return initialized ensemble placement policy
*/
    protected RackawareEnsemblePlacementPolicyImpl initialize(DNSToSwitchMapping dnsResolver,
                                                              HashedWheelTimer timer,
                                                              boolean reorderReadsRandom,
                                                              int stabilizePeriodSeconds,
                                                              int reorderThresholdPendingRequests,
                                                              boolean isWeighted,
                                                              int maxWeightMultiple,
                                                              int minNumRacksPerWriteQuorum,
                                                              boolean enforceMinNumRacksPerWriteQuorum,
                                                              boolean ignoreLocalNodeInPlacementPolicy,
                                                              boolean useHostnameResolveLocalNodePlacementPolicy,
                                                              StatsLogger statsLogger,
                                                              BookieAddressResolver bookieAddressResolver) {
        checkNotNull(statsLogger, "statsLogger should not be null, use NullStatsLogger instead.");
        this.statsLogger = statsLogger;
        this.bookieAddressResolver = bookieAddressResolver;
        // per-policy stats: join/leave counters, read-reorder distribution, DNS failures
        this.bookiesJoinedCounter = statsLogger.getOpStatsLogger(BOOKIES_JOINED);
        this.bookiesLeftCounter = statsLogger.getOpStatsLogger(BOOKIES_LEFT);
        this.readReorderedCounter = statsLogger.getOpStatsLogger(READ_REQUESTS_REORDERED);
        this.failedToResolveNetworkLocationCounter = statsLogger.getCounter(FAILED_TO_RESOLVE_NETWORK_LOCATION_COUNT);
        // gauge sampling the count of writable bookies currently placed in the default rack;
        // sampled under the topology read lock for a consistent view
        this.numWritableBookiesInDefaultRack = new Gauge<Integer>() {
            @Override
            public Integer getDefaultValue() {
                return 0;
            }
            @Override
            public Integer getSample() {
                rwLock.readLock().lock();
                try {
                    return topology.countNumOfAvailableNodes(getDefaultRack(), Collections.emptySet());
                } finally {
                    rwLock.readLock().unlock();
                }
            }
        };
        this.statsLogger.registerGauge(NUM_WRITABLE_BOOKIES_IN_DEFAULT_RACK, numWritableBookiesInDefaultRack);
        this.reorderReadsRandom = reorderReadsRandom;
        this.stabilizePeriodSeconds = stabilizePeriodSeconds;
        this.reorderThresholdPendingRequests = reorderThresholdPendingRequests;
        // wrap the resolver so failures fall back to the (possibly updated) default rack
        this.dnsResolver = new DNSResolverDecorator(dnsResolver, () -> this.getDefaultRack(),
                failedToResolveNetworkLocationCounter);
        this.timer = timer;
        this.minNumRacksPerWriteQuorum = minNumRacksPerWriteQuorum;
        this.enforceMinNumRacksPerWriteQuorum = enforceMinNumRacksPerWriteQuorum;
        this.ignoreLocalNodeInPlacementPolicy = ignoreLocalNodeInPlacementPolicy;
        this.useHostnameResolveLocalNodePlacementPolicy = useHostnameResolveLocalNodePlacementPolicy;
        // create the network topology; a stabilize period > 0 delays the effect of
        // node removals to damp flapping bookies
        if (stabilizePeriodSeconds > 0) {
            this.topology = new StabilizeNetworkTopology(timer, stabilizePeriodSeconds);
        } else {
            this.topology = new NetworkTopologyImpl();
        }
        // resolve the local node (used to prefer the local rack when forming ensembles),
        // either by hostname or by IP address, unless explicitly ignored
        BookieNode bn = null;
        if (!ignoreLocalNodeInPlacementPolicy) {
            try {
                String hostname = useHostnameResolveLocalNodePlacementPolicy
                    ? InetAddress.getLocalHost().getCanonicalHostName() : InetAddress.getLocalHost().getHostAddress();
                bn = createDummyLocalBookieNode(hostname);
            } catch (IOException e) {
                LOG.error("Failed to get local host address : ", e);
            }
        } else {
            LOG.info("Ignoring LocalNode in Placementpolicy");
        }
        localNode = bn;
        LOG.info("Initialize rackaware ensemble placement policy @ {} @ {} : {}.",
                localNode, null == localNode ? "Unknown" : localNode.getNetworkLocation(),
                dnsResolver.getClass().getName());
        // optional disk-weight based placement: bookies with more free space are
        // picked proportionally more often, capped by maxWeightMultiple
        this.isWeighted = isWeighted;
        if (this.isWeighted) {
            this.maxWeightMultiple = maxWeightMultiple;
            this.weightedSelection = new WeightedRandomSelectionImpl<BookieNode>(this.maxWeightMultiple);
            LOG.info("Weight based placement with max multiple of " + this.maxWeightMultiple);
        } else {
            LOG.info("Not weighted");
        }
        return this;
    }
/*
* sets default rack for the policy.
* i.e. region-aware policy may want to have /region/rack while regular
* rack-aware policy needs /rack only since we cannot mix both styles
*/
    /**
     * Sets the default rack used when a bookie's location cannot be resolved.
     *
     * @param rack default rack path (must not be null)
     * @return this policy, for chaining
     */
    public RackawareEnsemblePlacementPolicyImpl withDefaultRack(String rack) {
        checkNotNull(rack, "Default rack cannot be null");
        this.defaultRack = rack;
        return this;
    }
    /** Returns the default rack used for bookies with unresolved locations. */
    public String getDefaultRack() {
        return defaultRack;
    }
    @Override
    public RackawareEnsemblePlacementPolicyImpl initialize(ClientConfiguration conf,
                                                           Optional<DNSToSwitchMapping> optionalDnsResolver,
                                                           HashedWheelTimer timer,
                                                           FeatureProvider featureProvider,
                                                           StatsLogger statsLogger,
                                                           BookieAddressResolver bookieAddressResolver) {
        this.bookieAddressResolver = bookieAddressResolver;
        // use the supplied resolver if any, otherwise instantiate the configured
        // REPP_DNS_RESOLVER_CLASS (defaults to the script-based mapping)
        DNSToSwitchMapping dnsResolver;
        if (optionalDnsResolver.isPresent()) {
            dnsResolver = optionalDnsResolver.get();
        } else {
            String dnsResolverName = conf.getString(REPP_DNS_RESOLVER_CLASS, ScriptBasedMapping.class.getName());
            try {
                dnsResolver = ReflectionUtils.newInstance(dnsResolverName, DNSToSwitchMapping.class);
                dnsResolver.setBookieAddressResolver(bookieAddressResolver);
                if (dnsResolver instanceof Configurable) {
                    ((Configurable) dnsResolver).setConf(conf);
                }
                if (dnsResolver instanceof RackChangeNotifier) {
                    ((RackChangeNotifier) dnsResolver).registerRackChangeListener(this);
                }
            } catch (RuntimeException re) {
                // fall back to the default subnet resolver only when rack constraints
                // are not strictly enforced; otherwise fail fast
                if (!conf.getEnforceMinNumRacksPerWriteQuorum()) {
                    LOG.warn("Failed to initialize DNS Resolver {}, used default subnet resolver because {}",
                            dnsResolverName, re.getMessage());
                    dnsResolver = new DefaultResolver(this::getDefaultRack);
                    dnsResolver.setBookieAddressResolver(bookieAddressResolver);
                } else {
                    /*
                     * if minNumRacksPerWriteQuorum is enforced, then it
                     * shouldn't continue in the case of failure to create
                     * dnsResolver.
                     */
                    throw re;
                }
            }
        }
        // slow-bookie cache: entries auto-expire so a bookie is only treated as slow
        // for a bounded window after its last registration
        slowBookies = CacheBuilder.newBuilder()
            .expireAfterWrite(conf.getBookieFailureHistoryExpirationMSec(), TimeUnit.MILLISECONDS)
            .build(new CacheLoader<BookieId, Long>() {
                @Override
                public Long load(BookieId key) throws Exception {
                    return -1L;
                }
            });
        return initialize(
                dnsResolver,
                timer,
                conf.getBoolean(REPP_RANDOM_READ_REORDERING, false),
                conf.getNetworkTopologyStabilizePeriodSeconds(),
                conf.getReorderThresholdPendingRequests(),
                conf.getDiskWeightBasedPlacementEnabled(),
                conf.getBookieMaxWeightMultipleForWeightBasedPlacement(),
                conf.getMinNumRacksPerWriteQuorum(),
                conf.getEnforceMinNumRacksPerWriteQuorum(),
                conf.getIgnoreLocalNodeInPlacementPolicy(),
                conf.getUseHostnameResolveLocalNodePlacementPolicy(),
                statsLogger,
                bookieAddressResolver);
    }
    @Override
    public void uninitalize() {
        // do nothing - the policy holds no resources that need explicit release here
    }
/*
* this method should be called in readlock scope of 'rwLock'
*/
protected Set<BookieId> addDefaultRackBookiesIfMinNumRacksIsEnforced(
Set<BookieId> excludeBookies) {
Set<BookieId> comprehensiveExclusionBookiesSet;
if (enforceMinNumRacksPerWriteQuorum) {
Set<BookieId> bookiesInDefaultRack = null;
Set<Node> defaultRackLeaves = topology.getLeaves(getDefaultRack());
for (Node node : defaultRackLeaves) {
if (node instanceof BookieNode) {
if (bookiesInDefaultRack == null) {
bookiesInDefaultRack = new HashSet<BookieId>(excludeBookies);
}
bookiesInDefaultRack.add(((BookieNode) node).getAddr());
} else {
LOG.error("found non-BookieNode: {} as leaf of defaultrack: {}", node, getDefaultRack());
}
}
if ((bookiesInDefaultRack == null) || bookiesInDefaultRack.isEmpty()) {
comprehensiveExclusionBookiesSet = excludeBookies;
} else {
comprehensiveExclusionBookiesSet = new HashSet<BookieId>(excludeBookies);
comprehensiveExclusionBookiesSet.addAll(bookiesInDefaultRack);
LOG.info("enforceMinNumRacksPerWriteQuorum is enabled, so Excluding bookies of defaultRack: {}",
bookiesInDefaultRack);
}
} else {
comprehensiveExclusionBookiesSet = excludeBookies;
}
return comprehensiveExclusionBookiesSet;
}
    @Override
    public PlacementResult<List<BookieId>> newEnsemble(int ensembleSize, int writeQuorumSize,
            int ackQuorumSize, Map<String, byte[]> customMetadata, Set<BookieId> excludeBookies)
            throws BKNotEnoughBookiesException {
        // take the read lock for a consistent topology view during selection
        rwLock.readLock().lock();
        try {
            // if min-racks is enforced, also exclude default-rack (unresolved-location) bookies
            Set<BookieId> comprehensiveExclusionBookiesSet = addDefaultRackBookiesIfMinNumRacksIsEnforced(
                    excludeBookies);
            PlacementResult<List<BookieId>> newEnsembleResult = newEnsembleInternal(ensembleSize,
                    writeQuorumSize, ackQuorumSize, comprehensiveExclusionBookiesSet, null, null);
            return newEnsembleResult;
        } finally {
            rwLock.readLock().unlock();
        }
    }
    // Variant used by hierarchical policies (e.g. region-aware): the parent ensemble and
    // predicate constrain selection. Note: does NOT apply the default-rack exclusion.
    @Override
    public PlacementResult<List<BookieId>> newEnsemble(int ensembleSize,
                                                       int writeQuorumSize,
                                                       int ackQuorumSize,
                                                       Set<BookieId> excludeBookies,
                                                       Ensemble<BookieNode> parentEnsemble,
                                                       Predicate<BookieNode> parentPredicate)
            throws BKNotEnoughBookiesException {
        return newEnsembleInternal(
                ensembleSize,
                writeQuorumSize,
                ackQuorumSize,
                excludeBookies,
                parentEnsemble,
                parentPredicate);
    }
    /**
     * Core ensemble-formation routine: picks {@code ensembleSize} bookies, walking
     * rack-by-rack so that consecutive bookies land on different racks where possible.
     *
     * @param ensembleSize number of bookies to pick
     * @param writeQuorumSize write quorum size (bounds the per-quorum rack requirement)
     * @param ackQuorumSize ack quorum size
     * @param excludeBookies bookies that must not be selected
     * @param parentEnsemble optional enclosing ensemble constraint (may be null)
     * @param parentPredicate optional enclosing predicate constraint (may be null)
     * @return the selected bookie list plus whether it adheres to the placement policy
     * @throws BKNotEnoughBookiesException if not enough eligible bookies exist
     */
    protected PlacementResult<List<BookieId>> newEnsembleInternal(
            int ensembleSize,
            int writeQuorumSize,
            int ackQuorumSize,
            Set<BookieId> excludeBookies,
            Ensemble<BookieNode> parentEnsemble,
            Predicate<BookieNode> parentPredicate) throws BKNotEnoughBookiesException {
        rwLock.readLock().lock();
        try {
            Set<Node> excludeNodes = convertBookiesToNodes(excludeBookies);
            // can never need more racks per write quorum than the quorum has members
            int minNumRacksPerWriteQuorumForThisEnsemble = Math.min(writeQuorumSize, minNumRacksPerWriteQuorum);
            RRTopologyAwareCoverageEnsemble ensemble =
                    new RRTopologyAwareCoverageEnsemble(
                            ensembleSize,
                            writeQuorumSize,
                            ackQuorumSize,
                            RACKNAME_DISTANCE_FROM_LEAVES,
                            parentEnsemble,
                            parentPredicate,
                            minNumRacksPerWriteQuorumForThisEnsemble);
            BookieNode prevNode = null;
            int numRacks = topology.getNumOfRacks();
            // only one rack, use the random algorithm.
            if (numRacks < 2) {
                // with a single rack the min-racks constraint is unsatisfiable when > 1
                if (enforceMinNumRacksPerWriteQuorum && (minNumRacksPerWriteQuorumForThisEnsemble > 1)) {
                    LOG.error("Only one rack available and minNumRacksPerWriteQuorum is enforced, so giving up");
                    throw new BKNotEnoughBookiesException();
                }
                List<BookieNode> bns = selectRandom(ensembleSize, excludeNodes, TruePredicate.INSTANCE,
                        ensemble);
                ArrayList<BookieId> addrs = new ArrayList<BookieId>(ensembleSize);
                for (BookieNode bn : bns) {
                    addrs.add(bn.getAddr());
                }
                // single-rack placement can never adhere to a rack-diversity policy
                return PlacementResult.of(addrs, PlacementPolicyAdherence.FAIL);
            }
            //Choose different rack nodes.
            // The first pick prefers the local rack; each subsequent pick excludes the
            // racks of previously chosen bookies via a "~rack1~rack2..." exclusion path.
            String curRack = null;
            for (int i = 0; i < ensembleSize; i++) {
                if (null == prevNode) {
                    if ((null == localNode) || defaultRack.equals(localNode.getNetworkLocation())) {
                        curRack = NodeBase.ROOT;
                    } else {
                        curRack = localNode.getNetworkLocation();
                    }
                } else {
                    if (!curRack.startsWith("~")) {
                        curRack = "~" + prevNode.getNetworkLocation();
                    } else {
                        curRack = curRack + NetworkTopologyImpl.NODE_SEPARATOR + prevNode.getNetworkLocation();
                    }
                }
                boolean firstBookieInTheEnsemble = (null == prevNode);
                try {
                    prevNode = selectRandomFromRack(curRack, excludeNodes, ensemble, ensemble);
                } catch (BKNotEnoughBookiesException e) {
                    // rack-constrained pick failed: retry from the whole cluster, unless
                    // min-racks is enforced (then only the very first pick may fall back)
                    if (!curRack.equals(NodeBase.ROOT)) {
                        curRack = NodeBase.ROOT;
                        prevNode = selectFromNetworkLocation(curRack, excludeNodes, ensemble, ensemble,
                                !enforceMinNumRacksPerWriteQuorum || firstBookieInTheEnsemble);
                    } else {
                        throw e;
                    }
                }
            }
            List<BookieId> bookieList = ensemble.toList();
            if (ensembleSize != bookieList.size()) {
                LOG.error("Not enough {} bookies are available to form an ensemble : {}.",
                          ensembleSize, bookieList);
                throw new BKNotEnoughBookiesException();
            }
            return PlacementResult.of(bookieList,
                                      isEnsembleAdheringToPlacementPolicy(
                                              bookieList, writeQuorumSize, ackQuorumSize));
        } finally {
            rwLock.readLock().unlock();
        }
    }
@Override
public PlacementResult<BookieId> replaceBookie(int ensembleSize, int writeQuorumSize, int ackQuorumSize,
Map<String, byte[]> customMetadata, List<BookieId> currentEnsemble,
BookieId bookieToReplace, Set<BookieId> excludeBookies)
throws BKNotEnoughBookiesException {
rwLock.readLock().lock();
try {
excludeBookies = addDefaultRackBookiesIfMinNumRacksIsEnforced(excludeBookies);
excludeBookies.addAll(currentEnsemble);
BookieNode bn = knownBookies.get(bookieToReplace);
if (null == bn) {
bn = createBookieNode(bookieToReplace);
}
Set<Node> ensembleNodes = convertBookiesToNodes(currentEnsemble);
Set<Node> excludeNodes = convertBookiesToNodes(excludeBookies);
excludeNodes.addAll(ensembleNodes);
excludeNodes.add(bn);
ensembleNodes.remove(bn);
Set<String> networkLocationsToBeExcluded = getNetworkLocations(ensembleNodes);
if (LOG.isDebugEnabled()) {
LOG.debug("Try to choose a new bookie to replace {} from ensemble {}, excluding {}.",
bookieToReplace, ensembleNodes, excludeNodes);
}
// pick a candidate from same rack to replace
BookieNode candidate = selectFromNetworkLocation(
bn.getNetworkLocation(),
networkLocationsToBeExcluded,
excludeNodes,
TruePredicate.INSTANCE,
EnsembleForReplacementWithNoConstraints.INSTANCE,
!enforceMinNumRacksPerWriteQuorum);
if (LOG.isDebugEnabled()) {
LOG.debug("Bookie {} is chosen to replace bookie {}.", candidate, bn);
}
BookieId candidateAddr = candidate.getAddr();
List<BookieId> newEnsemble = new ArrayList<BookieId>(currentEnsemble);
if (currentEnsemble.isEmpty()) {
/*
* in testing code there are test cases which would pass empty
* currentEnsemble
*/
newEnsemble.add(candidateAddr);
} else {
newEnsemble.set(currentEnsemble.indexOf(bookieToReplace), candidateAddr);
}
return PlacementResult.of(candidateAddr,
isEnsembleAdheringToPlacementPolicy(newEnsemble, writeQuorumSize, ackQuorumSize));
} finally {
rwLock.readLock().unlock();
}
}
    /**
     * Selects a bookie from the given network location, optionally falling back to a
     * cluster-wide random pick when the location has no eligible bookie.
     *
     * <p>NOTE(review): on fallback the provided predicate IS still passed to
     * {@code selectRandom}, despite the comment below - verify intended semantics.
     */
    @Override
    public BookieNode selectFromNetworkLocation(
            String networkLoc,
            Set<Node> excludeBookies,
            Predicate<BookieNode> predicate,
            Ensemble<BookieNode> ensemble,
            boolean fallbackToRandom)
            throws BKNotEnoughBookiesException {
        // select one from local rack
        try {
            return selectRandomFromRack(networkLoc, excludeBookies, predicate, ensemble);
        } catch (BKNotEnoughBookiesException e) {
            if (!fallbackToRandom) {
                LOG.error(
                        "Failed to choose a bookie from {} : "
                                + "excluded {}, enforceMinNumRacksPerWriteQuorum is enabled so giving up.",
                        networkLoc, excludeBookies);
                throw e;
            }
            LOG.warn("Failed to choose a bookie from network location {}, "
                            + "the bookies in the network location are {}, excluded bookies {}, "
                            + "current ensemble {}, fallback to choose bookie randomly from the cluster.",
                    networkLoc, topology.getLeaves(networkLoc), excludeBookies, ensemble);
            // randomly choose one from whole cluster, ignore the provided predicate.
            return selectRandom(1, excludeBookies, predicate, ensemble).get(0);
        }
    }
    /**
     * Selects a bookie from the given network location; when that fails, falls back to a
     * cluster-wide selection that avoids the racks listed in {@code excludeRacks}.
     */
    @Override
    public BookieNode selectFromNetworkLocation(String networkLoc,
                                                Set<String> excludeRacks,
                                                Set<Node> excludeBookies,
                                                Predicate<BookieNode> predicate,
                                                Ensemble<BookieNode> ensemble,
                                                boolean fallbackToRandom)
            throws BKNotEnoughBookiesException {
        // first attempt to select one from local rack
        try {
            return selectRandomFromRack(networkLoc, excludeBookies, predicate, ensemble);
        } catch (BKNotEnoughBookiesException e) {
            /*
             * there is no enough bookie from local rack, select bookies from
             * the whole cluster and exclude the racks specified at
             * <tt>excludeRacks</tt>.
             */
            LOG.warn("Failed to choose a bookie node from network location {}, "
                            + "the bookies in the network location are {}, excluded bookies {}, "
                            + "current ensemble {}, fallback to choose bookie randomly from the cluster.",
                    networkLoc, topology.getLeaves(networkLoc), excludeBookies, ensemble);
            return selectFromNetworkLocation(excludeRacks, excludeBookies, predicate, ensemble, fallbackToRandom);
        }
    }
/**
* It randomly selects a {@link BookieNode} that is not on the <i>excludeRacks</i> set, excluding the nodes in
* <i>excludeBookies</i> set. If it fails to find one, it selects a random {@link BookieNode} from the whole
* cluster.
*/
@Override
public BookieNode selectFromNetworkLocation(Set<String> excludeRacks,
Set<Node> excludeBookies,
Predicate<BookieNode> predicate,
Ensemble<BookieNode> ensemble,
boolean fallbackToRandom)
throws BKNotEnoughBookiesException {
List<BookieNode> knownNodes = new ArrayList<>(knownBookies.values());
Set<Node> fullExclusionBookiesList = new HashSet<Node>(excludeBookies);
for (BookieNode knownNode : knownNodes) {
if (excludeRacks.contains(knownNode.getNetworkLocation())) {
fullExclusionBookiesList.add(knownNode);
}
}
try {
return selectRandomInternal(knownNodes, 1, fullExclusionBookiesList, predicate, ensemble).get(0);
} catch (BKNotEnoughBookiesException e) {
if (!fallbackToRandom) {
LOG.error(
"Failed to choose a bookie excluding Racks: {} "
+ "Nodes: {}, enforceMinNumRacksPerWriteQuorum is enabled so giving up.",
excludeRacks, excludeBookies);
throw e;
}
LOG.warn("Failed to choose a bookie: excluded {}, fallback to choose bookie randomly from the cluster.",
excludeBookies);
// randomly choose one from whole cluster
return selectRandom(1, excludeBookies, predicate, ensemble).get(0);
}
}
private WeightedRandomSelection<BookieNode> prepareForWeightedSelection(List<Node> leaves) {
// create a map of bookieNode->freeDiskSpace for this rack. The assumption is that
// the number of nodes in a rack is of the order of 40, so it shouldn't be too bad
// to build it every time during a ledger creation
Map<BookieNode, WeightedObject> rackMap = new HashMap<BookieNode, WeightedObject>();
for (Node n : leaves) {
if (!(n instanceof BookieNode)) {
continue;
}
BookieNode bookie = (BookieNode) n;
if (this.bookieInfoMap.containsKey(bookie)) {
rackMap.put(bookie, this.bookieInfoMap.get(bookie));
} else {
rackMap.put(bookie, new BookieInfo());
}
}
if (rackMap.size() == 0) {
return null;
}
WeightedRandomSelection<BookieNode> wRSelection = new WeightedRandomSelectionImpl<BookieNode>(
maxWeightMultiple);
wRSelection.updateMap(rackMap);
return wRSelection;
}
/**
* Choose random node under a given network path.
*
* @param netPath
* network path
* @param excludeBookies
* exclude bookies
* @param predicate
* predicate to check whether the target is a good target.
* @param ensemble
* ensemble structure
* @return chosen bookie.
*/
    protected BookieNode selectRandomFromRack(String netPath, Set<Node> excludeBookies, Predicate<BookieNode> predicate,
            Ensemble<BookieNode> ensemble) throws BKNotEnoughBookiesException {
        WeightedRandomSelection<BookieNode> wRSelection = null;
        List<Node> leaves = new ArrayList<Node>(topology.getLeaves(netPath));
        if (!this.isWeighted) {
            // uniform selection: shuffle once and iterate in order
            Collections.shuffle(leaves);
        } else {
            // weighted selection: bail out early when nothing is selectable
            if (CollectionUtils.subtract(leaves, excludeBookies).size() < 1) {
                throw new BKNotEnoughBookiesException();
            }
            wRSelection = prepareForWeightedSelection(leaves);
            if (wRSelection == null) {
                throw new BKNotEnoughBookiesException();
            }
        }
        Iterator<Node> it = leaves.iterator();
        // tracks distinct nodes drawn by the weighted selector so we terminate even
        // though the selector samples with replacement
        Set<Node> bookiesSeenSoFar = new HashSet<Node>();
        while (true) {
            Node n;
            if (isWeighted) {
                if (bookiesSeenSoFar.size() == leaves.size()) {
                    // Don't loop infinitely.
                    break;
                }
                n = wRSelection.getNextRandom();
                bookiesSeenSoFar.add(n);
            } else {
                if (it.hasNext()) {
                    n = it.next();
                } else {
                    break;
                }
            }
            if (excludeBookies.contains(n)) {
                continue;
            }
            if (!(n instanceof BookieNode) || !predicate.apply((BookieNode) n, ensemble)) {
                continue;
            }
            BookieNode bn = (BookieNode) n;
            // got a good candidate
            if (ensemble.addNode(bn)) {
                // add the candidate to exclude set
                excludeBookies.add(bn);
            }
            return bn;
        }
        throw new BKNotEnoughBookiesException();
    }
/**
* Choose a random node from whole cluster.
*
* @param numBookies
* number bookies to choose
* @param excludeBookies
* bookies set to exclude.
* @param ensemble
* ensemble to hold the bookie chosen.
* @return the bookie node chosen.
* @throws BKNotEnoughBookiesException
*/
    // Convenience wrapper: select from the full set of known bookies
    // (a null candidate list makes selectRandomInternal use knownBookies).
    protected List<BookieNode> selectRandom(int numBookies,
                                            Set<Node> excludeBookies,
                                            Predicate<BookieNode> predicate,
                                            Ensemble<BookieNode> ensemble)
            throws BKNotEnoughBookiesException {
        return selectRandomInternal(null, numBookies, excludeBookies, predicate, ensemble);
    }
    /**
     * Selects {@code numBookies} bookies from the candidate list (or from all known
     * bookies when the list is null), honoring exclusions and - when durability is
     * enforced - the supplied predicate. Uses weighted random selection when enabled,
     * otherwise a uniform shuffle. Selected bookies are added to the ensemble and to
     * the exclusion set.
     *
     * <p>Note: the candidate list is shuffled in place in the unweighted path, so
     * callers must pass a list they own.
     *
     * @throws BKNotEnoughBookiesException if fewer than {@code numBookies} can be chosen
     */
    protected List<BookieNode> selectRandomInternal(List<BookieNode> bookiesToSelectFrom,
                                                    int numBookies,
                                                    Set<Node> excludeBookies,
                                                    Predicate<BookieNode> predicate,
                                                    Ensemble<BookieNode> ensemble)
            throws BKNotEnoughBookiesException {
        WeightedRandomSelection<BookieNode> wRSelection = null;
        if (bookiesToSelectFrom == null) {
            // If the list is null, we need to select from the entire knownBookies set
            wRSelection = this.weightedSelection;
            bookiesToSelectFrom = new ArrayList<BookieNode>(knownBookies.values());
        }
        if (isWeighted) {
            if (CollectionUtils.subtract(bookiesToSelectFrom, excludeBookies).size() < numBookies) {
                throw new BKNotEnoughBookiesException();
            }
            // an explicit candidate list needs its own selector seeded from bookieInfoMap
            if (wRSelection == null) {
                wRSelection = new WeightedRandomSelectionImpl<BookieNode>(this.maxWeightMultiple);
            }
            Map<BookieNode, WeightedObject> rackMap = new HashMap<BookieNode, WeightedObject>();
            for (BookieNode n : bookiesToSelectFrom) {
                if (excludeBookies.contains(n)) {
                    continue;
                }
                if (this.bookieInfoMap.containsKey(n)) {
                    rackMap.put(n, this.bookieInfoMap.get(n));
                } else {
                    rackMap.put(n, new BookieInfo());
                }
            }
            wRSelection.updateMap(rackMap);
        } else {
            Collections.shuffle(bookiesToSelectFrom);
        }
        BookieNode bookie;
        List<BookieNode> newBookies = new ArrayList<BookieNode>(numBookies);
        Iterator<BookieNode> it = bookiesToSelectFrom.iterator();
        // distinct draws seen from the weighted selector (which samples with replacement)
        Set<BookieNode> bookiesSeenSoFar = new HashSet<BookieNode>();
        while (numBookies > 0) {
            if (isWeighted) {
                if (bookiesSeenSoFar.size() == bookiesToSelectFrom.size()) {
                    // If we have gone through the whole available list of bookies,
                    // and yet haven't been able to satisfy the ensemble request, bail out.
                    // We don't want to loop infinitely.
                    break;
                }
                bookie = wRSelection.getNextRandom();
                bookiesSeenSoFar.add(bookie);
            } else {
                if (it.hasNext()) {
                    bookie = it.next();
                } else {
                    break;
                }
            }
            if (excludeBookies.contains(bookie)) {
                continue;
            }
            // When durability is being enforced; we must not violate the
            // predicate even when selecting a random bookie; as durability
            // guarantee is not best effort; correctness is implied by it
            if (enforceDurability && !predicate.apply(bookie, ensemble)) {
                continue;
            }
            if (ensemble.addNode(bookie)) {
                excludeBookies.add(bookie);
                newBookies.add(bookie);
                --numBookies;
            }
        }
        if (numBookies == 0) {
            return newBookies;
        }
        LOG.warn("Failed to find {} bookies : excludeBookies {}, allBookies {}.",
            numBookies, excludeBookies, bookiesToSelectFrom);
        throw new BKNotEnoughBookiesException();
    }
@Override
public void registerSlowBookie(BookieId bookieSocketAddress, long entryId) {
if (reorderThresholdPendingRequests <= 0) {
// only put bookies on slowBookies list if reorderThresholdPendingRequests is *not* set (0);
// otherwise, rely on reordering of reads based on reorderThresholdPendingRequests
slowBookies.put(bookieSocketAddress, entryId);
}
}
@Override
public DistributionSchedule.WriteSet reorderReadSequence(
List<BookieId> ensemble,
BookiesHealthInfo bookiesHealthInfo,
DistributionSchedule.WriteSet writeSet) {
Map<Integer, String> writeSetWithRegion = new HashMap<>();
for (int i = 0; i < writeSet.size(); i++) {
writeSetWithRegion.put(writeSet.get(i), "");
}
return reorderReadSequenceWithRegion(
ensemble, writeSet, writeSetWithRegion, bookiesHealthInfo, false, "", writeSet.size());
}
/**
* This function orders the read sequence with a given region. For region-unaware policies (e.g.
* RackAware), we pass in false for regionAware and an empty myRegion. When this happens, any
* remote list will stay empty. The ordering is as follows (the R* at the beginning of each list item
* is only present for region aware policies).
* 1. available (local) bookies
 * 2. R* a remote bookie (based on remoteNodeInReorderSequence)
* 3. R* remaining (local) bookies
* 4. R* remaining remote bookies
* 5. read only bookies
* 6. slow bookies
* 7. unavailable bookies
*
* @param ensemble
* ensemble of bookies
* @param writeSet
* write set
* @param writeSetWithRegion
* write set with region information
* @param bookiesHealthInfo
 *          heuristics about health of bookies
* @param regionAware
* whether or not a region-aware policy is used
* @param myRegion
* current region of policy
* @param remoteNodeInReorderSequence
* number of local bookies to try before trying a remote bookie
* @return ordering of bookies to send read to
*/
    DistributionSchedule.WriteSet reorderReadSequenceWithRegion(
        List<BookieId> ensemble,
        DistributionSchedule.WriteSet writeSet,
        Map<Integer, String> writeSetWithRegion,
        BookiesHealthInfo bookiesHealthInfo,
        boolean regionAware,
        String myRegion,
        int remoteNodeInReorderSequence) {
        boolean useRegionAware = regionAware && (!myRegion.equals(UNKNOWN_REGION));
        int ensembleSize = ensemble.size();
        // For rack aware, If all the bookies in the write set are available, simply return the original write set,
        // to avoid creating more lists
        boolean isAnyBookieUnavailable = false;
        if (useRegionAware || reorderReadsRandom) {
            // region-aware / random reordering always needs the full classification pass
            isAnyBookieUnavailable = true;
        } else {
            for (int i = 0; i < ensemble.size(); i++) {
                BookieId bookieAddr = ensemble.get(i);
                if ((!knownBookies.containsKey(bookieAddr) && !readOnlyBookies.contains(bookieAddr))
                        || slowBookies.getIfPresent(bookieAddr) != null) {
                    // Found at least one bookie not available in the ensemble, or in slowBookies
                    isAnyBookieUnavailable = true;
                    break;
                }
            }
        }
        boolean reordered = false;
        if (reorderThresholdPendingRequests > 0) {
            // if there are no slow or unavailable bookies, capture each bookie's number of
            // pending request to reorder requests based on a threshold of pending requests
            // number of pending requests per bookie (same index as writeSet)
            long[] pendingReqs = new long[writeSet.size()];
            int bestBookieIdx = -1;
            for (int i = 0; i < writeSet.size(); i++) {
                pendingReqs[i] = bookiesHealthInfo.getBookiePendingRequests(ensemble.get(writeSet.get(i)));
                if (bestBookieIdx < 0 || pendingReqs[i] < pendingReqs[bestBookieIdx]) {
                    bestBookieIdx = i;
                }
            }
            // reorder the writeSet if the currently first bookie in our writeSet has at
            // least
            // reorderThresholdPendingRequests more outstanding request than the best bookie
            if (bestBookieIdx > 0 && pendingReqs[0] >= pendingReqs[bestBookieIdx] + reorderThresholdPendingRequests) {
                // We're not reordering the entire write set, but only move the best bookie
                // to the first place. Chances are good that this bookie will be fast enough
                // to not trigger the speculativeReadTimeout. But even if it hits that timeout,
                // things may have changed by then so much that whichever bookie we put second
                // may actually not be the second-best choice any more.
                if (LOG.isDebugEnabled()) {
                    LOG.debug("read set reordered from {} ({} pending) to {} ({} pending)",
                            ensemble.get(writeSet.get(0)), pendingReqs[0], ensemble.get(writeSet.get(bestBookieIdx)),
                            pendingReqs[bestBookieIdx]);
                }
                writeSet.moveAndShift(bestBookieIdx, 0);
                reordered = true;
            }
        }
        // fast path: everything available, nothing further to classify
        if (!isAnyBookieUnavailable) {
            if (reordered) {
                readReorderedCounter.registerSuccessfulValue(1);
            }
            return writeSet;
        }
        // classification pass: tag each write-set entry with a category mask
        // (LOCAL / REMOTE / SLOW / READ_ONLY / UNAVAIL / *_FAIL) in the high bits;
        // failure/slow entries encode an age/pending-count ordering key in the low bits
        for (int i = 0; i < writeSet.size(); i++) {
            int idx = writeSet.get(i);
            BookieId address = ensemble.get(idx);
            String region = writeSetWithRegion.get(idx);
            Long lastFailedEntryOnBookie = bookiesHealthInfo.getBookieFailureHistory(address);
            if (null == knownBookies.get(address)) {
                // there isn't too much differences between readonly bookies
                // from unavailable bookies. since there
                // is no write requests to them, so we shouldn't try reading
                // from readonly bookie prior to writable bookies.
                if ((null == readOnlyBookies)
                        || !readOnlyBookies.contains(address)) {
                    writeSet.set(i, idx | UNAVAIL_MASK);
                } else {
                    if (slowBookies.getIfPresent(address) != null) {
                        long numPendingReqs = bookiesHealthInfo.getBookiePendingRequests(address);
                        // use slow bookies with less pending requests first
                        long slowIdx = numPendingReqs * ensembleSize + idx;
                        writeSet.set(i, (int) (slowIdx & ~MASK_BITS) | SLOW_MASK);
                    } else {
                        writeSet.set(i, idx | READ_ONLY_MASK);
                    }
                }
            } else if (lastFailedEntryOnBookie < 0) {
                // no failure history for this bookie
                if (slowBookies.getIfPresent(address) != null) {
                    long numPendingReqs = bookiesHealthInfo.getBookiePendingRequests(address);
                    long slowIdx = numPendingReqs * ensembleSize + idx;
                    writeSet.set(i, (int) (slowIdx & ~MASK_BITS) | SLOW_MASK);
                } else {
                    if (useRegionAware && !myRegion.equals(region)) {
                        writeSet.set(i, idx | REMOTE_MASK);
                    } else {
                        writeSet.set(i, idx | LOCAL_MASK);
                    }
                }
            } else {
                // use bookies with earlier failed entryIds first
                long failIdx = lastFailedEntryOnBookie * ensembleSize + idx;
                if (useRegionAware && !myRegion.equals(region)) {
                    writeSet.set(i, (int) (failIdx & ~MASK_BITS) | REMOTE_FAIL_MASK);
                } else {
                    writeSet.set(i, (int) (failIdx & ~MASK_BITS) | LOCAL_FAIL_MASK);
                }
            }
        }
        // Add a mask to ensure the sort is stable, sort,
        // and then remove mask. This maintains stability as
        // long as there are fewer than 16 bookies in the write set.
        for (int i = 0; i < writeSet.size(); i++) {
            writeSet.set(i, writeSet.get(i) | ((i & 0xF) << 20));
        }
        writeSet.sort();
        for (int i = 0; i < writeSet.size(); i++) {
            writeSet.set(i, writeSet.get(i) & ~((0xF) << 20));
        }
        if (reorderReadsRandom) {
            // randomize ordering within each category, keeping category order intact
            shuffleWithMask(writeSet, LOCAL_MASK, MASK_BITS);
            shuffleWithMask(writeSet, REMOTE_MASK, MASK_BITS);
            shuffleWithMask(writeSet, READ_ONLY_MASK, MASK_BITS);
            shuffleWithMask(writeSet, UNAVAIL_MASK, MASK_BITS);
        }
        // nodes within a region are ordered as follows
        // (Random?) list of nodes that have no history of failure
        // Nodes with Failure history are ordered in the reverse
        // order of the most recent entry that generated an error
        // The sort will have put them in correct order,
        // so remove the bits that sort by age.
        for (int i = 0; i < writeSet.size(); i++) {
            int mask = writeSet.get(i) & MASK_BITS;
            int idx = (writeSet.get(i) & ~MASK_BITS) % ensembleSize;
            if (mask == LOCAL_FAIL_MASK) {
                writeSet.set(i, LOCAL_MASK | idx);
            } else if (mask == REMOTE_FAIL_MASK) {
                writeSet.set(i, REMOTE_MASK | idx);
            } else if (mask == SLOW_MASK) {
                writeSet.set(i, SLOW_MASK | idx);
            }
        }
        // Insert a node from the remote region at the specified location so
        // we try more than one region within the max allowed latency
        int firstRemote = -1;
        for (int i = 0; i < writeSet.size(); i++) {
            if ((writeSet.get(i) & MASK_BITS) == REMOTE_MASK) {
                firstRemote = i;
                break;
            }
        }
        if (firstRemote != -1) {
            int i = 0;
            for (; i < remoteNodeInReorderSequence
                && i < writeSet.size(); i++) {
                if ((writeSet.get(i) & MASK_BITS) != LOCAL_MASK) {
                    break;
                }
            }
            writeSet.moveAndShift(firstRemote, i);
        }
        // remove all masks
        for (int i = 0; i < writeSet.size(); i++) {
            writeSet.set(i, writeSet.get(i) & ~MASK_BITS);
        }
        readReorderedCounter.registerSuccessfulValue(1);
        return writeSet;
    }
// this method should be called in readlock scope of 'rwlock'
/**
 * Checks whether the given ensemble adheres to the rack-aware placement policy:
 * every rolling write quorum must span at least
 * min(writeQuorumSize, minNumRacksPerWriteQuorum) distinct racks.
 *
 * @param ensembleList bookies of the ensemble, in ensemble order
 * @param writeQuorumSize number of bookies each entry is written to
 * @param ackQuorumSize number of acks required (not used by this check)
 * @return MEETS_STRICT when every write quorum covers enough racks, FAIL otherwise
 */
@Override
public PlacementPolicyAdherence isEnsembleAdheringToPlacementPolicy(List<BookieId> ensembleList,
int writeQuorumSize, int ackQuorumSize) {
    if (CollectionUtils.isEmpty(ensembleList)) {
        return PlacementPolicyAdherence.FAIL;
    }
    int ensembleSize = ensembleList.size();
    // a write quorum can never span more racks than it has members
    int minNumRacksPerWriteQuorumForThisEnsemble = Math.min(writeQuorumSize, minNumRacksPerWriteQuorum);
    HashSet<String> racksInQuorum = new HashSet<String>();
    BookieId bookie;
    // slide a writeQuorumSize-wide window circularly over the ensemble
    for (int i = 0; i < ensembleList.size(); i++) {
        racksInQuorum.clear();
        for (int j = 0; j < writeQuorumSize; j++) {
            bookie = ensembleList.get((i + j) % ensembleSize);
            try {
                if (knownBookies.containsKey(bookie)) {
                    racksInQuorum.add(knownBookies.get(bookie).getNetworkLocation());
                } else if (LOG.isDebugEnabled()) {
                    // unknown bookies contribute no rack; the quorum may then fail the size check below
                    LOG.debug("bookie {} is not in the list of knownBookies", bookie);
                }
            } catch (Exception e) {
                /*
                 * any issue/exception in analyzing whether ensemble is
                 * strictly adhering to placement policy should be
                 * swallowed.
                 */
                LOG.warn("Received exception while trying to get network location of bookie: {}", bookie, e);
            }
        }
        // when rack enforcement is on, a quorum containing a default-rack bookie also fails
        if ((racksInQuorum.size() < minNumRacksPerWriteQuorumForThisEnsemble)
                || (enforceMinNumRacksPerWriteQuorum && racksInQuorum.contains(getDefaultRack()))) {
            return PlacementPolicyAdherence.FAIL;
        }
    }
    return PlacementPolicyAdherence.MEETS_STRICT;
}
/**
 * Checks whether the set of bookies that acknowledged a write spans at least
 * min(writeQuorumSize, minNumRacksPerWriteQuorum) distinct racks.
 *
 * <p>Fix: the debug log previously compared against the raw
 * {@code minNumRacksPerWriteQuorum} while the return value uses the capped
 * {@code minWriteQuorumNumRacksPerWriteQuorum}; the logged verdict could
 * therefore contradict the actual result. The log now reports the value that
 * is really used.
 *
 * @param ackedBookies bookies that acknowledged the write
 * @param writeQuorumSize number of bookies each entry is written to
 * @param ackQuorumSize number of acks required (not used by this check)
 * @return true when the acked bookies cover enough distinct racks
 */
@Override
public boolean areAckedBookiesAdheringToPlacementPolicy(Set<BookieId> ackedBookies,
                                                        int writeQuorumSize,
                                                        int ackQuorumSize) {
    HashSet<String> rackCounter = new HashSet<>();
    // the effective requirement can never exceed the write quorum size
    int minWriteQuorumNumRacksPerWriteQuorum = Math.min(writeQuorumSize, minNumRacksPerWriteQuorum);
    ReentrantReadWriteLock.ReadLock readLock = rwLock.readLock();
    readLock.lock();
    try {
        for (BookieId bookie : ackedBookies) {
            if (knownBookies.containsKey(bookie)) {
                rackCounter.add(knownBookies.get(bookie).getNetworkLocation());
            } else if (LOG.isDebugEnabled()) {
                LOG.debug("bookie {} is not in the list of knownBookies", bookie);
            }
        }
        // Check to make sure that ensemble is writing to `minNumberOfRacks`'s number of racks at least.
        if (LOG.isDebugEnabled()) {
            // log the capped threshold actually used by the return statement below
            LOG.debug("areAckedBookiesAdheringToPlacementPolicy returning {} because number of racks = {} and "
                            + "minNumRacksPerWriteQuorum = {}",
                    rackCounter.size() >= minWriteQuorumNumRacksPerWriteQuorum,
                    rackCounter.size(),
                    minWriteQuorumNumRacksPerWriteQuorum);
        }
    } finally {
        readLock.unlock();
    }
    return rackCounter.size() >= minWriteQuorumNumRacksPerWriteQuorum;
}
/**
 * Attempts to replace/relocate bookies of {@code currentEnsemble} so that the
 * result adheres to the placement policy, preferring the candidate ensemble
 * that differs from the current one by the fewest bookies (least replication).
 *
 * @return the adhering ensemble, or an empty list with FAIL when no adhering
 *         replacement could be computed
 */
@Override
public PlacementResult<List<BookieId>> replaceToAdherePlacementPolicy(
        int ensembleSize,
        int writeQuorumSize,
        int ackQuorumSize,
        Set<BookieId> excludeBookies,
        List<BookieId> currentEnsemble) {
    rwLock.readLock().lock();
    try {
        // fast path: nothing to do when the current ensemble already adheres
        PlacementPolicyAdherence currentPlacementAdherence = isEnsembleAdheringToPlacementPolicy(
                currentEnsemble, writeQuorumSize, ackQuorumSize);
        if (PlacementPolicyAdherence.FAIL != currentPlacementAdherence) {
            return PlacementResult.of(new ArrayList<>(currentEnsemble), currentPlacementAdherence);
        }
        // unknown (e.g. gone) bookies must not be reused in the new ensemble
        // NOTE(review): this mutates the caller-supplied excludeBookies set
        for (BookieId bookieId : currentEnsemble) {
            if (!knownBookies.containsKey(bookieId)) {
                excludeBookies.add(bookieId);
            }
        }
        int minNumRacksPerWriteQuorumForThisEnsemble = Math.min(writeQuorumSize, minNumRacksPerWriteQuorum);
        int numRacks = topology.getNumOfRacks();
        // only one rack or less than minNumRacksPerWriteQuorumForThisEnsemble, stop calculation to skip relocation
        if (numRacks < 2 || numRacks < minNumRacksPerWriteQuorumForThisEnsemble) {
            LOG.warn("Skip ensemble relocation because the cluster has only {} rack.", numRacks);
            return PlacementResult.of(Collections.emptyList(), PlacementPolicyAdherence.FAIL);
        }
        PlacementResult<List<BookieId>> placementResult = PlacementResult.of(Collections.emptyList(),
                PlacementPolicyAdherence.FAIL);
        // try each rotation start index and keep the candidate with minimal churn
        int minDiffer = Integer.MAX_VALUE;
        for (int i = 0; i < currentEnsemble.size(); i++) {
            PlacementResult<List<BookieId>> result = doReplaceToAdherePlacementPolicy(ensembleSize,
                    writeQuorumSize, ackQuorumSize, excludeBookies, currentEnsemble, i);
            if (PlacementPolicyAdherence.FAIL == result.getAdheringToPolicy()) {
                continue;
            }
            int differ = differBetweenBookies(currentEnsemble, result.getResult());
            if (differ < minDiffer) {
                minDiffer = differ;
                placementResult = result;
                // one replaced bookie is the best achievable outcome; stop searching
                if (minDiffer == 1) {
                    break;
                }
            }
        }
        return placementResult;
    } finally {
        rwLock.readLock().unlock();
    }
}
/**
 * Builds one candidate ensemble by walking the current ensemble circularly
 * from {@code startIndex}, reusing each current bookie when it still fits the
 * policy and otherwise selecting a replacement from the topology.
 *
 * @param startIndex position of the current ensemble at which selection starts
 * @return an adhering candidate (rotated back to the original index order),
 *         or an empty list with FAIL when one cannot be formed
 */
private PlacementResult<List<BookieId>> doReplaceToAdherePlacementPolicy(
        int ensembleSize,
        int writeQuorumSize,
        int ackQuorumSize,
        Set<BookieId> excludeBookies,
        List<BookieId> currentEnsemble,
        int startIndex) {
    // working copy of the ensemble; entries are swapped as replacements are chosen
    final List<BookieNode> provisionalEnsembleNodes = currentEnsemble.stream()
            .map(this::convertBookieToNode).collect(Collectors.toList());
    final Set<Node> excludeNodes = convertBookiesToNodes(
            addDefaultRackBookiesIfMinNumRacksIsEnforced(excludeBookies));
    int minNumRacksPerWriteQuorumForThisEnsemble = Math.min(writeQuorumSize, minNumRacksPerWriteQuorum);
    final RRTopologyAwareCoverageEnsemble ensemble =
            new RRTopologyAwareCoverageEnsemble(
                    ensembleSize,
                    writeQuorumSize,
                    ackQuorumSize,
                    RACKNAME_DISTANCE_FROM_LEAVES,
                    null,
                    null,
                    minNumRacksPerWriteQuorumForThisEnsemble);
    BookieNode prevNode = null;
    final BookieNode firstNode = provisionalEnsembleNodes.get(startIndex);
    // use same bookie at first to reduce ledger replication
    if (!excludeNodes.contains(firstNode) && ensemble.apply(firstNode, ensemble)
            && ensemble.addNode(firstNode)) {
        excludeNodes.add(firstNode);
        prevNode = firstNode;
    }
    // fill the remaining slots; i starts at 1 when the first slot was kept above
    for (int i = prevNode == null ? 0 : 1; i < ensembleSize; i++) {
        int index = (startIndex + i) % ensembleSize;
        final String curRack;
        if (null == prevNode) {
            // no anchor yet: start from the local rack when it is usable, else the whole topology
            if ((null == localNode) || defaultRack.equals(localNode.getNetworkLocation())) {
                curRack = NodeBase.ROOT;
            } else {
                curRack = localNode.getNetworkLocation();
            }
        } else {
            // INVERSE path: any rack except the previous node's rack
            curRack = NetworkTopologyImpl.INVERSE + prevNode.getNetworkLocation();
        }
        try {
            prevNode = replaceToAdherePlacementPolicyInternal(
                    curRack, excludeNodes, ensemble, ensemble,
                    provisionalEnsembleNodes, index, ensembleSize, minNumRacksPerWriteQuorumForThisEnsemble);
            // got a good candidate
            if (ensemble.addNode(prevNode)) {
                // add the candidate to exclude set
                excludeNodes.add(prevNode);
            } else {
                throw new BKNotEnoughBookiesException();
            }
            // replace to newer node
            provisionalEnsembleNodes.set(index, prevNode);
        } catch (BKNotEnoughBookiesException e) {
            LOG.warn("Skip ensemble relocation because the cluster has not enough bookies.");
            return PlacementResult.of(Collections.emptyList(), PlacementPolicyAdherence.FAIL);
        }
    }
    List<BookieId> bookieList = ensemble.toList();
    if (ensembleSize != bookieList.size()) {
        LOG.warn("Not enough {} bookies are available to form an ensemble : {}.",
                ensembleSize, bookieList);
        return PlacementResult.of(Collections.emptyList(), PlacementPolicyAdherence.FAIL);
    }
    // final sanity check on the assembled candidate
    PlacementPolicyAdherence placementPolicyAdherence = isEnsembleAdheringToPlacementPolicy(bookieList,
            writeQuorumSize, ackQuorumSize);
    if (PlacementPolicyAdherence.FAIL == placementPolicyAdherence) {
        return PlacementResult.of(Collections.emptyList(), PlacementPolicyAdherence.FAIL);
    }
    // rotate back so slot i of the result corresponds to slot i of currentEnsemble
    return PlacementResult.of(revertBookieListByIndex(bookieList, startIndex), placementPolicyAdherence);
}
/**
 * Rotates {@code bookies} so that its first element lands at position
 * {@code startIndex} of the returned list, wrapping around at the end.
 * Inverse of the rotation applied by the selection walk.
 */
private List<BookieId> revertBookieListByIndex(List<BookieId> bookies, int startIndex) {
    final int size = bookies.size();
    final BookieId[] rotated = new BookieId[size];
    int dest = startIndex;
    for (BookieId bookie : bookies) {
        if (dest == size) {
            dest = 0;
        }
        rotated[dest++] = bookie;
    }
    return Lists.newArrayList(rotated);
}
/**
 * Selects a bookie for position {@code ensembleIndex} of the provisional
 * ensemble. The current occupant is kept when it still satisfies
 * {@code predicate}; otherwise candidates are drawn from the topology under
 * progressively weaker placement conditions (strictest first).
 *
 * @param netPath network path filter (possibly an INVERSE expression) for fallback conditions
 * @param excludeBookies nodes that must never be selected
 * @param predicate placement predicate a candidate must satisfy
 * @param ensemble ensemble being assembled
 * @param provisionalEnsembleNodes current working copy of the ensemble
 * @param ensembleIndex position being (re)placed
 * @param ensembleSize size of the ensemble
 * @param minNumRacksPerWriteQuorumForThisEnsemble effective per-quorum rack requirement
 * @return the chosen bookie node
 * @throws BKNotEnoughBookiesException when no condition yields a valid candidate
 */
private BookieNode replaceToAdherePlacementPolicyInternal(
        String netPath, Set<Node> excludeBookies, Predicate<BookieNode> predicate,
        Ensemble<BookieNode> ensemble, List<BookieNode> provisionalEnsembleNodes, int ensembleIndex,
        int ensembleSize, int minNumRacksPerWriteQuorumForThisEnsemble) throws BKNotEnoughBookiesException {
    final BookieNode currentNode = provisionalEnsembleNodes.get(ensembleIndex);
    // if the current bookie could be applied to the ensemble, apply it to minify the number of bookies replaced
    if (!excludeBookies.contains(currentNode) && predicate.apply(currentNode, ensemble)) {
        return currentNode;
    }
    // each pair is (network-path filter, additional nodes to avoid); tried in order
    final List<Pair<String, List<BookieNode>>> conditionList = new ArrayList<>();
    // racks of the quorum members preceding / following ensembleIndex (circularly)
    final Set<String> preExcludeRacks = new HashSet<>();
    final Set<String> postExcludeRacks = new HashSet<>();
    for (int i = 0; i < minNumRacksPerWriteQuorumForThisEnsemble - 1; i++) {
        preExcludeRacks.add(provisionalEnsembleNodes.get(Math.floorMod((ensembleIndex - i - 1), ensembleSize))
                .getNetworkLocation());
        postExcludeRacks.add(provisionalEnsembleNodes.get(Math.floorMod((ensembleIndex + i + 1), ensembleSize))
                .getNetworkLocation());
    }
    // adhere minNumRacksPerWriteQuorum by preExcludeRacks
    // avoid additional replace from write quorum candidates by preExcludeRacks and postExcludeRacks
    // avoid to use first candidate bookies for election by provisionalEnsembleNodes
    conditionList.add(Pair.of(
            NetworkTopologyImpl.INVERSE + String.join(",",
                    Stream.concat(preExcludeRacks.stream(), postExcludeRacks.stream()).collect(Collectors.toSet())),
            provisionalEnsembleNodes
    ));
    // avoid to use same rack between previous index by netPath
    // avoid to use first candidate bookies for election by provisionalEnsembleNodes
    conditionList.add(Pair.of(netPath, provisionalEnsembleNodes));
    // avoid to use same rack between previous index by netPath
    conditionList.add(Pair.of(netPath, Collections.emptyList()));
    for (Pair<String, List<BookieNode>> condition : conditionList) {
        WeightedRandomSelection<BookieNode> wRSelection = null;
        final List<Node> leaves = new ArrayList<>(topology.getLeaves(condition.getLeft()));
        if (!isWeighted) {
            // unweighted mode: plain random order over the matching leaves
            Collections.shuffle(leaves);
        } else {
            if (CollectionUtils.subtract(leaves, excludeBookies).size() < 1) {
                throw new BKNotEnoughBookiesException();
            }
            wRSelection = prepareForWeightedSelection(leaves);
            if (wRSelection == null) {
                throw new BKNotEnoughBookiesException();
            }
        }
        final Iterator<Node> it = leaves.iterator();
        final Set<Node> bookiesSeenSoFar = new HashSet<>();
        while (true) {
            Node n;
            if (isWeighted) {
                if (bookiesSeenSoFar.size() == leaves.size()) {
                    // Don't loop infinitely.
                    break;
                }
                n = wRSelection.getNextRandom();
                bookiesSeenSoFar.add(n);
            } else {
                if (it.hasNext()) {
                    n = it.next();
                } else {
                    break;
                }
            }
            if (excludeBookies.contains(n)) {
                continue;
            }
            if (!(n instanceof BookieNode) || !predicate.apply((BookieNode) n, ensemble)) {
                continue;
            }
            // additional excludeBookies
            if (condition.getRight().contains(n)) {
                continue;
            }
            BookieNode bn = (BookieNode) n;
            return bn;
        }
    }
    throw new BKNotEnoughBookiesException();
}
}
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkArgument;
import static org.apache.bookkeeper.meta.MetadataDrivers.runFunctionWithMetadataBookieDriver;
import static org.apache.bookkeeper.meta.MetadataDrivers.runFunctionWithRegistrationManager;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.UncheckedExecutionException;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.SortedMap;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Predicate;
import lombok.SneakyThrows;
import org.apache.bookkeeper.bookie.BookieException;
import org.apache.bookkeeper.bookie.BookieImpl;
import org.apache.bookkeeper.client.AsyncCallback.OpenCallback;
import org.apache.bookkeeper.client.AsyncCallback.RecoverCallback;
import org.apache.bookkeeper.client.EnsemblePlacementPolicy.PlacementPolicyAdherence;
import org.apache.bookkeeper.client.LedgerFragmentReplicator.SingleFragmentCallback;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncOpenCallback;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncReadCallback;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.discover.BookieServiceInfo;
import org.apache.bookkeeper.discover.RegistrationClient.RegistrationListener;
import org.apache.bookkeeper.discover.RegistrationManager;
import org.apache.bookkeeper.meta.LedgerAuditorManager;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.meta.LedgerManager.LedgerRangeIterator;
import org.apache.bookkeeper.meta.LedgerManagerFactory;
import org.apache.bookkeeper.meta.LedgerUnderreplicationManager;
import org.apache.bookkeeper.meta.MetadataBookieDriver;
import org.apache.bookkeeper.meta.UnderreplicatedLedger;
import org.apache.bookkeeper.meta.zk.ZKMetadataDriverBase;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieAddressResolver;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.MultiCallback;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.Processor;
import org.apache.bookkeeper.replication.BookieLedgerIndexer;
import org.apache.bookkeeper.replication.ReplicationException.BKAuditException;
import org.apache.bookkeeper.replication.ReplicationException.CompatibilityException;
import org.apache.bookkeeper.replication.ReplicationException.UnavailableException;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.AvailabilityOfEntriesOfLedger;
import org.apache.bookkeeper.util.IOUtils;
import org.apache.commons.collections4.MapUtils;
import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Admin client for BookKeeper clusters.
 */
public class BookKeeperAdmin implements AutoCloseable {
    private static final Logger LOG = LoggerFactory.getLogger(BookKeeperAdmin.class);
    private static final Logger VERBOSE = LoggerFactory.getLogger("verbose");
    // used by callers that need a no-op progress callback
    private static final BiConsumer<Long, Long> NOOP_BICONSUMER = (l, e) -> { };
    // BookKeeper client instance
    private BookKeeper bkc;
    // whether this admin created bkc itself (and therefore must close it)
    private final boolean ownsBK;
    // LedgerFragmentReplicator instance
    private LedgerFragmentReplicator lfr;
    private LedgerManagerFactory mFactory;
    /*
     * underreplicationManager is not initialized as part of constructor use its
     * getter (getUnderreplicationManager) so that it can be lazy-initialized
     */
    private LedgerUnderreplicationManager underreplicationManager;
    // lazily created; closed (if created) by close()
    private LedgerAuditorManager ledgerAuditorManager;
    /**
     * Constructor that takes in a ZooKeeper servers connect string so we know
     * how to connect to ZooKeeper to retrieve information about the BookKeeper
     * cluster. We need this before we can do any type of admin operations on
     * the BookKeeper cluster.
     *
     * @param zkServers
     *            Comma separated list of hostname:port pairs for the ZooKeeper
     *            servers cluster.
     * @throws IOException
     *             throws this exception if there is an error instantiating the
     *             ZooKeeper client.
     * @throws InterruptedException
     *             Throws this exception if there is an error instantiating the
     *             BookKeeper client.
     * @throws BKException
     *             Throws this exception if there is an error instantiating the
     *             BookKeeper client.
     */
    public BookKeeperAdmin(String zkServers) throws IOException, InterruptedException, BKException {
        // "zk+null": ZooKeeper metadata service with the null bookie-address scheme layer
        this(new ClientConfiguration().setMetadataServiceUri("zk+null://" + zkServers + "/ledgers"));
    }
    /**
     * Constructor that takes in a configuration object so we know
     * how to connect to ZooKeeper to retrieve information about the BookKeeper
     * cluster. We need this before we can do any type of admin operations on
     * the BookKeeper cluster.
     *
     * @param conf
     *            Client Configuration Object
     * @throws IOException
     *             throws this exception if there is an error instantiating the
     *             ZooKeeper client.
     * @throws InterruptedException
     *             Throws this exception if there is an error instantiating the
     *             BookKeeper client.
     * @throws BKException
     *             Throws this exception if there is an error instantiating the
     *             BookKeeper client.
     */
    public BookKeeperAdmin(ClientConfiguration conf) throws IOException, InterruptedException, BKException {
        // Create the BookKeeper client instance
        bkc = new BookKeeper(conf);
        // this admin created the client, so close() must shut it down
        ownsBK = true;
        this.lfr = new LedgerFragmentReplicator(bkc, NullStatsLogger.INSTANCE, conf);
        this.mFactory = bkc.ledgerManagerFactory;
    }
    /**
     * Constructor that takes in a BookKeeper instance. This will be useful
     * when the user already has a bk instance ready.
     *
     * @param bkc
     *            - bookkeeper instance
     * @param statsLogger
     *            - stats logger
     * @param conf
     *            - client configuration (must be non-null)
     */
    public BookKeeperAdmin(final BookKeeper bkc, StatsLogger statsLogger, ClientConfiguration conf) {
        Objects.requireNonNull(conf, "Client configuration cannot be null");
        this.bkc = bkc;
        // caller owns the client; close() must not shut it down
        ownsBK = false;
        this.lfr = new LedgerFragmentReplicator(bkc, statsLogger, conf);
        this.mFactory = bkc.ledgerManagerFactory;
    }
    /**
     * Constructor taking an existing BookKeeper instance and a client
     * configuration; statistics are not collected (NullStatsLogger).
     */
    public BookKeeperAdmin(final BookKeeper bkc, ClientConfiguration conf) {
        this(bkc, NullStatsLogger.INSTANCE, conf);
    }
public BookKeeperAdmin(final BookKeeper bkc) {
this.bkc = bkc;
ownsBK = false;
this.mFactory = bkc.ledgerManagerFactory;
}
    /**
     * Returns the client configuration of the underlying BookKeeper instance.
     */
    public ClientConfiguration getConf() {
        return bkc.getConf();
    }
/**
* Gracefully release resources that this client uses.
*
* @throws InterruptedException
* if there is an error shutting down the clients that this
* class uses.
*/
@Override
public void close() throws InterruptedException, BKException {
if (ownsBK) {
bkc.close();
}
if (ledgerAuditorManager != null) {
try {
ledgerAuditorManager.close();
} catch (Exception e) {
throw new BKException.MetaStoreException(e);
}
}
}
    /**
     * Get a list of the available bookies.
     *
     * @return a collection of bookie addresses
     * @throws BKException if the writable-bookie list cannot be read
     */
    public Collection<BookieId> getAvailableBookies()
            throws BKException {
        return bkc.bookieWatcher.getBookies();
    }
    /**
     * Get a list of all bookies including the not available ones.
     *
     * @return a collection of bookie addresses
     * @throws BKException if the bookie list cannot be read
     */
    public Collection<BookieId> getAllBookies()
            throws BKException {
        return bkc.bookieWatcher.getAllBookies();
    }
    /**
     * Returns the resolver used to map bookie ids to network addresses.
     */
    public BookieAddressResolver getBookieAddressResolver() {
        return bkc.bookieWatcher.getBookieAddressResolver();
    }
    /**
     * Returns the service info (endpoints, properties) registered by the given
     * bookie. {@code @SneakyThrows} rethrows any checked exception raised while
     * waiting on the registration-client future.
     *
     * @param bookiedId id of the bookie to look up
     */
    @SneakyThrows
    public BookieServiceInfo getBookieServiceInfo(BookieId bookiedId)
        throws BKException {
        return FutureUtils.result(bkc.getMetadataClientDriver()
                .getRegistrationClient().getBookieServiceInfo(bookiedId)).getValue();
    }
    /**
     * Get a list of readonly bookies synchronously.
     *
     * @return a collection of bookie addresses
     * @throws BKException if there are issues trying to read the list.
     */
    public Collection<BookieId> getReadOnlyBookies() throws BKException {
        return bkc.bookieWatcher.getReadOnlyBookies();
    }
/**
* Notify when the available list of bookies changes.
* This is a one-shot notification. To receive subsequent notifications
* the listener must be registered again.
*
* @param listener the listener to notify
*/
public void watchWritableBookiesChanged(final RegistrationListener listener)
throws BKException {
bkc
.getMetadataClientDriver()
.getRegistrationClient()
.watchWritableBookies(listener);
}
/**
* Notify when the available list of read only bookies changes.
* This is a one-shot notification. To receive subsequent notifications
* the listener must be registered again.
*
* @param listener the listener to notify
*/
public void watchReadOnlyBookiesChanged(final RegistrationListener listener)
throws BKException {
bkc
.getMetadataClientDriver()
.getRegistrationClient()
.watchReadOnlyBookies(listener);
}
    /**
     * Open a ledger as an administrator. This means that no digest password
     * checks are done. Otherwise, the call is identical to BookKeeper#asyncOpenLedger
     *
     * @param lId
     *          ledger identifier
     * @param cb
     *          Callback which will receive a LedgerHandle object
     * @param ctx
     *          optional context object, to be passed to the callback (can be null)
     *
     * @see BookKeeper#asyncOpenLedger
     */
    public void asyncOpenLedger(final long lId, final OpenCallback cb, final Object ctx) {
        new LedgerOpenOp(bkc, bkc.getClientCtx().getClientStats(), lId, cb, ctx).initiate();
    }
/**
* Open a ledger as an administrator. This means that no digest password
* checks are done. Otherwise, the call is identical to
* BookKeeper#openLedger
*
* @param lId
* - ledger identifier
* @see BookKeeper#openLedger
*/
public LedgerHandle openLedger(final long lId) throws InterruptedException,
BKException {
CompletableFuture<LedgerHandle> future = new CompletableFuture<>();
SyncOpenCallback result = new SyncOpenCallback(future);
new LedgerOpenOp(bkc, bkc.getClientCtx().getClientStats(), lId, result, null).initiate();
return SyncCallbackUtils.waitForResult(future);
}
    /**
     * Open a ledger as an administrator without recovering the ledger. This means
     * that no digest password checks are done. Otherwise, the call is identical
     * to BookKeeper#asyncOpenLedgerNoRecovery
     *
     * @param lId
     *          ledger identifier
     * @param cb
     *          Callback which will receive a LedgerHandle object
     * @param ctx
     *          optional context object, to be passed to the callback (can be null)
     *
     * @see BookKeeper#asyncOpenLedgerNoRecovery
     */
    public void asyncOpenLedgerNoRecovery(final long lId, final OpenCallback cb, final Object ctx) {
        new LedgerOpenOp(bkc, bkc.getClientCtx().getClientStats(), lId, cb, ctx).initiateWithoutRecovery();
    }
/**
* Open a ledger as an administrator without recovering the ledger. This
* means that no digest password checks are done. Otherwise, the call is
* identical to BookKeeper#openLedgerNoRecovery
*
* @param lId
* ledger identifier
* @see BookKeeper#openLedgerNoRecovery
*/
@SuppressWarnings("unchecked")
public LedgerHandle openLedgerNoRecovery(final long lId)
throws InterruptedException, BKException {
CompletableFuture<LedgerHandle> future = new CompletableFuture<>();
SyncOpenCallback result = new SyncOpenCallback(future);
new LedgerOpenOp(bkc, bkc.getClientCtx().getClientStats(), lId, result, null)
.initiateWithoutRecovery();
return SyncCallbackUtils.waitForResult(future);
}
    /**
     * Read entries from a ledger synchronously. If the lastEntry is -1, it will read all the entries in the ledger
     * from the firstEntry.
     *
     * @param ledgerId id of the ledger to read from; must be non-negative
     * @param firstEntry first entry id to read (inclusive); must be non-negative
     * @param lastEntry last entry id to read (inclusive), or -1 to read until the last available entry
     * @return a lazy iterable over the requested entries
     * @throws InterruptedException if interrupted while opening the ledger
     * @throws BKException if the ledger cannot be opened or read
     */
    public Iterable<LedgerEntry> readEntries(long ledgerId, long firstEntry, long lastEntry)
            throws InterruptedException, BKException {
        checkArgument(ledgerId >= 0 && firstEntry >= 0);
        return new LedgerEntriesIterable(ledgerId, firstEntry, lastEntry);
    }
    /**
     * Lazily iterates over the entries of a ledger; each {@link #iterator()}
     * call re-opens the ledger (no-recovery) via {@link LedgerEntriesIterator}.
     */
    class LedgerEntriesIterable implements Iterable<LedgerEntry> {
        final long ledgerId;
        // first entry id to read (inclusive)
        final long firstEntryId;
        // last entry id to read (inclusive); -1 means "until the last available entry"
        final long lastEntryId;
        public LedgerEntriesIterable(long ledgerId, long firstEntry) {
            this(ledgerId, firstEntry, -1);
        }
        public LedgerEntriesIterable(long ledgerId, long firstEntry, long lastEntry) {
            this.ledgerId = ledgerId;
            this.firstEntryId = firstEntry;
            this.lastEntryId = lastEntry;
        }
        @Override
        public Iterator<LedgerEntry> iterator() {
            try {
                return new LedgerEntriesIterator(ledgerId, firstEntryId, lastEntryId);
            } catch (Exception e) {
                // Iterable.iterator() cannot throw checked exceptions; wrap instead
                throw new RuntimeException(e);
            }
        }
    }
class LedgerEntriesIterator implements Iterator<LedgerEntry> {
final LedgerHandle handle;
final long ledgerId;
final long lastEntryId;
long nextEntryId;
LedgerEntry currentEntry;
public LedgerEntriesIterator(long ledgerId, long firstEntry, long lastEntry)
throws InterruptedException, BKException {
this.handle = openLedgerNoRecovery(ledgerId);
this.ledgerId = ledgerId;
this.nextEntryId = firstEntry;
this.lastEntryId = lastEntry;
this.currentEntry = null;
}
@Override
public boolean hasNext() {
if (currentEntry != null) {
return true;
}
if ((lastEntryId == -1 || nextEntryId <= lastEntryId) && nextEntryId <= handle.getLastAddConfirmed()) {
try {
CompletableFuture<Enumeration<LedgerEntry>> result = new CompletableFuture<>();
handle.asyncReadEntriesInternal(nextEntryId, nextEntryId,
new SyncReadCallback(result), null, false);
currentEntry = SyncCallbackUtils.waitForResult(result).nextElement();
return true;
} catch (Exception e) {
if (e instanceof BKException.BKNoSuchEntryException && lastEntryId == -1) {
// there are no more entries in the ledger, so we just return false and ignore this exception
// since the last entry id was undefined
close();
return false;
}
LOG.error("Error reading entry {} from ledger {}", nextEntryId, ledgerId, e);
close();
throw new RuntimeException(e);
}
}
close();
return false;
}
@Override
public LedgerEntry next() {
if (lastEntryId > -1 && nextEntryId > lastEntryId) {
throw new NoSuchElementException();
}
++nextEntryId;
LedgerEntry entry = currentEntry;
currentEntry = null;
return entry;
}
@Override
public void remove() {
// noop
}
private void close() {
if (handle != null) {
try {
handle.close();
} catch (Exception e) {
LOG.error("Error closing ledger handle {}", handle, e);
}
}
}
}
// Object used for calling async methods and waiting for them to complete.
static class SyncObject {
boolean value;
int rc;
public SyncObject() {
value = false;
rc = BKException.Code.OK;
}
}
    /**
     * Synchronously returns metadata of all ledgers that include at least one
     * of {@code bookies} in some ensemble; blocks until the underlying async
     * scan completes.
     *
     * @param bookies bookies to look for
     * @return map from ledger id to its metadata
     * @throws InterruptedException if interrupted while waiting
     * @throws BKException if the metadata scan fails
     */
    public SortedMap<Long, LedgerMetadata> getLedgersContainBookies(Set<BookieId> bookies)
            throws InterruptedException, BKException {
        final SyncObject sync = new SyncObject();
        final AtomicReference<SortedMap<Long, LedgerMetadata>> resultHolder =
                new AtomicReference<SortedMap<Long, LedgerMetadata>>(null);
        asyncGetLedgersContainBookies(bookies, new GenericCallback<SortedMap<Long, LedgerMetadata>>() {
            @Override
            public void operationComplete(int rc, SortedMap<Long, LedgerMetadata> result) {
                LOG.info("GetLedgersContainBookies completed with rc : {}", rc);
                synchronized (sync) {
                    sync.rc = rc;
                    sync.value = true;
                    resultHolder.set(result);
                    sync.notify();
                }
            }
        });
        // wait for the async call to signal completion
        synchronized (sync) {
            while (!sync.value) {
                sync.wait();
            }
        }
        if (sync.rc != BKException.Code.OK) {
            throw BKException.create(sync.rc);
        }
        return resultHolder.get();
    }
    /**
     * Asynchronously scans all ledgers and collects those whose ensembles
     * intersect {@code bookies}; ledgers deleted mid-scan are skipped.
     *
     * @param bookies bookies to look for
     * @param callback invoked with the collected map once the scan finishes
     */
    public void asyncGetLedgersContainBookies(final Set<BookieId> bookies,
                                              final GenericCallback<SortedMap<Long, LedgerMetadata>> callback) {
        // concurrent map: per-ledger metadata callbacks may complete on different threads
        final SortedMap<Long, LedgerMetadata> ledgers = new ConcurrentSkipListMap<Long, LedgerMetadata>();
        bkc.getLedgerManager().asyncProcessLedgers(new Processor<Long>() {
            @Override
            public void process(final Long lid, final AsyncCallback.VoidCallback cb) {
                bkc.getLedgerManager().readLedgerMetadata(lid)
                        .whenComplete((metadata, exception) -> {
                            if (BKException.getExceptionCode(exception)
                                    == BKException.Code.NoSuchLedgerExistsOnMetadataServerException) {
                                // the ledger was deleted during this iteration.
                                cb.processResult(BKException.Code.OK, null, null);
                                return;
                            } else if (exception != null) {
                                cb.processResult(BKException.getExceptionCode(exception), null, null);
                                return;
                            }
                            Set<BookieId> bookiesInLedger =
                                    LedgerMetadataUtils.getBookiesInThisLedger(metadata.getValue());
                            Sets.SetView<BookieId> intersection =
                                    Sets.intersection(bookiesInLedger, bookies);
                            if (!intersection.isEmpty()) {
                                ledgers.put(lid, metadata.getValue());
                            }
                            cb.processResult(BKException.Code.OK, null, null);
                        });
            }
        }, new AsyncCallback.VoidCallback() {
            @Override
            public void processResult(int rc, String path, Object ctx) {
                // scan finished (or failed); hand the accumulated map to the caller
                callback.operationComplete(rc, ledgers);
            }
        }, null, BKException.Code.OK, BKException.Code.MetaStoreException);
    }
    /**
     * Synchronous method to rebuild and recover the ledger fragments data that
     * was stored on the source bookie. That bookie could have failed completely
     * and now the ledger data that was stored on it is under replicated. An
     * optional destination bookie server could be given if we want to copy all
     * of the ledger fragments data on the failed source bookie to it.
     * Otherwise, we will just randomly distribute the ledger fragments to the
     * active set of bookies, perhaps based on load. All ZooKeeper ledger
     * metadata will be updated to point to the new bookie(s) that contain the
     * replicated ledger fragments.
     *
     * @param bookieSrc
     *            Source bookie that had a failure. We want to replicate the
     *            ledger fragments that were stored there.
     */
    public void recoverBookieData(final BookieId bookieSrc)
            throws InterruptedException, BKException {
        Set<BookieId> bookiesSrc = Sets.newHashSet(bookieSrc);
        recoverBookieData(bookiesSrc);
    }
    /**
     * Synchronous bookie recovery for multiple source bookies with default
     * options (no dry-run, open ledgers are processed).
     *
     * @param bookiesSrc source bookies whose ledger fragments should be re-replicated
     */
    public void recoverBookieData(final Set<BookieId> bookiesSrc)
            throws InterruptedException, BKException {
        recoverBookieData(bookiesSrc, false, false);
    }
    /**
     * Synchronous bookie recovery; unrecoverable ledgers are not skipped.
     *
     * @param bookiesSrc source bookies whose ledger fragments should be re-replicated
     * @param dryrun when true, only compute what would be replicated
     * @param skipOpenLedgers when true, ledgers that are still open are skipped
     */
    public void recoverBookieData(final Set<BookieId> bookiesSrc, boolean dryrun, boolean skipOpenLedgers)
            throws InterruptedException, BKException {
        recoverBookieData(bookiesSrc, dryrun, skipOpenLedgers, false);
    }
    /**
     * Synchronous bookie recovery with the full option set; blocks until the
     * async recovery completes and rethrows its failure code as a BKException.
     *
     * @param bookiesSrc source bookies whose ledger fragments should be re-replicated
     * @param dryrun when true, only compute what would be replicated
     * @param skipOpenLedgers when true, ledgers that are still open are skipped
     * @param skipUnrecoverableLedgers when true, unrecoverable ledgers are skipped instead of failing
     */
    public void recoverBookieData(final Set<BookieId> bookiesSrc, boolean dryrun, boolean skipOpenLedgers,
                                  boolean skipUnrecoverableLedgers) throws InterruptedException, BKException {
        SyncObject sync = new SyncObject();
        // Call the async method to recover bookie data.
        asyncRecoverBookieData(bookiesSrc, dryrun, skipOpenLedgers, skipUnrecoverableLedgers, new RecoverCallback() {
            @Override
            public void recoverComplete(int rc, Object ctx) {
                LOG.info("Recover bookie operation completed with rc: {}", BKException.codeLogger(rc));
                SyncObject syncObj = (SyncObject) ctx;
                synchronized (syncObj) {
                    syncObj.rc = rc;
                    syncObj.value = true;
                    syncObj.notify();
                }
            }
        }, sync);
        // Wait for the async method to complete.
        synchronized (sync) {
            while (!sync.value) {
                sync.wait();
            }
        }
        if (sync.rc != BKException.Code.OK) {
            throw BKException.create(sync.rc);
        }
    }
    /**
     * Synchronously recovers the fragments of a single ledger that live on the
     * given source bookies; blocks until the async recovery completes.
     *
     * @param lid ledger to recover
     * @param bookiesSrc source bookies whose fragments should be re-replicated
     * @param dryrun when true, only compute what would be replicated
     * @param skipOpenLedgers when true, the ledger is skipped if still open
     */
    public void recoverBookieData(final long lid,
                                  final Set<BookieId> bookiesSrc,
                                  boolean dryrun,
                                  boolean skipOpenLedgers)
            throws InterruptedException, BKException {
        SyncObject sync = new SyncObject();
        // Call the async method to recover bookie data.
        asyncRecoverBookieData(lid, bookiesSrc, dryrun, skipOpenLedgers, (rc, ctx) -> {
            LOG.info("Recover bookie for {} completed with rc : {}", lid, BKException.codeLogger(rc));
            SyncObject syncObject = (SyncObject) ctx;
            synchronized (syncObject) {
                syncObject.rc = rc;
                syncObject.value = true;
                syncObject.notify();
            }
        }, sync);
        // Wait for the async method to complete.
        synchronized (sync) {
            while (!sync.value) {
                sync.wait();
            }
        }
        if (sync.rc != BKException.Code.OK) {
            throw BKException.create(sync.rc);
        }
    }
/**
* Async method to rebuild and recover the ledger fragments data that was
* stored on the source bookie. That bookie could have failed completely and
* now the ledger data that was stored on it is under replicated. An
* optional destination bookie server could be given if we want to copy all
* of the ledger fragments data on the failed source bookie to it.
* Otherwise, we will just randomly distribute the ledger fragments to the
* active set of bookies, perhaps based on load. All ZooKeeper ledger
* metadata will be updated to point to the new bookie(s) that contain the
* replicated ledger fragments.
*
* @param bookieSrc
* Source bookie that had a failure. We want to replicate the
* ledger fragments that were stored there.
* @param cb
* RecoverCallback to invoke once all of the data on the dead
* bookie has been recovered and replicated.
* @param context
* Context for the RecoverCallback to call.
*/
public void asyncRecoverBookieData(final BookieId bookieSrc,
final RecoverCallback cb, final Object context) {
Set<BookieId> bookiesSrc = Sets.newHashSet(bookieSrc);
asyncRecoverBookieData(bookiesSrc, cb, context);
}
    /**
     * Asynchronously recovers the ledger fragments stored on the given set of
     * failed bookies, with dryrun, skipOpenLedgers and skipUnrecoverableLedgers
     * all disabled.
     *
     * @param bookieSrc source bookies that had a failure
     * @param cb RecoverCallback invoked once recovery completes
     * @param context context object passed through to the RecoverCallback
     */
    public void asyncRecoverBookieData(final Set<BookieId> bookieSrc,
                                       final RecoverCallback cb, final Object context) {
        asyncRecoverBookieData(bookieSrc, false, false, false, cb, context);
    }
    /**
     * Asynchronously recovers the ledger fragments stored on the given set of
     * failed bookies by iterating over all active ledgers.
     *
     * @param bookieSrc source bookies that had a failure
     * @param dryrun if true, only print the recovery plan without replicating anything
     * @param skipOpenLedgers if true, ledgers still in the OPEN state are skipped
     * @param skipUnrecoverableLedgers if true, unrecoverable ledgers are skipped
     *            instead of failing the whole operation
     * @param cb RecoverCallback invoked once recovery completes
     * @param context context object passed through to the RecoverCallback
     */
    public void asyncRecoverBookieData(final Set<BookieId> bookieSrc, boolean dryrun,
                                       final boolean skipOpenLedgers, final boolean skipUnrecoverableLedgers,
                                       final RecoverCallback cb, final Object context) {
        getActiveLedgers(bookieSrc, dryrun, skipOpenLedgers, skipUnrecoverableLedgers, cb, context);
    }
/**
* Recover a specific ledger.
*
* @param lid
* ledger to recover
* @param bookieSrc
* Source bookies that had a failure. We want to replicate the ledger fragments that were stored there.
* @param dryrun
* dryrun the recover procedure.
* @param skipOpenLedgers
* Skip recovering open ledgers.
* @param callback
* RecoverCallback to invoke once all of the data on the dead
* bookie has been recovered and replicated.
* @param context
* Context for the RecoverCallback to call.
*/
public void asyncRecoverBookieData(long lid, final Set<BookieId> bookieSrc, boolean dryrun,
boolean skipOpenLedgers, final RecoverCallback callback, final Object context) {
AsyncCallback.VoidCallback callbackWrapper = (rc, path, ctx)
-> callback.recoverComplete(bkc.getReturnRc(rc), context);
recoverLedger(bookieSrc, lid, dryrun, skipOpenLedgers, callbackWrapper);
}
    /**
     * This method asynchronously polls ZK to get the current set of active
     * ledgers. From this, we can open each ledger and look at the metadata to
     * determine if any of the ledger fragments for it were stored at the dead
     * input bookie.
     *
     * @param bookiesSrc
     *            Source bookies that had a failure. We want to replicate the
     *            ledger fragments that were stored there.
     * @param dryrun
     *            dryrun the recover procedure.
     * @param skipOpenLedgers
     *            Skip recovering open ledgers.
     * @param skipUnrecoverableLedgers
     *            Skip unrecoverable ledgers instead of failing the operation.
     * @param cb
     *            RecoverCallback to invoke once all of the data on the dead
     *            bookie has been recovered and replicated.
     * @param context
     *            Context for the RecoverCallback to call.
     */
    private void getActiveLedgers(final Set<BookieId> bookiesSrc, final boolean dryrun,
                                  final boolean skipOpenLedgers, final boolean skipUnrecoverableLedgers,
                                  final RecoverCallback cb, final Object context) {
        // Wrapper class around the RecoverCallback so it can be used
        // as the final VoidCallback to process ledgers
        class RecoverCallbackWrapper implements AsyncCallback.VoidCallback {
            final RecoverCallback cb;
            RecoverCallbackWrapper(RecoverCallback cb) {
                this.cb = cb;
            }
            @Override
            public void processResult(int rc, String path, Object ctx) {
                // Normalize the internal return code before handing it to the caller.
                cb.recoverComplete(bkc.getReturnRc(rc), ctx);
            }
        }
        // Per-ledger processor: recover each active ledger in turn.
        Processor<Long> ledgerProcessor = new Processor<Long>() {
            @Override
            public void process(Long ledgerId, AsyncCallback.VoidCallback iterCallback) {
                recoverLedger(bookiesSrc, ledgerId, dryrun, skipOpenLedgers, skipUnrecoverableLedgers, iterCallback);
            }
        };
        // Iterate over every ledger known to the ledger manager; the wrapper is
        // invoked once when all ledgers have been processed.
        bkc.getLedgerManager().asyncProcessLedgers(
                ledgerProcessor, new RecoverCallbackWrapper(cb),
                context, BKException.Code.OK, BKException.Code.LedgerRecoveryException);
    }
    /**
     * This method asynchronously recovers a given ledger if any of the ledger
     * entries were stored on the failed bookie. Delegates to the full overload
     * with skipUnrecoverableLedgers disabled.
     *
     * @param bookiesSrc
     *            Source bookies that had a failure. We want to replicate the
     *            ledger fragments that were stored there.
     * @param lId
     *            Ledger id we want to recover.
     * @param dryrun
     *            printing the recovery plan without actually recovering bookies
     * @param skipOpenLedgers
     *            Skip recovering open ledgers.
     * @param finalLedgerIterCb
     *            IterationCallback to invoke once we've recovered the current
     *            ledger.
     */
    private void recoverLedger(final Set<BookieId> bookiesSrc, final long lId, final boolean dryrun,
                               final boolean skipOpenLedgers, final AsyncCallback.VoidCallback finalLedgerIterCb) {
        recoverLedger(bookiesSrc, lId, dryrun, skipOpenLedgers, false, finalLedgerIterCb);
    }
    /**
     * This method asynchronously recovers a given ledger if any of the ledger
     * entries were stored on the failed bookie.
     *
     * <p>High-level flow: open the ledger without recovery, fence/close it first
     * if a failed bookie sits in the last ensemble of a non-closed ledger, then
     * compute the fragment ranges that involve a failed bookie and re-replicate
     * each of them to replacement bookies.
     *
     * @param bookiesSrc
     *            Source bookies that had a failure. We want to replicate the
     *            ledger fragments that were stored there.
     * @param lId
     *            Ledger id we want to recover.
     * @param dryrun
     *            printing the recovery plan without actually recovering bookies
     * @param skipOpenLedgers
     *            Skip recovering open ledgers.
     * @param skipUnrecoverableLedgers
     *            Skip unrecoverable ledgers.
     * @param finalLedgerIterCb
     *            IterationCallback to invoke once we've recovered the current
     *            ledger.
     */
    private void recoverLedger(final Set<BookieId> bookiesSrc, final long lId, final boolean dryrun,
                               final boolean skipOpenLedgers, final boolean skipUnrecoverableLedgers,
                               final AsyncCallback.VoidCallback finalLedgerIterCb) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Recovering ledger : {}", lId);
        }
        // Open without recovery first so we can inspect the metadata before
        // deciding whether fencing is required.
        asyncOpenLedgerNoRecovery(lId, new OpenCallback() {
            @Override
            public void openComplete(int rc, final LedgerHandle lh, Object ctx) {
                if (rc != BKException.Code.OK) {
                    if (skipUnrecoverableLedgers) {
                        LOG.warn("BK error opening ledger: {}, skip recover it.", lId, BKException.create(rc));
                        finalLedgerIterCb.processResult(BKException.Code.OK, null, null);
                    } else {
                        LOG.error("BK error opening ledger: {}", lId, BKException.create(rc));
                        finalLedgerIterCb.processResult(rc, null, null);
                    }
                    return;
                }
                LedgerMetadata lm = lh.getLedgerMetadata();
                if (skipOpenLedgers && lm.getState() == LedgerMetadata.State.OPEN) {
                    LOG.info("Skip recovering open ledger {}.", lId);
                    try {
                        lh.close();
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                    } catch (BKException bke) {
                        LOG.warn("Error on closing ledger handle for {}.", lId);
                    }
                    finalLedgerIterCb.processResult(BKException.Code.OK, null, null);
                    return;
                }
                final boolean fenceRequired = !lm.isClosed() && containBookiesInLastEnsemble(lm, bookiesSrc);
                // the original write has not removed faulty bookie from
                // current ledger ensemble. to avoid data loss issue in
                // the case of concurrent updates to the ensemble composition,
                // the recovery tool should first close the ledger
                if (!dryrun && fenceRequired) {
                    // close opened non recovery ledger handle
                    try {
                        lh.close();
                    } catch (Exception ie) {
                        LOG.warn("Error closing non recovery ledger handle for ledger " + lId, ie);
                    }
                    // Re-open WITH recovery to fence the ledger, then retry this
                    // whole method; on the retry fenceRequired will be false.
                    asyncOpenLedger(lId, new OpenCallback() {
                        @Override
                        public void openComplete(int newrc, final LedgerHandle newlh, Object newctx) {
                            if (newrc != BKException.Code.OK) {
                                if (skipUnrecoverableLedgers) {
                                    LOG.warn("BK error opening ledger: {}, skip recover it.",
                                            lId, BKException.create(newrc));
                                    finalLedgerIterCb.processResult(BKException.Code.OK, null, null);
                                } else {
                                    LOG.error("BK error close ledger: {}", lId, BKException.create(newrc));
                                    finalLedgerIterCb.processResult(newrc, null, null);
                                }
                                return;
                            }
                            bkc.mainWorkerPool.submit(() -> {
                                // do recovery
                                recoverLedger(bookiesSrc, lId, dryrun, skipOpenLedgers,
                                        skipUnrecoverableLedgers, finalLedgerIterCb);
                            });
                        }
                    }, null);
                    return;
                }
                // Per-ledger completion callback: closes the handle and forwards
                // the (possibly downgraded) result code to the outer iterator.
                final AsyncCallback.VoidCallback ledgerIterCb = new AsyncCallback.VoidCallback() {
                    @Override
                    public void processResult(int rc, String path, Object ctx) {
                        if (BKException.Code.OK != rc) {
                            if (skipUnrecoverableLedgers) {
                                LOG.warn("Failed to recover ledger: {} : {}, skip recover it.", lId,
                                        BKException.codeLogger(rc));
                                rc = BKException.Code.OK;
                            } else {
                                LOG.error("Failed to recover ledger {} : {}", lId, BKException.codeLogger(rc));
                            }
                        } else {
                            LOG.info("Recovered ledger {}.", lId);
                        }
                        try {
                            lh.close();
                        } catch (InterruptedException ie) {
                            Thread.currentThread().interrupt();
                        } catch (BKException bke) {
                            LOG.warn("Error on closing ledger handle for {}.", lId);
                        }
                        finalLedgerIterCb.processResult(rc, path, ctx);
                    }
                };
                /*
                 * This List stores the ledger fragments to recover indexed by
                 * the start entry ID for the range. The ensembles TreeMap is
                 * keyed off this.
                 */
                final List<Long> ledgerFragmentsToRecover = new LinkedList<Long>();
                /*
                 * This Map will store the start and end entry ID values for
                 * each of the ledger fragment ranges. The only exception is the
                 * current active fragment since it has no end yet. In the event
                 * of a bookie failure, a new ensemble is created so the current
                 * ensemble should not contain the dead bookie we are trying to
                 * recover.
                 */
                Map<Long, Long> ledgerFragmentsRange = new HashMap<Long, Long>();
                Long curEntryId = null;
                for (Map.Entry<Long, ? extends List<BookieId>> entry :
                        lh.getLedgerMetadata().getAllEnsembles().entrySet()) {
                    if (curEntryId != null) {
                        // Previous fragment ends one entry before this one starts.
                        ledgerFragmentsRange.put(curEntryId, entry.getKey() - 1);
                    }
                    curEntryId = entry.getKey();
                    if (containBookies(entry.getValue(), bookiesSrc)) {
                        /*
                         * Current ledger fragment has entries stored on the
                         * dead bookie so we'll need to recover them.
                         */
                        ledgerFragmentsToRecover.add(entry.getKey());
                    }
                }
                // add last ensemble otherwise if the failed bookie existed in
                // the last ensemble of a closed ledger. the entries belonged to
                // last ensemble would not be replicated.
                if (curEntryId != null) {
                    ledgerFragmentsRange.put(curEntryId, lh.getLastAddConfirmed());
                }
                /*
                 * See if this current ledger contains any ledger fragment that
                 * needs to be re-replicated. If not, then just invoke the
                 * multiCallback and return.
                 */
                if (ledgerFragmentsToRecover.size() == 0) {
                    ledgerIterCb.processResult(BKException.Code.OK, null, null);
                    return;
                }
                if (dryrun) {
                    VERBOSE.info("Recovered ledger {} : {}", lId, (fenceRequired ? "[fence required]" : ""));
                }
                /*
                 * Multicallback for ledger. Once all fragments for the ledger have been recovered
                 * trigger the ledgerIterCb
                 */
                MultiCallback ledgerFragmentsMcb = new MultiCallback(ledgerFragmentsToRecover.size(), ledgerIterCb,
                        null, BKException.Code.OK, BKException.Code.LedgerRecoveryException);
                /*
                 * Now recover all of the necessary ledger fragments
                 * asynchronously using a MultiCallback for every fragment.
                 */
                for (final Long startEntryId : ledgerFragmentsToRecover) {
                    Long endEntryId = ledgerFragmentsRange.get(startEntryId);
                    List<BookieId> ensemble = lh.getLedgerMetadata().getAllEnsembles().get(startEntryId);
                    // Get bookies to replace
                    Map<Integer, BookieId> targetBookieAddresses;
                    try {
                        targetBookieAddresses = getReplacementBookies(lh, ensemble, bookiesSrc);
                    } catch (BKException.BKNotEnoughBookiesException e) {
                        if (!dryrun) {
                            ledgerFragmentsMcb.processResult(BKException.Code.NotEnoughBookiesException, null, null);
                        } else {
                            VERBOSE.info("  Fragment [{} - {}] : {}", startEntryId, endEntryId,
                                    BKException.getMessage(BKException.Code.NotEnoughBookiesException));
                        }
                        continue;
                    }
                    if (dryrun) {
                        ArrayList<BookieId> newEnsemble =
                                replaceBookiesInEnsemble(ensemble, targetBookieAddresses);
                        VERBOSE.info("  Fragment [{} - {}] : ", startEntryId, endEntryId);
                        VERBOSE.info("    old ensemble : {}", formatEnsemble(ensemble, bookiesSrc, '*'));
                        VERBOSE.info("    new ensemble : {}", formatEnsemble(newEnsemble, bookiesSrc, '*'));
                    } else {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Replicating fragment from [{}, {}] of ledger {} to {}",
                                    startEntryId, endEntryId, lh.getId(), targetBookieAddresses);
                        }
                        try {
                            LedgerFragmentReplicator.SingleFragmentCallback cb =
                                    new LedgerFragmentReplicator.SingleFragmentCallback(ledgerFragmentsMcb, lh,
                                            bkc.getLedgerManager(),
                                            startEntryId, getReplacementBookiesMap(ensemble, targetBookieAddresses));
                            LedgerFragment ledgerFragment = new LedgerFragment(lh,
                                    startEntryId, endEntryId, targetBookieAddresses.keySet());
                            asyncRecoverLedgerFragment(lh, ledgerFragment, cb,
                                    Sets.newHashSet(targetBookieAddresses.values()), NOOP_BICONSUMER);
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                            return;
                        }
                    }
                }
                if (dryrun) {
                    // In dryrun mode no fragment callbacks fire, so complete here.
                    ledgerIterCb.processResult(BKException.Code.OK, null, null);
                }
            }
        }, null);
    }
static String formatEnsemble(List<BookieId> ensemble, Set<BookieId> bookiesSrc,
char marker) {
StringBuilder sb = new StringBuilder();
sb.append("[");
for (int i = 0; i < ensemble.size(); i++) {
sb.append(ensemble.get(i));
if (bookiesSrc.contains(ensemble.get(i))) {
sb.append(marker);
} else {
sb.append(' ');
}
if (i != ensemble.size() - 1) {
sb.append(", ");
}
}
sb.append("]");
return sb.toString();
}
    /**
     * This method asynchronously recovers a ledger fragment which is a
     * contiguous portion of a ledger that was stored in an ensemble that
     * included the failed bookie. Delegates the actual copy to the shared
     * {@code LedgerFragmentReplicator}.
     *
     * @param lh
     *            - LedgerHandle for the ledger
     * @param ledgerFragment
     *            - LedgerFragment to replicate
     * @param ledgerFragmentMcb
     *            - MultiCallback to invoke once we've recovered the current
     *            ledger fragment.
     * @param newBookies
     *            - New bookies we want to use to recover and replicate the
     *            ledger entries that were stored on the failed bookie.
     * @param onReadEntryFailureCallback
     *            - callback invoked on each entry-read failure during replication
     * @throws InterruptedException if interrupted while scheduling the replication
     */
    private void asyncRecoverLedgerFragment(final LedgerHandle lh,
                                            final LedgerFragment ledgerFragment,
                                            final AsyncCallback.VoidCallback ledgerFragmentMcb,
                                            final Set<BookieId> newBookies,
                                            final BiConsumer<Long, Long> onReadEntryFailureCallback) throws InterruptedException {
        lfr.replicate(lh, ledgerFragment, ledgerFragmentMcb, newBookies, onReadEntryFailureCallback);
    }
private Map<Integer, BookieId> getReplacementBookies(
LedgerHandle lh,
List<BookieId> ensemble,
Set<BookieId> bookiesToRereplicate)
throws BKException.BKNotEnoughBookiesException {
Set<Integer> bookieIndexesToRereplicate = Sets.newHashSet();
for (int bookieIndex = 0; bookieIndex < ensemble.size(); bookieIndex++) {
BookieId bookieInEnsemble = ensemble.get(bookieIndex);
if (bookiesToRereplicate.contains(bookieInEnsemble)) {
bookieIndexesToRereplicate.add(bookieIndex);
}
}
return getReplacementBookiesByIndexes(
lh, ensemble, bookieIndexesToRereplicate, Optional.of(bookiesToRereplicate));
}
    /**
     * Asks the placement policy for a replacement bookie for each of the given
     * ensemble indexes, excluding both the bookies being replaced and any
     * caller-supplied exclusions, plus every replacement already chosen.
     *
     * @param lh handle of the ledger being recovered
     * @param ensemble current ensemble of the fragment
     * @param bookieIndexesToRereplicate ensemble positions that need a new bookie
     * @param excludedBookies optional extra bookies to exclude from allocation
     * @return map from ensemble index to its chosen replacement bookie
     * @throws BKException.BKNotEnoughBookiesException when the placement policy
     *             cannot satisfy a replacement request
     */
    private Map<Integer, BookieId> getReplacementBookiesByIndexes(
            LedgerHandle lh,
            List<BookieId> ensemble,
            Set<Integer> bookieIndexesToRereplicate,
            Optional<Set<BookieId>> excludedBookies)
            throws BKException.BKNotEnoughBookiesException {
        // target bookies to replicate
        Map<Integer, BookieId> targetBookieAddresses =
                Maps.newHashMapWithExpectedSize(bookieIndexesToRereplicate.size());
        // bookies to exclude for ensemble allocation
        Set<BookieId> bookiesToExclude = Sets.newHashSet();
        if (excludedBookies.isPresent()) {
            bookiesToExclude.addAll(excludedBookies.get());
        }
        // excluding bookies that need to be replicated
        for (Integer bookieIndex : bookieIndexesToRereplicate) {
            BookieId bookie = ensemble.get(bookieIndex);
            bookiesToExclude.add(bookie);
        }
        // allocate bookies
        for (Integer bookieIndex : bookieIndexesToRereplicate) {
            BookieId oldBookie = ensemble.get(bookieIndex);
            EnsemblePlacementPolicy.PlacementResult<BookieId> replaceBookieResponse =
                    bkc.getPlacementPolicy().replaceBookie(
                            lh.getLedgerMetadata().getEnsembleSize(),
                            lh.getLedgerMetadata().getWriteQuorumSize(),
                            lh.getLedgerMetadata().getAckQuorumSize(),
                            lh.getLedgerMetadata().getCustomMetadata(),
                            ensemble,
                            oldBookie,
                            bookiesToExclude);
            BookieId newBookie = replaceBookieResponse.getResult();
            PlacementPolicyAdherence isEnsembleAdheringToPlacementPolicy = replaceBookieResponse.getAdheringToPolicy();
            if (isEnsembleAdheringToPlacementPolicy == PlacementPolicyAdherence.FAIL && LOG.isDebugEnabled()) {
                LOG.debug(
                        "replaceBookie for bookie: {} in ensemble: {} "
                                + "is not adhering to placement policy and chose {}",
                        oldBookie, ensemble, newBookie);
            }
            targetBookieAddresses.put(bookieIndex, newBookie);
            // Exclude the chosen bookie so later slots do not pick the same one.
            bookiesToExclude.add(newBookie);
        }
        return targetBookieAddresses;
    }
private ArrayList<BookieId> replaceBookiesInEnsemble(
List<BookieId> ensemble,
Map<Integer, BookieId> replacedBookies) {
ArrayList<BookieId> newEnsemble = Lists.newArrayList(ensemble);
for (Map.Entry<Integer, BookieId> entry : replacedBookies.entrySet()) {
newEnsemble.set(entry.getKey(), entry.getValue());
}
return newEnsemble;
}
    /**
     * Replicate the Ledger fragment to target Bookie passed.
     *
     * <p>Replacement targets are chosen according to the fragment's replicate
     * type: for DATA_LOSS the replacements come straight from the placement
     * policy; for DATA_NOT_ADHERING_PLACEMENT the ensemble is re-evaluated
     * against the placement policy and the fragment's bookie indexes are
     * extended with the replaced slots.
     *
     * @param lh
     *            - ledgerHandle
     * @param ledgerFragment
     *            - LedgerFragment to replicate
     * @param onReadEntryFailureCallback
     *            - callback invoked on each entry-read failure during replication
     * @throws InterruptedException if interrupted while replicating
     * @throws BKException if no target bookie can be found or replication fails
     */
    public void replicateLedgerFragment(LedgerHandle lh, final LedgerFragment ledgerFragment,
                                        final BiConsumer<Long, Long> onReadEntryFailureCallback) throws InterruptedException, BKException {
        Map<Integer, BookieId> targetBookieAddresses = null;
        if (LedgerFragment.ReplicateType.DATA_LOSS == ledgerFragment.getReplicateType()) {
            Optional<Set<BookieId>> excludedBookies = Optional.empty();
            targetBookieAddresses = getReplacementBookiesByIndexes(lh, ledgerFragment.getEnsemble(),
                    ledgerFragment.getBookiesIndexes(), excludedBookies);
        } else if (LedgerFragment.ReplicateType.DATA_NOT_ADHERING_PLACEMENT == ledgerFragment.getReplicateType()) {
            targetBookieAddresses = replaceNotAdheringPlacementPolicyBookie(ledgerFragment.getEnsemble(),
                    lh.getLedgerMetadata().getWriteQuorumSize(), lh.getLedgerMetadata().getAckQuorumSize());
            ledgerFragment.getBookiesIndexes().addAll(targetBookieAddresses.keySet());
        }
        // No targets found (or an unknown replicate type): surface as a recovery failure.
        if (MapUtils.isEmpty(targetBookieAddresses)) {
            LOG.warn("Could not replicate for {} ledger: {}, not find target bookie.",
                    ledgerFragment.getReplicateType(), ledgerFragment.getLedgerId());
            throw new BKException.BKLedgerRecoveryException();
        }
        replicateLedgerFragment(lh, ledgerFragment, targetBookieAddresses, onReadEntryFailureCallback);
    }
    /**
     * Replicates a ledger fragment to the pre-computed target bookies and
     * blocks until the replication (and the metadata update performed by
     * {@code SingleFragmentCallback}) completes.
     *
     * @param lh handle of the ledger being replicated
     * @param ledgerFragment fragment to copy
     * @param targetBookieAddresses map from ensemble index to target bookie
     * @param onReadEntryFailureCallback callback invoked on each entry-read failure
     * @throws InterruptedException if interrupted while waiting for completion
     * @throws BKException if the replication completes with a non-OK result code
     */
    private void replicateLedgerFragment(LedgerHandle lh,
                                         final LedgerFragment ledgerFragment,
                                         final Map<Integer, BookieId> targetBookieAddresses,
                                         final BiConsumer<Long, Long> onReadEntryFailureCallback)
            throws InterruptedException, BKException {
        CompletableFuture<Void> result = new CompletableFuture<>();
        ResultCallBack resultCallBack = new ResultCallBack(result);
        SingleFragmentCallback cb = new SingleFragmentCallback(
                resultCallBack,
                lh,
                bkc.getLedgerManager(),
                ledgerFragment.getFirstEntryId(),
                getReplacementBookiesMap(ledgerFragment, targetBookieAddresses));
        Set<BookieId> targetBookieSet = Sets.newHashSet();
        targetBookieSet.addAll(targetBookieAddresses.values());
        asyncRecoverLedgerFragment(lh, ledgerFragment, cb, targetBookieSet, onReadEntryFailureCallback);
        try {
            // Block until the fragment callback completes the future.
            SyncCallbackUtils.waitForResult(result);
        } catch (BKException err) {
            // Normalize the internal return code before rethrowing to the caller.
            throw BKException.create(bkc.getReturnRc(err.getCode()));
        }
    }
private static Map<BookieId, BookieId> getReplacementBookiesMap(
List<BookieId> ensemble,
Map<Integer, BookieId> targetBookieAddresses) {
Map<BookieId, BookieId> bookiesMap =
new HashMap<BookieId, BookieId>();
for (Map.Entry<Integer, BookieId> entry : targetBookieAddresses.entrySet()) {
BookieId oldBookie = ensemble.get(entry.getKey());
BookieId newBookie = entry.getValue();
bookiesMap.put(oldBookie, newBookie);
}
return bookiesMap;
}
private static Map<BookieId, BookieId> getReplacementBookiesMap(
LedgerFragment ledgerFragment,
Map<Integer, BookieId> targetBookieAddresses) {
Map<BookieId, BookieId> bookiesMap =
new HashMap<BookieId, BookieId>();
for (Integer bookieIndex : ledgerFragment.getBookiesIndexes()) {
BookieId oldBookie = ledgerFragment.getAddress(bookieIndex);
BookieId newBookie = targetBookieAddresses.get(bookieIndex);
bookiesMap.put(oldBookie, newBookie);
}
return bookiesMap;
}
private static boolean containBookiesInLastEnsemble(LedgerMetadata lm,
Set<BookieId> bookies) {
if (lm.getAllEnsembles().size() <= 0) {
return false;
}
Long lastKey = lm.getAllEnsembles().lastKey();
List<BookieId> lastEnsemble = lm.getAllEnsembles().get(lastKey);
return containBookies(lastEnsemble, bookies);
}
private static boolean containBookies(List<BookieId> ensemble,
Set<BookieId> bookies) {
for (BookieId bookie : ensemble) {
if (bookies.contains(bookie)) {
return true;
}
}
return false;
}
    /**
     * This is the class for getting the replication result.
     *
     * <p>Adapts a {@code VoidCallback} onto a {@link CompletableFuture} so the
     * synchronous replication path can block on completion; the future is
     * completed (normally or exceptionally, per the return code) by
     * {@code SyncCallbackUtils.finish}.
     */
    public static class ResultCallBack implements AsyncCallback.VoidCallback {
        private final CompletableFuture<Void> sync;
        public ResultCallBack(CompletableFuture<Void> sync) {
            this.sync = sync;
        }
        @Override
        @SuppressWarnings("unchecked")
        public void processResult(int rc, String s, Object ctx) {
            SyncCallbackUtils.finish(rc, null, sync);
        }
    }
    /**
     * Format the BookKeeper metadata in zookeeper.
     *
     * @param conf
     *            Server configuration used to reach the metadata service.
     * @param isInteractive
     *            Whether format should ask prompt for confirmation if old data
     *            exists or not.
     * @param force
     *            If non interactive and force is true, then old data will be
     *            removed without prompt.
     * @return Returns true if format succeeds else false.
     * @throws Exception on metadata service errors
     */
    public static boolean format(ServerConfiguration conf,
                                 boolean isInteractive, boolean force) throws Exception {
        return runFunctionWithMetadataBookieDriver(conf, new Function<MetadataBookieDriver, Boolean>() {
            @Override
            @SuppressFBWarnings("RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE")
            public Boolean apply(MetadataBookieDriver driver) {
                try {
                    try (RegistrationManager regManager = driver.createRegistrationManager()) {
                        boolean ledgerRootExists = regManager.prepareFormat();
                        // If old data was there then confirm with admin.
                        boolean doFormat = true;
                        if (ledgerRootExists) {
                            if (!isInteractive) {
                                // If non interactive and force is set, then delete old data.
                                doFormat = force;
                            } else {
                                // Confirm with the admin.
                                doFormat = IOUtils
                                        .confirmPrompt("Ledger root already exists. "
                                                + "Are you sure to format bookkeeper metadata? "
                                                + "This may cause data loss.");
                            }
                        }
                        if (!doFormat) {
                            return false;
                        }
                        // Format ledger metadata first, then the registration data.
                        driver.getLedgerManagerFactory().format(
                                conf,
                                driver.getLayoutManager());
                        return regManager.format();
                    }
                } catch (Exception e) {
                    // Wrap checked exceptions so they can cross the Function boundary.
                    throw new UncheckedExecutionException(e.getMessage(), e);
                }
            }
        });
    }
/**
* Intializes new cluster by creating required znodes for the cluster. If
* ledgersrootpath is already existing then it will error out.
*
* @param conf
* @return
* @throws Exception
*/
public static boolean initNewCluster(ServerConfiguration conf) throws Exception {
return runFunctionWithRegistrationManager(conf, rm -> {
try {
return rm.initNewCluster();
} catch (Exception e) {
throw new UncheckedExecutionException(e.getMessage(), e);
}
});
}
    /**
     * Nukes existing cluster metadata. But it does only if the provided
     * ledgersRootPath matches with configuration's zkLedgersRootPath and
     * provided instanceid matches with the cluster metadata. If force is
     * mentioned then instanceid will not be validated.
     *
     * @param conf server configuration pointing at the metadata service
     * @param ledgersRootPath ledgers root path the caller believes is configured;
     *            must match the configuration's resolved path
     * @param instanceId expected cluster instance id (ignored when force is true)
     * @param force skip the instance-id validation
     * @return true when the cluster metadata was removed, false when validation failed
     * @throws Exception on metadata service errors
     */
    public static boolean nukeExistingCluster(ServerConfiguration conf, String ledgersRootPath, String instanceId,
                                              boolean force) throws Exception {
        // Safety check 1: the caller-supplied root path must match the config.
        String confLedgersRootPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(conf);
        if (!confLedgersRootPath.equals(ledgersRootPath)) {
            LOG.error("Provided ledgerRootPath : {} is not matching with config's ledgerRootPath: {}, "
                    + "so exiting nuke operation", ledgersRootPath, confLedgersRootPath);
            return false;
        }
        return runFunctionWithRegistrationManager(conf, rm -> {
            try {
                // Safety check 2: unless forced, the instance id must match.
                if (!force) {
                    String readInstanceId = rm.getClusterInstanceId();
                    if ((instanceId == null) || !instanceId.equals(readInstanceId)) {
                        LOG.error("Provided InstanceId : {} is not matching with cluster InstanceId in ZK: {}",
                                instanceId, readInstanceId);
                        return false;
                    }
                }
                return rm.nukeExistingCluster();
            } catch (Exception e) {
                // Wrap checked exceptions so they can cross the lambda boundary.
                throw new UncheckedExecutionException(e.getMessage(), e);
            }
        });
    }
    /**
     * Initializes bookie, by making sure that the journalDir, ledgerDirs and
     * indexDirs are empty and there is no registered Bookie with this BookieId.
     *
     * @param conf server configuration describing this bookie
     * @return true when the bookie can be initialized; false when a directory is
     *         non-empty, the bookie id is still registered, or a cookie exists
     * @throws Exception on metadata service errors
     */
    public static boolean initBookie(ServerConfiguration conf) throws Exception {
        /*
         * make sure that journalDirs, ledgerDirs and indexDirs are empty
         */
        File[] journalDirs = conf.getJournalDirs();
        if (!validateDirectoriesAreEmpty(journalDirs, "JournalDir")) {
            return false;
        }
        File[] ledgerDirs = conf.getLedgerDirs();
        if (!validateDirectoriesAreEmpty(ledgerDirs, "LedgerDir")) {
            return false;
        }
        // Index dirs are optional; only validate when configured.
        File[] indexDirs = conf.getIndexDirs();
        if (indexDirs != null) {
            if (!validateDirectoriesAreEmpty(indexDirs, "IndexDir")) {
                return false;
            }
        }
        return runFunctionWithRegistrationManager(conf, rm -> {
            try {
                /*
                 * make sure that there is no bookie registered with the same
                 * bookieid and the cookie for the same bookieid is not existing.
                 */
                BookieId bookieId = BookieImpl.getBookieId(conf);
                if (rm.isBookieRegistered(bookieId)) {
                    LOG.error("Bookie with bookieId: {} is still registered, "
                            + "If this node is running bookie process, try stopping it first.", bookieId);
                    return false;
                }
                try {
                    rm.readCookie(bookieId);
                    LOG.error("Cookie still exists in the ZK for this bookie: {}, try formatting the bookie", bookieId);
                    return false;
                } catch (BookieException.CookieNotFoundException nfe) {
                    // it is expected for readCookie to fail with
                    // BookieException.CookieNotFoundException
                }
                return true;
            } catch (Exception e) {
                // Wrap checked exceptions so they can cross the lambda boundary.
                throw new UncheckedExecutionException(e.getMessage(), e);
            }
        });
    }
private static boolean validateDirectoriesAreEmpty(File[] dirs, String typeOfDir) {
for (File dir : dirs) {
File[] dirFiles = dir.listFiles();
if ((dirFiles != null) && dirFiles.length != 0) {
LOG.error("{}: {} is existing and its not empty, try formatting the bookie", typeOfDir, dir);
return false;
}
}
return true;
}
    /**
     * This method returns an iterable object for the list of ledger identifiers of
     * the ledgers currently available.
     *
     * <p>The returned iterator lazily walks the ledger ranges supplied by the
     * ledger manager, flattening each range into its ledger ids.
     *
     * @return an iterable object for the list of ledger identifiers
     * @throws IOException if the list of ledger identifiers cannot be read from the
     *         metadata store
     */
    public Iterable<Long> listLedgers()
            throws IOException {
        final LedgerRangeIterator iterator = bkc.getLedgerManager().getLedgerRanges(0);
        return new Iterable<Long>() {
            @Override
            public Iterator<Long> iterator() {
                return new Iterator<Long>() {
                    // Iterator over the ids of the range currently being consumed;
                    // null until the first range is fetched.
                    Iterator<Long> currentRange = null;
                    @Override
                    public boolean hasNext() {
                        try {
                            // More ranges are available, or the current range
                            // still has unconsumed ids.
                            if (iterator.hasNext()) {
                                return true;
                            } else if (currentRange != null) {
                                if (currentRange.hasNext()) {
                                    return true;
                                }
                            }
                        } catch (IOException e) {
                            // Swallowed by design: hasNext() cannot throw, so a
                            // metadata read error terminates the iteration.
                            LOG.error("Error while checking if there is a next element", e);
                        }
                        return false;
                    }
                    @Override
                    public Long next() throws NoSuchElementException {
                        try {
                            // Advance to the next range when the current one is
                            // exhausted (or before the first call).
                            if ((currentRange == null) || (!currentRange.hasNext())) {
                                currentRange = iterator.next().getLedgers().iterator();
                            }
                        } catch (IOException e) {
                            LOG.error("Error while reading the next element", e);
                            throw new NoSuchElementException(e.getMessage());
                        }
                        return currentRange.next();
                    }
                    @Override
                    public void remove()
                            throws UnsupportedOperationException {
                        throw new UnsupportedOperationException();
                    }
                };
            }
        };
    }
/**
* @return the metadata for the passed ledger handle
*/
public LedgerMetadata getLedgerMetadata(LedgerHandle lh) {
return lh.getLedgerMetadata();
}
    /**
     * Lazily creates and caches the ledger under-replication manager.
     *
     * <p>NOTE(review): the lazy initialization is not synchronized — presumably
     * admin operations are single-threaded per instance; confirm before calling
     * concurrently.
     *
     * @return the cached (or newly created) under-replication manager
     * @throws CompatibilityException on layout incompatibility
     * @throws UnavailableException when the metadata service is unavailable
     * @throws InterruptedException if interrupted while creating the manager
     */
    private LedgerUnderreplicationManager getUnderreplicationManager()
            throws CompatibilityException, UnavailableException, InterruptedException {
        if (underreplicationManager == null) {
            underreplicationManager = mFactory.newLedgerUnderreplicationManager();
        }
        return underreplicationManager;
    }
    /**
     * Lazily creates and caches the ledger auditor manager.
     *
     * <p>NOTE(review): the lazy initialization is not synchronized — presumably
     * admin operations are single-threaded per instance; confirm before calling
     * concurrently.
     *
     * @return the cached (or newly created) ledger auditor manager
     * @throws IOException on metadata access errors
     * @throws InterruptedException if interrupted while creating the manager
     */
    private LedgerAuditorManager getLedgerAuditorManager()
            throws IOException, InterruptedException {
        if (ledgerAuditorManager == null) {
            ledgerAuditorManager = mFactory.newLedgerAuditorManager();
        }
        return ledgerAuditorManager;
    }
/**
* Setter for LostBookieRecoveryDelay value (in seconds) in Zookeeper.
*
* @param lostBookieRecoveryDelay
* lostBookieRecoveryDelay value (in seconds) to set
* @throws CompatibilityException
* @throws KeeperException
* @throws InterruptedException
* @throws UnavailableException
*/
public void setLostBookieRecoveryDelay(int lostBookieRecoveryDelay)
throws CompatibilityException, KeeperException, InterruptedException, UnavailableException {
LedgerUnderreplicationManager urlManager = getUnderreplicationManager();
urlManager.setLostBookieRecoveryDelay(lostBookieRecoveryDelay);
}
/**
* Returns the current LostBookieRecoveryDelay value (in seconds) in Zookeeper.
*
* @return
* current lostBookieRecoveryDelay value (in seconds)
* @throws CompatibilityException
* @throws KeeperException
* @throws InterruptedException
* @throws UnavailableException
*/
public int getLostBookieRecoveryDelay()
throws CompatibilityException, KeeperException, InterruptedException, UnavailableException {
LedgerUnderreplicationManager urlManager = getUnderreplicationManager();
return urlManager.getLostBookieRecoveryDelay();
}
    /**
     * Trigger AuditTask by resetting lostBookieRecoveryDelay to its current
     * value. If Autorecovery is not enabled or if there is no Auditor then this
     * method will throw UnavailableException.
     *
     * @throws CompatibilityException on layout incompatibility
     * @throws KeeperException on ZooKeeper errors
     * @throws InterruptedException if interrupted
     * @throws UnavailableException if autorecovery is disabled or no auditor is elected
     * @throws IOException on metadata access errors
     */
    public void triggerAudit()
            throws CompatibilityException, KeeperException, InterruptedException, UnavailableException, IOException {
        LedgerUnderreplicationManager urlManager = getUnderreplicationManager();
        if (!urlManager.isLedgerReplicationEnabled()) {
            LOG.error("Autorecovery is disabled. So giving up!");
            throw new UnavailableException("Autorecovery is disabled. So giving up!");
        }
        BookieId auditorId = getLedgerAuditorManager().getCurrentAuditor();
        if (auditorId == null) {
            LOG.error("No auditor elected, though Autorecovery is enabled. So giving up.");
            throw new UnavailableException("No auditor elected, though Autorecovery is enabled. So giving up.");
        }
        // Writing the delay back with its current value is enough to kick the
        // auditor's audit task.
        int previousLostBookieRecoveryDelayValue = urlManager.getLostBookieRecoveryDelay();
        LOG.info("Resetting LostBookieRecoveryDelay value: {}, to kickstart audit task",
                previousLostBookieRecoveryDelayValue);
        urlManager.setLostBookieRecoveryDelay(previousLostBookieRecoveryDelayValue);
    }
/**
* Triggers AuditTask by resetting lostBookieRecoveryDelay and then make
* sure the ledgers stored in the given decommissioning bookie are properly
* replicated and they are not underreplicated because of the given bookie.
* This method waits untill there are no underreplicatedledgers because of this
* bookie. If the given Bookie is not shutdown yet, then it will throw
* BKIllegalOpException.
*
* @param bookieAddress
* address of the decommissioning bookie
* @throws CompatibilityException
* @throws UnavailableException
* @throws KeeperException
* @throws InterruptedException
* @throws IOException
* @throws BKAuditException
* @throws TimeoutException
* @throws BKException
*/
public void decommissionBookie(BookieId bookieAddress)
        throws CompatibilityException, UnavailableException, KeeperException, InterruptedException, IOException,
        BKAuditException, TimeoutException, BKException {
    // The bookie must already be shut down: if it is still listed as writable or
    // read-only it is alive, and decommissioning a live bookie is an illegal operation.
    if (getAvailableBookies().contains(bookieAddress) || getReadOnlyBookies().contains(bookieAddress)) {
        LOG.error("Bookie: {} is not shutdown yet", bookieAddress);
        throw BKException.create(BKException.Code.IllegalOpException);
    }
    triggerAudit();
    /*
     * Sleep for 30 secs, so that the Auditor gets a chance to trigger its
     * forced audit task and let the under-replication manager
     * do its replication process.
     */
    Thread.sleep(30 * 1000);
    /*
     * get the collection of the ledgers which are stored in this
     * bookie, by making a call to
     * bookieLedgerIndexer.getBookieToLedgerIndex.
     */
    BookieLedgerIndexer bookieLedgerIndexer = new BookieLedgerIndexer(bkc.ledgerManager);
    Map<String, Set<Long>> bookieToLedgersMap = bookieLedgerIndexer.getBookieToLedgerIndex();
    Set<Long> ledgersStoredInThisBookie = bookieToLedgersMap.get(bookieAddress.toString());
    if ((ledgersStoredInThisBookie != null) && (!ledgersStoredInThisBookie.isEmpty())) {
        /*
         * wait until all the ledgers are replicated to other
         * bookies by making sure that these ledgers' metadata don't
         * contain this bookie as part of their ensemble.
         */
        waitForLedgersToBeReplicated(ledgersStoredInThisBookie, bookieAddress, bkc.ledgerManager);
    }
    // for double-checking, check if any ledgers are listed as underreplicated because of this bookie
    Predicate<List<String>> predicate = replicasList -> replicasList.contains(bookieAddress.toString());
    Iterator<UnderreplicatedLedger> urLedgerIterator = underreplicationManager.listLedgersToRereplicate(predicate);
    if (urLedgerIterator.hasNext()) {
        // if there are any, then wait and make sure those ledgers are replicated properly
        LOG.info("Still in some underreplicated ledgers metadata, this bookie is part of its ensemble. "
                + "Have to make sure that those ledger fragments are rereplicated");
        List<Long> urLedgers = new ArrayList<>();
        urLedgerIterator.forEachRemaining((urLedger) -> {
            urLedgers.add(urLedger.getLedgerId());
        });
        waitForLedgersToBeReplicated(urLedgers, bookieAddress, bkc.ledgerManager);
    }
}
/**
 * Blocks until none of the given ledgers list {@code thisBookieAddress} in any of
 * their ensembles any more.
 *
 * <p>NOTE: the passed {@code ledgers} collection is pruned in place (ledgers that
 * no longer reference this bookie are removed), so callers must pass a mutable
 * collection.
 *
 * <p>NOTE(review): {@code TimeoutException} is declared but never thrown here —
 * the loop waits indefinitely; confirm whether an upper bound was intended.
 *
 * @param ledgers ledger ids to wait on; mutated (pruned) as they complete
 * @param thisBookieAddress the decommissioning bookie
 * @param ledgerManager ledger manager used to read ledger metadata
 */
private void waitForLedgersToBeReplicated(Collection<Long> ledgers, BookieId thisBookieAddress,
        LedgerManager ledgerManager) throws InterruptedException, TimeoutException {
    int maxSleepTimeInBetweenChecks = 5 * 60 * 1000; // 5 minutes
    int sleepTimePerLedger = 3 * 1000; // 3 secs
    // A ledger is done when no entry of it is expected to be stored on this bookie.
    Predicate<Long> validateBookieIsNotPartOfEnsemble = ledgerId -> !areEntriesOfLedgerStoredInTheBookie(ledgerId,
            thisBookieAddress, ledgerManager);
    ledgers.removeIf(validateBookieIsNotPartOfEnsemble);
    while (!ledgers.isEmpty()) {
        // Sleep proportionally to the number of outstanding ledgers, capped at 5 minutes;
        // the comparison is done in long arithmetic to avoid int overflow for huge counts.
        int sleepTimeForThisCheck = (long) ledgers.size() * sleepTimePerLedger > maxSleepTimeInBetweenChecks
                ? maxSleepTimeInBetweenChecks : ledgers.size() * sleepTimePerLedger;
        LOG.info("Count of Ledgers which need to be rereplicated: {}, waiting {} seconds for next check",
                ledgers.size(), sleepTimeForThisCheck / 1000);
        Thread.sleep(sleepTimeForThisCheck);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Making sure following ledgers replication to be completed: {}", ledgers);
        }
        ledgers.removeIf(validateBookieIsNotPartOfEnsemble);
    }
}
/**
 * Checks whether the given bookie is expected to store any entry of the given
 * ledger, according to the ledger's current metadata read via the ledger manager.
 *
 * @param ledgerId ledger to inspect
 * @param bookieAddress bookie to look for in the ledger's ensembles
 * @param ledgerManager ledger manager used to read the ledger metadata
 * @return true if at least one entry of the ledger is striped to the bookie;
 *         false otherwise, or if the ledger no longer exists in metadata
 * @throws RuntimeException wrapping interruption or any metadata-read failure
 *         other than "ledger does not exist"
 */
public static boolean areEntriesOfLedgerStoredInTheBookie(long ledgerId, BookieId bookieAddress,
        LedgerManager ledgerManager) {
    try {
        LedgerMetadata ledgerMetadata = ledgerManager.readLedgerMetadata(ledgerId).get().getValue();
        return areEntriesOfLedgerStoredInTheBookie(ledgerId, bookieAddress, ledgerMetadata);
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(ie);
    } catch (ExecutionException e) {
        // instanceof (rather than an exact getClass() comparison) is null-safe and
        // also covers subclasses of the "no such ledger" exception.
        if (e.getCause() instanceof BKException.BKNoSuchLedgerExistsOnMetadataServerException) {
            // A deleted ledger trivially has no entries on this bookie.
            if (LOG.isDebugEnabled()) {
                LOG.debug("Ledger: {} has been deleted", ledgerId);
            }
            return false;
        } else {
            LOG.error("Got exception while trying to read LedgerMetadata of " + ledgerId, e);
            throw new RuntimeException(e);
        }
    }
}
/**
 * Checks, purely from the supplied metadata, whether any entry of the ledger is
 * expected to be stored on the given bookie.
 *
 * @param ledgerId id of the ledger being inspected
 * @param bookieAddress bookie to look for
 * @param ledgerMetadata metadata describing the ledger's segment ensembles
 * @return true if some segment stripes at least one entry to the bookie
 */
public static boolean areEntriesOfLedgerStoredInTheBookie(long ledgerId, BookieId bookieAddress,
        LedgerMetadata ledgerMetadata) {
    int segmentNo = 0;
    // Walk the segments in order; segmentNo tracks the position of each ensemble.
    for (List<BookieId> segmentEnsemble : ledgerMetadata.getAllEnsembles().values()) {
        if (segmentEnsemble.contains(bookieAddress)
                && areEntriesOfSegmentStoredInTheBookie(ledgerMetadata, bookieAddress, segmentNo)) {
            return true;
        }
        segmentNo++;
    }
    return false;
}
/**
 * Decides whether the segment {@code segmentNo} of the ledger stripes at least
 * one entry to {@code bookieAddress}, mirroring the fragment logic used by
 * LedgerChecker/LedgerFragment.
 *
 * @param ledgerMetadata metadata of the ledger
 * @param bookieAddress bookie to check
 * @param segmentNo zero-based index of the segment in the ensemble map
 * @return true if the bookie is expected to hold some entry of this segment
 */
private static boolean areEntriesOfSegmentStoredInTheBookie(LedgerMetadata ledgerMetadata,
        BookieId bookieAddress, int segmentNo) {
    boolean isLedgerClosed = ledgerMetadata.isClosed();
    int ensembleSize = ledgerMetadata.getEnsembleSize();
    int writeQuorumSize = ledgerMetadata.getWriteQuorumSize();
    List<Entry<Long, ? extends List<BookieId>>> segments =
            new LinkedList<>(ledgerMetadata.getAllEnsembles().entrySet());
    List<BookieId> currentSegmentEnsemble = segments.get(segmentNo).getValue();
    boolean lastSegment = (segmentNo == (segments.size() - 1));
    /*
     * Checking the last segment of the ledger can be complicated in
     * some cases. In the case that the ledger is closed, we can just
     * check the fragments of the segment as normal, except in the case
     * that no entry was ever written to the ledger, in which case we
     * check no fragments.
     *
     * Following the same approach as in LedgerChecker.checkLedger
     */
    if (lastSegment && isLedgerClosed && (ledgerMetadata.getLastEntryId() < segments.get(segmentNo).getKey())) {
        return false;
    }
    /*
     * If current segment ensemble doesn't contain this bookie then return
     * false.
     */
    if (!currentSegmentEnsemble.contains(bookieAddress)) {
        return false;
    }
    /*
     * if ensembleSize is equal to writeQuorumSize, then of course all
     * the entries of this segment are supposed to be stored in this
     * bookie. If this is the last segment of the ledger and if the ledger
     * is not closed (this is a corner case), then we have to return
     * true. For more info. Check BOOKKEEPER-237 and BOOKKEEPER-325.
     */
    if ((lastSegment && !isLedgerClosed) || (ensembleSize == writeQuorumSize)) {
        return true;
    }
    /*
     * the following check is required because ensembleSize can be
     * greater than writeQuorumSize and in this case if there are only a
     * couple of entries then based on RoundRobinDistributionSchedule
     * there might not be any entry copy in this bookie though this
     * bookie is part of the ensemble of this segment. If no entry is
     * stored in this bookie then we should return false, because the
     * ReplicationWorker won't take care of fixing the ledgerMetadata of
     * this segment in this case.
     *
     * if ensembleSize > writeQuorumSize, then in LedgerFragment.java
     * firstEntryID may not be equal to firstStoredEntryId, and lastEntryId
     * may not be equal to lastStoredEntryId. firstStoredEntryId and
     * lastStoredEntryId will be LedgerHandle.INVALID_ENTRY_ID, if no
     * entry of this segment is stored in this bookie. In this case
     * LedgerChecker.verifyLedgerFragment will not consider it as an
     * unavailable/bad fragment though this bookie is part of the
     * ensemble of the segment and it is down.
     */
    DistributionSchedule distributionSchedule = new RoundRobinDistributionSchedule(
            ledgerMetadata.getWriteQuorumSize(), ledgerMetadata.getAckQuorumSize(),
            ledgerMetadata.getEnsembleSize());
    int thisBookieIndexInCurrentEnsemble = currentSegmentEnsemble.indexOf(bookieAddress);
    long firstEntryId = segments.get(segmentNo).getKey();
    // The segment spans from its own first entry id up to (next segment's first id - 1),
    // or to the ledger's last entry id when this is the last segment.
    long lastEntryId = lastSegment ? ledgerMetadata.getLastEntryId() : segments.get(segmentNo + 1).getKey() - 1;
    long firstStoredEntryId = LedgerHandle.INVALID_ENTRY_ID;
    long firstEntryIter = firstEntryId;
    // following the same approach followed in LedgerFragment.getFirstStoredEntryId():
    // scanning ensembleSize consecutive entry ids is enough, because the round-robin
    // striping pattern repeats with that period.
    for (int i = 0; i < ensembleSize && firstEntryIter <= lastEntryId; i++) {
        if (distributionSchedule.hasEntry(firstEntryIter, thisBookieIndexInCurrentEnsemble)) {
            firstStoredEntryId = firstEntryIter;
            break;
        } else {
            firstEntryIter++;
        }
    }
    return firstStoredEntryId != LedgerHandle.INVALID_ENTRY_ID;
}
/**
 * Checks whether the given segment ensemble adheres to the client's ensemble
 * placement policy for the supplied write and ack quorum sizes.
 *
 * @param ensembleBookiesList
 *            ensemble of the segment
 * @param writeQuorumSize
 *            writeQuorumSize of the ledger
 * @param ackQuorumSize
 *            ackQuorumSize of the ledger
 * @return the degree of placement-policy adherence reported by the policy
 */
public PlacementPolicyAdherence isEnsembleAdheringToPlacementPolicy(List<BookieId> ensembleBookiesList,
        int writeQuorumSize, int ackQuorumSize) {
    // Delegate the decision to the client's configured placement policy.
    EnsemblePlacementPolicy policy = bkc.getPlacementPolicy();
    return policy.isEnsembleAdheringToPlacementPolicy(ensembleBookiesList, writeQuorumSize, ackQuorumSize);
}
/**
 * Computes replacement bookies for the positions of the given ensemble that must
 * change for the ensemble to adhere to the placement policy.
 *
 * @param ensembleBookiesList
 *            current ensemble of the segment
 * @param writeQuorumSize
 *            writeQuorumSize of the ledger
 * @param ackQuorumSize
 *            ackQuorumSize of the ledger
 * @return map from ensemble index to the replacement bookie for each position
 *         that changed; empty if the policy cannot produce an adhering ensemble
 *         or does not support the operation
 */
public Map<Integer, BookieId> replaceNotAdheringPlacementPolicyBookie(List<BookieId> ensembleBookiesList,
        int writeQuorumSize, int ackQuorumSize) {
    try {
        EnsemblePlacementPolicy.PlacementResult<List<BookieId>> placementResult = bkc.getPlacementPolicy()
                .replaceToAdherePlacementPolicy(ensembleBookiesList.size(), writeQuorumSize, ackQuorumSize,
                        new HashSet<>(), ensembleBookiesList);
        if (PlacementPolicyAdherence.FAIL != placementResult.getAdheringToPolicy()) {
            // Diff the proposed ensemble against the current one, position by position.
            Map<Integer, BookieId> targetMap = new HashMap<>();
            List<BookieId> newEnsembles = placementResult.getResult();
            for (int i = 0; i < ensembleBookiesList.size(); i++) {
                BookieId originBookie = ensembleBookiesList.get(i);
                BookieId newBookie = newEnsembles.get(i);
                if (!originBookie.equals(newBookie)) {
                    targetMap.put(i, newBookie);
                }
            }
            return targetMap;
        }
    } catch (UnsupportedOperationException e) {
        // Optional operation: policies that don't implement it are tolerated.
        LOG.warn("The placement policy: {} didn't support replaceToAdherePlacementPolicy, "
                + "ignore replace not adhere bookie.", bkc.getPlacementPolicy().getClass().getName());
    }
    return Collections.emptyMap();
}
/**
 * Makes async request for getting list of entries of ledger from a bookie
 * and returns a Future for the result.
 *
 * @param address
 *            BookieId of the bookie to query
 * @param ledgerId
 *            id of the ledger whose entries should be listed
 * @return future completing with the availability-of-entries summary for the
 *         ledger on that bookie
 */
public CompletableFuture<AvailabilityOfEntriesOfLedger> asyncGetListOfEntriesOfLedger(BookieId address,
        long ledgerId) {
    return bkc.getBookieClient().getListOfEntriesOfLedger(address, ledgerId);
}
/**
 * Returns the id of the bookie currently elected as Auditor.
 *
 * @return the current auditor's BookieId
 * @throws IOException if the auditor manager cannot be reached
 * @throws InterruptedException if the lookup is interrupted
 */
public BookieId getCurrentAuditor() throws IOException, InterruptedException {
    return getLedgerAuditorManager().getCurrentAuditor();
}
}
| 360 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/DistributionSchedule.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import java.util.BitSet;
import java.util.Map;
import org.apache.bookkeeper.net.BookieId;
/**
* This interface determines how entries are distributed among bookies.
*
* <p>Every entry gets replicated to some number of replicas. The first replica for
* an entry is given a replicaIndex of 0, and so on. To distribute write load,
* not all entries go to all bookies. Given an entry-id and replica index, an
* {@link DistributionSchedule} determines which bookie that replica should go
* to.
*/
public interface DistributionSchedule {

    /**
     * A write set represents the set of bookies to which
     * a request will be written.
     * The set consists of a list of indices which can be
     * used to lookup the bookie in the ensemble.
     */
    interface WriteSet {
        /**
         * The number of indexes in the write set.
         */
        int size();

        /**
         * Whether the set contains the given index.
         */
        boolean contains(int i);

        /**
         * Get the index at index i.
         */
        int get(int i);

        /**
         * Set the index at index i.
         * @return the previous value at that index.
         */
        int set(int i, int index);

        /**
         * Sort the indices.
         */
        void sort();

        /**
         * Index of a specified bookie index.
         * -1 if not found.
         */
        int indexOf(int index);

        /**
         * If we want a write set to cover all bookies in an ensemble
         * of size X, then all of the index from 0..X must exist in the
         * write set. This method appends those which are missing to the
         * end of the write set.
         */
        void addMissingIndices(int maxIndex);

        /**
         * Move an index from one position to another,
         * shifting the other indices accordingly.
         */
        void moveAndShift(int from, int to);

        /**
         * Recycle write set object when not in use.
         */
        void recycle();

        /**
         * Make a deep copy of this write set.
         */
        WriteSet copy();
    }

    /**
     * An immutable, empty write set: size 0, contains nothing, and throws on
     * any index-based access or mutation. {@code copy()} returns itself.
     */
    WriteSet NULL_WRITE_SET = new WriteSet() {
        @Override
        public int size() {
            return 0;
        }

        @Override
        public boolean contains(int i) {
            return false;
        }

        @Override
        public int get(int i) {
            throw new ArrayIndexOutOfBoundsException();
        }

        @Override
        public int set(int i, int index) {
            throw new ArrayIndexOutOfBoundsException();
        }

        @Override
        public void sort() {}

        @Override
        public int indexOf(int index) {
            return -1;
        }

        @Override
        public void addMissingIndices(int maxIndex) {
            throw new ArrayIndexOutOfBoundsException();
        }

        @Override
        public void moveAndShift(int from, int to) {
            throw new ArrayIndexOutOfBoundsException();
        }

        @Override
        public void recycle() {}

        @Override
        public WriteSet copy() {
            return this;
        }
    };

    /**
     * Returns the write quorum size of this schedule, i.e. the number of
     * replicas each entry is written to.
     */
    int getWriteQuorumSize();

    /**
     * Return the set of bookie indices to send the message to.
     */
    WriteSet getWriteSet(long entryId);

    /**
     * Return the ensemble bookie index for a given entry and an index
     * in its write set.
     *
     * @param entryId entry whose write set is consulted
     * @param writeSetIndex position within the write set
     * @return the index of the bookie in the ensemble
     */
    int getWriteSetBookieIndex(long entryId, int writeSetIndex);

    /**
     * Return the set of bookies indices to send the messages to the whole ensemble.
     *
     * @param entryId entry id used to calculate the ensemble.
     * @return the set of bookies indices to send the request.
     */
    WriteSet getEnsembleSet(long entryId);

    /**
     * An ack set represents the set of bookies from which
     * a response must be received so that an entry can be
     * considered to be replicated on a quorum.
     */
    interface AckSet {
        /**
         * Add a bookie response and check if quorum has been met.
         * @return true if quorum has been met, false otherwise
         */
        boolean completeBookieAndCheck(int bookieIndexHeardFrom);

        /**
         * Received failure response from a bookie and check if ack quorum
         * will be broken.
         *
         * @param bookieIndexHeardFrom
         *          bookie index that failed.
         * @param address
         *          bookie address
         * @return true if ack quorum is broken, false otherwise.
         */
        boolean failBookieAndCheck(int bookieIndexHeardFrom, BookieId address);

        /**
         * Return the list of bookies that already failed.
         *
         * @return the list of bookies that already failed.
         */
        Map<Integer, BookieId> getFailedBookies();

        /**
         * Invalidate a previous bookie response.
         * Used for reissuing write requests.
         */
        boolean removeBookieAndCheck(int bookie);

        /**
         * Recycle this ack set when not used anymore.
         */
        void recycle();
    }

    /**
     * Returns an ackset object, responses should be checked against this.
     */
    AckSet getAckSet();

    /**
     * Returns an ackset object useful to wait for all bookies in the ensemble,
     * responses should be checked against this.
     */
    AckSet getEnsembleAckSet();

    /**
     * Interface to keep track of which bookies in an ensemble, an action
     * has been performed for.
     */
    interface QuorumCoverageSet {
        /**
         * Add a bookie to the result set.
         *
         * @param bookieIndexHeardFrom Bookie we've just heard from
         */
        void addBookie(int bookieIndexHeardFrom, int rc);

        /**
         * check if all quorum in the set have had the action performed for it.
         *
         * @return whether all quorums have been covered
         */
        boolean checkCovered();
    }

    /**
     * Returns a fresh coverage set to track which quorums an action has been
     * performed for.
     */
    QuorumCoverageSet getCoverageSet();

    /**
     * Whether entry presents on given bookie index.
     *
     * @param entryId
     *            - entryId to check the presence on given bookie index
     * @param bookieIndex
     *            - bookie index on which it need to check the possible presence
     *            of the entry
     * @return true if it has entry otherwise false.
     */
    boolean hasEntry(long entryId, int bookieIndex);

    /**
     * Get the bitset representing the entries from entry 'startEntryId' to
     * 'lastEntryId', that would be striped to the bookie with index -
     * bookieIndex. Value of the bit with the 'bitIndex+n', indicate whether
     * entry with entryid 'startEntryId+n' is striped to this bookie or not.
     *
     * @param bookieIndex
     *            index of the bookie in the ensemble starting with 0
     * @param startEntryId
     *            starting entryid
     * @param lastEntryId
     *            last entryid
     * @return the bitset representing the entries that would be striped to the
     *         bookie
     */
    BitSet getEntriesStripedToTheBookie(int bookieIndex, long startEntryId, long lastEntryId);
}
| 361 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookieWatcherImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.ENSEMBLE_NOT_ADHERING_TO_PLACEMENT_POLICY_COUNT;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.NEW_ENSEMBLE_TIME;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.REPLACE_BOOKIE_TIME;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.WATCHER_SCOPE;
import static org.apache.bookkeeper.client.BookKeeperClientStats.CREATE_OP;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.BookKeeperServerStats;
import org.apache.bookkeeper.client.BKException.BKInterruptedException;
import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException;
import org.apache.bookkeeper.client.BKException.MetaStoreException;
import org.apache.bookkeeper.client.EnsemblePlacementPolicy.PlacementPolicyAdherence;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.common.util.MathUtils;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.discover.RegistrationClient;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieAddressResolver;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
/**
* This class is responsible for maintaining a consistent view of what bookies
* are available by reading Zookeeper (and setting watches on the bookie nodes).
* When a bookie fails, the other parts of the code turn to this class to find a
* replacement
*
*/
@StatsDoc(
name = WATCHER_SCOPE,
help = "Bookie watcher related stats"
)
@Slf4j
class BookieWatcherImpl implements BookieWatcher {

    // Maps an arbitrary failure cause to the BKException subtype callers of
    // FutureUtils.result(...) expect: BKExceptions pass through, interruption
    // becomes BKInterruptedException, anything else becomes MetaStoreException.
    private static final Function<Throwable, BKException> EXCEPTION_FUNC = cause -> {
        if (cause instanceof BKException) {
            log.error("Failed to get bookie list : ", cause);
            return (BKException) cause;
        } else if (cause instanceof InterruptedException) {
            log.error("Interrupted reading bookie list : ", cause);
            return new BKInterruptedException();
        } else {
            MetaStoreException mse = new MetaStoreException(cause);
            return mse;
        }
    };

    private final ClientConfiguration conf;
    private final RegistrationClient registrationClient;
    private final EnsemblePlacementPolicy placementPolicy;
    @StatsDoc(
        name = NEW_ENSEMBLE_TIME,
        help = "operation stats of new ensembles",
        parent = CREATE_OP
    )
    private final OpStatsLogger newEnsembleTimer;
    @StatsDoc(
        name = REPLACE_BOOKIE_TIME,
        help = "operation stats of replacing bookie in an ensemble"
    )
    private final OpStatsLogger replaceBookieTimer;
    @StatsDoc(
        name = ENSEMBLE_NOT_ADHERING_TO_PLACEMENT_POLICY_COUNT,
        help = "total number of newEnsemble/replaceBookie operations failed to adhere"
            + " EnsemblePlacementPolicy"
    )
    private final Counter ensembleNotAdheringToPlacementPolicy;

    // Bookies that will not be preferred to be chosen in a new ensemble.
    // Entries expire automatically after conf.getBookieQuarantineTimeSeconds().
    final Cache<BookieId, Boolean> quarantinedBookies;

    // Current cluster view; updated by the registration-client watch callbacks.
    private volatile Set<BookieId> writableBookies = Collections.emptySet();
    private volatile Set<BookieId> readOnlyBookies = Collections.emptySet();

    // Futures of the first watch registrations; used by initialBlockingBookieRead
    // so that only the first caller registers the watches.
    private CompletableFuture<?> initialWritableBookiesFuture = null;
    private CompletableFuture<?> initialReadonlyBookiesFuture = null;

    private final BookieAddressResolver bookieAddressResolver;

    public BookieWatcherImpl(ClientConfiguration conf,
                             EnsemblePlacementPolicy placementPolicy,
                             RegistrationClient registrationClient,
                             BookieAddressResolver bookieAddressResolver,
                             StatsLogger statsLogger)  {
        this.conf = conf;
        this.bookieAddressResolver = bookieAddressResolver;
        this.placementPolicy = placementPolicy;
        this.registrationClient = registrationClient;
        this.quarantinedBookies = CacheBuilder.newBuilder()
                .expireAfterWrite(conf.getBookieQuarantineTimeSeconds(), TimeUnit.SECONDS)
                .removalListener(new RemovalListener<BookieId, Boolean>() {
                    @Override
                    public void onRemoval(RemovalNotification<BookieId, Boolean> bookie) {
                        log.info("Bookie {} is no longer quarantined", bookie.getKey());
                    }
                }).build();
        this.newEnsembleTimer = statsLogger.getOpStatsLogger(NEW_ENSEMBLE_TIME);
        this.replaceBookieTimer = statsLogger.getOpStatsLogger(REPLACE_BOOKIE_TIME);
        this.ensembleNotAdheringToPlacementPolicy = statsLogger
                .getCounter(BookKeeperServerStats.ENSEMBLE_NOT_ADHERING_TO_PLACEMENT_POLICY_COUNT);
    }

    /** Returns the current set of writable bookies from the metadata store. */
    @Override
    public Set<BookieId> getBookies() throws BKException {
        try {
            return FutureUtils.result(registrationClient.getWritableBookies(), EXCEPTION_FUNC).getValue();
        } catch (BKInterruptedException ie) {
            Thread.currentThread().interrupt();
            throw ie;
        }
    }

    /** Returns all registered bookies (writable and read-only) from the metadata store. */
    @Override
    public Set<BookieId> getAllBookies() throws BKException {
        try {
            return FutureUtils.result(registrationClient.getAllBookies(), EXCEPTION_FUNC).getValue();
        } catch (BKInterruptedException ie) {
            Thread.currentThread().interrupt();
            throw ie;
        }
    }

    @Override
    public BookieAddressResolver getBookieAddressResolver() {
        return this.bookieAddressResolver;
    }

    /** Returns the current set of read-only bookies from the metadata store. */
    @Override
    public Set<BookieId> getReadOnlyBookies()
            throws BKException {
        try {
            return FutureUtils.result(registrationClient.getReadOnlyBookies(), EXCEPTION_FUNC).getValue();
        } catch (BKInterruptedException ie) {
            Thread.currentThread().interrupt();
            throw ie;
        }
    }

    /**
     * Determine if a bookie should be considered unavailable.
     * This does not require a network call because this class
     * maintains a current view of readonly and writable bookies.
     * An unavailable bookie is one that is neither read only nor
     * writable.
     *
     * @param id
     *          Bookie to check
     * @return whether or not the given bookie is unavailable
     */
    @Override
    public boolean isBookieUnavailable(BookieId id) {
        return !readOnlyBookies.contains(id) && !writableBookies.contains(id);
    }

    // this callback is already not executed in zookeeper thread
    private synchronized void processWritableBookiesChanged(Set<BookieId> newBookieAddrs) {
        // Update watcher outside ZK callback thread, to avoid deadlock in case some other
        // component is trying to do a blocking ZK operation
        this.writableBookies = newBookieAddrs;
        placementPolicy.onClusterChanged(newBookieAddrs, readOnlyBookies);
        // we don't need to close clients here, because:
        // a. the dead bookies will be removed from topology, which will not be used in new ensemble.
        // b. the read sequence will be reordered based on znode availability, so most of the reads
        //    will not be sent to them.
        // c. the close here is just to disconnect the channel, which doesn't remove the channel from
        //    from pcbc map. we don't really need to disconnect the channel here, since if a bookie is
        //    really down, PCBC will disconnect itself based on netty callback. if we try to disconnect
        //    here, it actually introduces side-effects on case d.
        // d. closing the client here will affect latency if the bookie is alive but just being flaky
        //    on its znode registration due zookeeper session expire.
        // e. if we want to permanently remove a bookkeeper client, we should watch on the cookies' list.
        // if (bk.getBookieClient() != null) {
        //     bk.getBookieClient().closeClients(deadBookies);
        // }
    }

    private synchronized void processReadOnlyBookiesChanged(Set<BookieId> readOnlyBookies) {
        this.readOnlyBookies = readOnlyBookies;
        placementPolicy.onClusterChanged(writableBookies, readOnlyBookies);
    }

    /**
     * Blocks until bookies are read from zookeeper, used in the {@link BookKeeper} constructor.
     *
     * <p>Idempotent: the watch registrations happen only on the first call; subsequent
     * calls just wait on the same futures.
     *
     * @throws BKException when failed to read bookies
     */
    public void initialBlockingBookieRead() throws BKException {
        CompletableFuture<?> writable;
        CompletableFuture<?> readonly;
        synchronized (this) {
            if (initialReadonlyBookiesFuture == null) {
                assert initialWritableBookiesFuture == null;
                writable = this.registrationClient.watchWritableBookies(
                            bookies -> processWritableBookiesChanged(bookies.getValue()));
                readonly = this.registrationClient.watchReadOnlyBookies(
                            bookies -> processReadOnlyBookiesChanged(bookies.getValue()));
                initialWritableBookiesFuture = writable;
                initialReadonlyBookiesFuture = readonly;
            } else {
                writable = initialWritableBookiesFuture;
                readonly = initialReadonlyBookiesFuture;
            }
        }
        try {
            FutureUtils.result(writable, EXCEPTION_FUNC);
        } catch (BKInterruptedException ie) {
            Thread.currentThread().interrupt();
            throw ie;
        }
        try {
            FutureUtils.result(readonly, EXCEPTION_FUNC);
        } catch (BKInterruptedException ie) {
            Thread.currentThread().interrupt();
            throw ie;
        } catch (Exception e) {
            // Deliberately best-effort: failure to read the read-only list is logged
            // but does not fail client construction (only the writable list is required).
            log.error("Failed getReadOnlyBookies: ", e);
        }
    }

    /**
     * Builds a new ensemble via the placement policy, preferring non-quarantined
     * bookies and falling back to the full bookie set when there aren't enough
     * healthy ones.
     */
    @Override
    public List<BookieId> newEnsemble(int ensembleSize, int writeQuorumSize,
        int ackQuorumSize, Map<String, byte[]> customMetadata)
            throws BKNotEnoughBookiesException {
        long startTime = MathUtils.nowInNano();
        EnsemblePlacementPolicy.PlacementResult<List<BookieId>> newEnsembleResponse;
        List<BookieId> socketAddresses;
        PlacementPolicyAdherence isEnsembleAdheringToPlacementPolicy;
        try {
            Set<BookieId> quarantinedBookiesSet = quarantinedBookies.asMap().keySet();
            newEnsembleResponse = placementPolicy.newEnsemble(ensembleSize, writeQuorumSize, ackQuorumSize,
                    customMetadata, new HashSet<BookieId>(quarantinedBookiesSet));
            socketAddresses = newEnsembleResponse.getResult();
            isEnsembleAdheringToPlacementPolicy = newEnsembleResponse.getAdheringToPolicy();
            if (isEnsembleAdheringToPlacementPolicy == PlacementPolicyAdherence.FAIL) {
                ensembleNotAdheringToPlacementPolicy.inc();
                if (ensembleSize > 1) {
                    log.warn("New ensemble: {} is not adhering to Placement Policy. quarantinedBookies: {}",
                            socketAddresses, quarantinedBookiesSet);
                }
            }
            // we try to only get from the healthy bookies first
            newEnsembleTimer.registerSuccessfulEvent(MathUtils.nowInNano() - startTime, TimeUnit.NANOSECONDS);
        } catch (BKNotEnoughBookiesException e) {
            if (log.isDebugEnabled()) {
                log.debug("Not enough healthy bookies available, using quarantined bookies");
            }
            newEnsembleResponse = placementPolicy.newEnsemble(
                    ensembleSize, writeQuorumSize, ackQuorumSize, customMetadata, new HashSet<>());
            socketAddresses = newEnsembleResponse.getResult();
            isEnsembleAdheringToPlacementPolicy = newEnsembleResponse.getAdheringToPolicy();
            if (isEnsembleAdheringToPlacementPolicy == PlacementPolicyAdherence.FAIL) {
                ensembleNotAdheringToPlacementPolicy.inc();
                log.warn("New ensemble: {} is not adhering to Placement Policy", socketAddresses);
            }
            newEnsembleTimer.registerFailedEvent(MathUtils.nowInNano() - startTime, TimeUnit.NANOSECONDS);
        }
        return socketAddresses;
    }

    /**
     * Picks a replacement for the bookie at {@code bookieIdx} in an existing
     * ensemble, preferring non-quarantined bookies first and falling back to
     * quarantined ones when there aren't enough healthy bookies.
     */
    @Override
    public BookieId replaceBookie(int ensembleSize, int writeQuorumSize, int ackQuorumSize,
                                  Map<String, byte[]> customMetadata,
                                  List<BookieId> existingBookies, int bookieIdx,
                                  Set<BookieId> excludeBookies)
            throws BKNotEnoughBookiesException {
        long startTime = MathUtils.nowInNano();
        BookieId addr = existingBookies.get(bookieIdx);
        EnsemblePlacementPolicy.PlacementResult<BookieId> replaceBookieResponse;
        BookieId socketAddress;
        PlacementPolicyAdherence isEnsembleAdheringToPlacementPolicy = PlacementPolicyAdherence.FAIL;
        try {
            // we exclude the quarantined bookies also first
            Set<BookieId> excludedBookiesAndQuarantinedBookies = new HashSet<BookieId>(
                    excludeBookies);
            Set<BookieId> quarantinedBookiesSet = quarantinedBookies.asMap().keySet();
            excludedBookiesAndQuarantinedBookies.addAll(quarantinedBookiesSet);
            replaceBookieResponse = placementPolicy.replaceBookie(
                    ensembleSize, writeQuorumSize, ackQuorumSize, customMetadata,
                    existingBookies, addr, excludedBookiesAndQuarantinedBookies);
            socketAddress = replaceBookieResponse.getResult();
            isEnsembleAdheringToPlacementPolicy = replaceBookieResponse.getAdheringToPolicy();
            if (isEnsembleAdheringToPlacementPolicy == PlacementPolicyAdherence.FAIL) {
                ensembleNotAdheringToPlacementPolicy.inc();
                log.warn(
                        "replaceBookie for bookie: {} in ensemble: {} is not adhering to placement policy and"
                                + " chose {}. excludedBookies {} and quarantinedBookies {}",
                        addr, existingBookies, socketAddress, excludeBookies, quarantinedBookiesSet);
            }
            replaceBookieTimer.registerSuccessfulEvent(MathUtils.nowInNano() - startTime, TimeUnit.NANOSECONDS);
        } catch (BKNotEnoughBookiesException e) {
            if (log.isDebugEnabled()) {
                log.debug("Not enough healthy bookies available, using quarantined bookies");
            }
            replaceBookieResponse = placementPolicy.replaceBookie(ensembleSize, writeQuorumSize, ackQuorumSize,
                    customMetadata, existingBookies, addr, excludeBookies);
            socketAddress = replaceBookieResponse.getResult();
            isEnsembleAdheringToPlacementPolicy = replaceBookieResponse.getAdheringToPolicy();
            if (isEnsembleAdheringToPlacementPolicy == PlacementPolicyAdherence.FAIL) {
                ensembleNotAdheringToPlacementPolicy.inc();
                log.warn(
                        "replaceBookie for bookie: {} in ensemble: {} is not adhering to placement policy and"
                                + " chose {}. excludedBookies {}",
                        addr, existingBookies, socketAddress, excludeBookies);
            }
            replaceBookieTimer.registerFailedEvent(MathUtils.nowInNano() - startTime, TimeUnit.NANOSECONDS);
        }
        return socketAddress;
    }

    /**
     * Quarantine <i>bookie</i> so it will not be preferred to be chosen for new ensembles.
     * @param bookie
     */
    @Override
    public void quarantineBookie(BookieId bookie) {
        if (quarantinedBookies.getIfPresent(bookie) == null) {
            quarantinedBookies.put(bookie, Boolean.TRUE);
            log.warn("Bookie {} has been quarantined because of read/write errors.", bookie);
        }
    }

    /** Clears the quarantine cache, making all bookies eligible for selection again. */
    @Override
    public void releaseAllQuarantinedBookies(){
        quarantinedBookies.invalidateAll();
    }
}
| 362 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/DefaultEnsemblePlacementPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import io.netty.util.HashedWheelTimer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException;
import org.apache.bookkeeper.client.BookieInfoReader.BookieInfo;
import org.apache.bookkeeper.client.WeightedRandomSelection.WeightedObject;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.feature.FeatureProvider;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.DNSToSwitchMapping;
import org.apache.bookkeeper.proto.BookieAddressResolver;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.commons.collections4.CollectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Default Ensemble Placement Policy, which picks bookies randomly.
*
* @see EnsemblePlacementPolicy
*/
public class DefaultEnsemblePlacementPolicy implements EnsemblePlacementPolicy {
    static final Logger LOG = LoggerFactory.getLogger(DefaultEnsemblePlacementPolicy.class);
    static final Set<BookieId> EMPTY_SET = new HashSet<BookieId>();

    // Whether disk-weight-based placement is enabled (set from the client configuration
    // in initialize()).
    private boolean isWeighted;
    // Upper bound on how strongly a high-weight bookie may be favoured by the selector.
    private int maxWeightMultiple;
    // Current set of writable bookies; access is guarded by rwLock.
    private Set<BookieId> knownBookies = new HashSet<BookieId>();
    // Per-bookie weight info backing the weighted random selector; guarded by rwLock.
    private Map<BookieId, WeightedObject> bookieInfoMap;
    // Weighted random selector; only initialized/used when isWeighted is true.
    private WeightedRandomSelection<BookieId> weightedSelection;
    // Guards knownBookies, bookieInfoMap and weightedSelection.
    private final ReentrantReadWriteLock rwLock;

    DefaultEnsemblePlacementPolicy() {
        bookieInfoMap = new HashMap<BookieId, WeightedObject>();
        rwLock = new ReentrantReadWriteLock();
    }

    /**
     * Selects {@code ensembleSize} bookies at random from the known writable
     * bookies, skipping any in {@code excludeBookies}. Selection is uniform, or
     * biased by disk weight when weighted placement is enabled.
     *
     * @throws BKNotEnoughBookiesException if not enough non-excluded bookies exist
     */
    @Override
    public PlacementResult<List<BookieId>> newEnsemble(int ensembleSize, int quorumSize, int ackQuorumSize,
            Map<String, byte[]> customMetadata, Set<BookieId> excludeBookies)
            throws BKNotEnoughBookiesException {
        ArrayList<BookieId> newBookies = new ArrayList<BookieId>(ensembleSize);
        if (ensembleSize <= 0) {
            // Nothing requested; report FAIL adherence with an empty list.
            return PlacementResult.of(newBookies, PlacementPolicyAdherence.FAIL);
        }
        List<BookieId> allBookies;
        rwLock.readLock().lock();
        try {
            // Snapshot the known bookies so we can shuffle without holding the lock.
            allBookies = new ArrayList<BookieId>(knownBookies);
        } finally {
            rwLock.readLock().unlock();
        }

        if (isWeighted) {
            // hold the readlock while selecting bookies. We don't want the list of bookies
            // changing while we are creating the ensemble
            rwLock.readLock().lock();
            try {
                if (CollectionUtils.subtract(allBookies, excludeBookies).size() < ensembleSize) {
                    throw new BKNotEnoughBookiesException();
                }
                while (ensembleSize > 0) {
                    // Draw until we hit a bookie that is neither excluded nor already chosen.
                    BookieId b = weightedSelection.getNextRandom();
                    if (newBookies.contains(b) || excludeBookies.contains(b)) {
                        continue;
                    }
                    newBookies.add(b);
                    --ensembleSize;
                    if (ensembleSize == 0) {
                        return PlacementResult.of(newBookies,
                                isEnsembleAdheringToPlacementPolicy(newBookies, quorumSize, ackQuorumSize));
                    }
                }
            } finally {
                rwLock.readLock().unlock();
            }
        } else {
            // Uniform selection: shuffle the snapshot and take the first eligible bookies.
            Collections.shuffle(allBookies);
            for (BookieId bookie : allBookies) {
                if (excludeBookies.contains(bookie)) {
                    continue;
                }
                newBookies.add(bookie);
                --ensembleSize;
                if (ensembleSize == 0) {
                    return PlacementResult.of(newBookies,
                            isEnsembleAdheringToPlacementPolicy(newBookies, quorumSize, ackQuorumSize));
                }
            }
        }
        // Ran out of eligible bookies before filling the ensemble.
        throw new BKNotEnoughBookiesException();
    }

    /**
     * Picks a replacement for {@code bookieToReplace} by requesting a one-bookie
     * ensemble that excludes the whole current ensemble.
     *
     * <p>NOTE(review): this adds the current ensemble to the caller-supplied
     * {@code excludeBookies} set, i.e. the argument is mutated.
     */
    @Override
    public PlacementResult<BookieId> replaceBookie(int ensembleSize, int writeQuorumSize, int ackQuorumSize,
            Map<String, byte[]> customMetadata, List<BookieId> currentEnsemble,
            BookieId bookieToReplace, Set<BookieId> excludeBookies)
            throws BKNotEnoughBookiesException {
        excludeBookies.addAll(currentEnsemble);
        List<BookieId> addresses = newEnsemble(1, 1, 1, customMetadata, excludeBookies).getResult();

        BookieId candidateAddr = addresses.get(0);
        // Evaluate adherence against the ensemble as it would look after the swap.
        List<BookieId> newEnsemble = new ArrayList<BookieId>(currentEnsemble);
        newEnsemble.set(currentEnsemble.indexOf(bookieToReplace), candidateAddr);
        return PlacementResult.of(candidateAddr,
                isEnsembleAdheringToPlacementPolicy(newEnsemble, writeQuorumSize, ackQuorumSize));
    }

    /**
     * Refreshes the known-bookie set on a cluster membership change.
     *
     * @return the bookies that disappeared from the writable set and are not
     *         read-only (i.e. presumed dead)
     */
    @Override
    public Set<BookieId> onClusterChanged(Set<BookieId> writableBookies,
            Set<BookieId> readOnlyBookies) {
        rwLock.writeLock().lock();
        try {
            HashSet<BookieId> deadBookies;
            deadBookies = new HashSet<BookieId>(knownBookies);
            deadBookies.removeAll(writableBookies);
            // readonly bookies should not be treated as dead bookies
            deadBookies.removeAll(readOnlyBookies);
            if (this.isWeighted) {
                for (BookieId b : deadBookies) {
                    this.bookieInfoMap.remove(b);
                }
                @SuppressWarnings("unchecked")
                Collection<BookieId> newBookies = CollectionUtils.subtract(writableBookies, knownBookies);
                for (BookieId b : newBookies) {
                    // Newly joined bookies start with a fresh BookieInfo until
                    // real weights arrive via updateBookieInfo().
                    this.bookieInfoMap.put(b, new BookieInfo());
                }
                if (deadBookies.size() > 0 || newBookies.size() > 0) {
                    this.weightedSelection.updateMap(this.bookieInfoMap);
                }
            }
            knownBookies = writableBookies;
            return deadBookies;
        } finally {
            rwLock.writeLock().unlock();
        }
    }

    // This policy does not track slow bookies; intentionally a no-op.
    @Override
    public void registerSlowBookie(BookieId bookieSocketAddress, long entryId) {
        return;
    }

    // No reordering: reads are issued in the write set's natural order.
    @Override
    public DistributionSchedule.WriteSet reorderReadSequence(
            List<BookieId> ensemble,
            BookiesHealthInfo bookiesHealthInfo,
            DistributionSchedule.WriteSet writeSet) {
        return writeSet;
    }

    // For LAC reads, extend the write set so every bookie in the ensemble is tried.
    @Override
    public DistributionSchedule.WriteSet reorderReadLACSequence(
            List<BookieId> ensemble,
            BookiesHealthInfo bookiesHealthInfo,
            DistributionSchedule.WriteSet writeSet) {
        writeSet.addMissingIndices(ensemble.size());
        return writeSet;
    }

    /**
     * Reads the weighted-placement settings from the configuration. The other
     * collaborators (DNS resolver, timer, feature provider, stats, resolver)
     * are not used by this policy.
     */
    @Override
    public EnsemblePlacementPolicy initialize(ClientConfiguration conf,
            Optional<DNSToSwitchMapping> optionalDnsResolver,
            HashedWheelTimer hashedWheelTimer,
            FeatureProvider featureProvider,
            StatsLogger statsLogger,
            BookieAddressResolver bookieAddressResolver) {
        this.isWeighted = conf.getDiskWeightBasedPlacementEnabled();
        if (this.isWeighted) {
            this.maxWeightMultiple = conf.getBookieMaxWeightMultipleForWeightBasedPlacement();
            this.weightedSelection = new WeightedRandomSelectionImpl<BookieId>(this.maxWeightMultiple);
        }
        return this;
    }

    /**
     * Merges the supplied per-bookie info into the local map and refreshes the
     * weighted selector, all under the write lock.
     */
    @Override
    public void updateBookieInfo(Map<BookieId, BookieInfo> bookieInfoMap) {
        rwLock.writeLock().lock();
        try {
            for (Map.Entry<BookieId, BookieInfo> e : bookieInfoMap.entrySet()) {
                this.bookieInfoMap.put(e.getKey(), e.getValue());
            }
            this.weightedSelection.updateMap(this.bookieInfoMap);
        } finally {
            rwLock.writeLock().unlock();
        }
    }

    @Override
    public void uninitalize() {
        // do nothing
    }

    /**
     * Random placement imposes no topology constraints, so every ensemble is
     * unconditionally reported as strictly adhering.
     */
    @Override
    public PlacementPolicyAdherence isEnsembleAdheringToPlacementPolicy(List<BookieId> ensembleList,
            int writeQuorumSize, int ackQuorumSize) {
        return PlacementPolicyAdherence.MEETS_STRICT;
    }
}
| 363 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/PendingAddOp.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.bookkeeper.proto.BookieProtocol.FLAG_HIGH_PRIORITY;
import static org.apache.bookkeeper.proto.BookieProtocol.FLAG_NONE;
import static org.apache.bookkeeper.proto.BookieProtocol.FLAG_RECOVERY_ADD;
import com.google.common.collect.ImmutableMap;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import io.netty.buffer.ByteBuf;
import io.netty.util.Recycler;
import io.netty.util.Recycler.Handle;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.ReferenceCounted;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.client.AsyncCallback.AddCallbackWithLatency;
import org.apache.bookkeeper.client.api.WriteFlag;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;
import org.apache.bookkeeper.util.MathUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This represents a pending add operation. When it has got success from all
* bookies, it sees if its at the head of the pending adds queue, and if yes,
* sends ack back to the application. If a bookie fails, a replacement is made
* and placed at the same position in the ensemble. The pending adds are then
* rereplicated.
*
*
*/
class PendingAddOp implements WriteCallback {
    private static final Logger LOG = LoggerFactory.getLogger(PendingAddOp.class);

    // Caller-supplied entry payload; ownership is handed to the digest manager
    // in initiate(), after which this field is nulled.
    ByteBuf payload;
    // Digested/packaged buffer actually sent to bookies; released in maybeRecycle().
    ReferenceCounted toSend;
    // Completion callback and the opaque context passed back to it.
    AddCallbackWithLatency cb;
    Object ctx;
    long entryId;
    int entryLength;

    // Tracks which write-set bookies have acknowledged this entry.
    DistributionSchedule.AckSet ackSet;
    // True once ack quorum (and, if enforced, fault-domain constraints) is satisfied.
    boolean completed = false;

    LedgerHandle lh;
    ClientContext clientCtx;
    // Recovery adds are sent with FLAG_RECOVERY_ADD | FLAG_HIGH_PRIORITY.
    boolean isRecoveryAdd = false;
    // Nanotime when the request was initiated; used for latency and timeout checks.
    volatile long requestTimeNanos;
    long qwcLatency; // Quorum Write Completion Latency after response from quorum bookies.
    // Bookies that have successfully acked this entry; used for fault-domain checks.
    Set<BookieId> addEntrySuccessBookies;
    long writeDelayedStartTime; // min fault domains completion latency after response from ack quorum bookies

    long currentLedgerLength;
    // Outstanding write requests; the op may only be recycled once this reaches zero.
    int pendingWriteRequests;
    boolean callbackTriggered;
    // True once initiate() has run; guards recycling of never-started ops.
    boolean hasRun;
    EnumSet<WriteFlag> writeFlags;
    boolean allowFailFast = false;
    List<BookieId> ensemble;

    /**
     * Obtains a (possibly recycled) op instance and resets all of its state for
     * a new add of {@code payload} to {@code ensemble}.
     */
    @SuppressFBWarnings("IS2_INCONSISTENT_SYNC")
    static PendingAddOp create(LedgerHandle lh, ClientContext clientCtx,
                               List<BookieId> ensemble,
                               ByteBuf payload, EnumSet<WriteFlag> writeFlags,
                               AddCallbackWithLatency cb, Object ctx) {
        PendingAddOp op = RECYCLER.get();
        op.lh = lh;
        op.clientCtx = clientCtx;
        op.isRecoveryAdd = false;
        op.cb = cb;
        op.ctx = ctx;
        op.entryId = LedgerHandle.INVALID_ENTRY_ID;
        op.currentLedgerLength = -1;
        op.payload = payload;
        op.entryLength = payload.readableBytes();

        op.completed = false;
        op.ensemble = ensemble;
        op.ackSet = lh.getDistributionSchedule().getAckSet();
        op.pendingWriteRequests = 0;
        op.callbackTriggered = false;
        op.hasRun = false;
        op.requestTimeNanos = Long.MAX_VALUE;
        op.allowFailFast = false;
        op.qwcLatency = 0;
        op.writeFlags = writeFlags;
        // Reuse the set across recycles to avoid reallocation.
        if (op.addEntrySuccessBookies == null) {
            op.addEntrySuccessBookies = new HashSet<>();
        } else {
            op.addEntrySuccessBookies.clear();
        }
        op.writeDelayedStartTime = -1;

        return op;
    }

    /**
     * Enable the recovery add flag for this operation.
     * @see LedgerHandle#asyncRecoveryAddEntry
     */
    PendingAddOp enableRecoveryAdd() {
        isRecoveryAdd = true;
        return this;
    }

    // Allow the bookie client to fail this write fast when a channel is unwritable.
    PendingAddOp allowFailFastOnUnwritableChannel() {
        allowFailFast = true;
        return this;
    }

    void setEntryId(long entryId) {
        this.entryId = entryId;
    }

    void setLedgerLength(long ledgerLength) {
        this.currentLedgerLength = ledgerLength;
    }

    long getEntryId() {
        return this.entryId;
    }

    // Sends the packaged entry to one bookie of the ensemble and counts the
    // request as pending until its writeComplete() arrives.
    private void sendWriteRequest(List<BookieId> ensemble, int bookieIndex) {
        int flags = isRecoveryAdd ? FLAG_RECOVERY_ADD | FLAG_HIGH_PRIORITY : FLAG_NONE;

        clientCtx.getBookieClient().addEntry(ensemble.get(bookieIndex),
                lh.ledgerId, lh.ledgerKey, entryId, toSend, this, bookieIndex,
                flags, allowFailFast, lh.writeFlags);
        ++pendingWriteRequests;
    }

    // Returns true (and errors the op out) if the quorum-wait timeout has elapsed.
    boolean maybeTimeout() {
        if (MathUtils.elapsedNanos(requestTimeNanos) >= clientCtx.getConf().addEntryQuorumTimeoutNanos) {
            timeoutQuorumWait();
            return true;
        }
        return false;
    }

    /**
     * Fails the add with AddEntryQuorumTimeoutException unless it already
     * completed; records a stat when the delay was caused by fault-domain
     * placement rather than missing acks.
     */
    synchronized void timeoutQuorumWait() {
        if (completed) {
            return;
        }
        if (addEntrySuccessBookies.size() >= lh.getLedgerMetadata().getAckQuorumSize()) {
            // If ackQuorum number of bookies have acknowledged the write but still not complete, indicates
            // failures due to not having been written to enough fault domains. Increment corresponding
            // counter.
            clientCtx.getClientStats().getWriteTimedOutDueToNotEnoughFaultDomains().inc();
        }
        lh.handleUnrecoverableErrorDuringAdd(BKException.Code.AddEntryQuorumTimeoutException);
    }

    /**
     * Called on ensemble change: clears any recorded success for the bookie at
     * {@code bookieIndex} and re-sends the write to the replacement bookie.
     */
    synchronized void unsetSuccessAndSendWriteRequest(List<BookieId> ensemble, int bookieIndex) {
        // update the ensemble
        this.ensemble = ensemble;

        if (toSend == null) {
            // this addOp hasn't yet had its mac computed. When the mac is
            // computed, its write requests will be sent, so no need to send it
            // now
            return;
        }
        // Suppose that unset doesn't happen on the write set of an entry. In this
        // case we don't need to resend the write request upon an ensemble change.
        // We do need to invoke #sendAddSuccessCallbacks() for such entries because
        // they may have already completed, but they are just waiting for the ensemble
        // to change.
        // E.g.
        // ensemble (A, B, C, D), entry k is written to (A, B, D). An ensemble change
        // happens to replace C with E. Entry k does not complete until C is
        // replaced with E successfully. When the ensemble change completes, it tries
        // to unset entry k. C however is not in k's write set, so no entry is written
        // again, and no one triggers #sendAddSuccessCallbacks. Consequently, k never
        // completes.
        //
        // We call sendAddSuccessCallback when unsetting t cover this case.
        if (!lh.distributionSchedule.hasEntry(entryId, bookieIndex)) {
            lh.sendAddSuccessCallbacks();
            return;
        }
        if (callbackTriggered) {
            return;
        }

        if (LOG.isDebugEnabled()) {
            LOG.debug("Unsetting success for ledger: " + lh.ledgerId + " entry: " + entryId + " bookie index: "
                    + bookieIndex);
        }

        // if we had already heard a success from this array index, need to
        // increment our number of responses that are pending, since we are
        // going to unset this success
        if (!ackSet.removeBookieAndCheck(bookieIndex)) {
            // unset completed if this results in loss of ack quorum
            completed = false;
        }

        sendWriteRequest(ensemble, bookieIndex);
    }

    /**
     * Initiate the add operation.
     */
    public synchronized void initiate() {
        hasRun = true;
        if (callbackTriggered) {
            // this should only be true if the request was failed due
            // to another request ahead in the pending queue,
            // so we can just ignore this request
            maybeRecycle();
            return;
        }

        this.requestTimeNanos = MathUtils.nowInNano();
        checkNotNull(lh);
        checkNotNull(lh.macManager);

        int flags = isRecoveryAdd ? FLAG_RECOVERY_ADD | FLAG_HIGH_PRIORITY : FLAG_NONE;
        this.toSend = lh.macManager.computeDigestAndPackageForSending(
                entryId, lh.lastAddConfirmed, currentLedgerLength,
                payload, lh.ledgerKey, flags);
        // ownership of RefCounted ByteBuf was passed to computeDigestAndPackageForSending
        payload = null;

        // We are about to send. Check if we need to make an ensemble change
        // because of delayed write errors
        lh.maybeHandleDelayedWriteBookieFailure();

        // Iterate over set and trigger the sendWriteRequests
        for (int i = 0; i < lh.distributionSchedule.getWriteQuorumSize(); i++) {
            sendWriteRequest(ensemble, lh.distributionSchedule.getWriteSetBookieIndex(entryId, i));
        }
    }

    /**
     * Per-bookie response callback. {@code ctx} is the integer bookie index
     * the request was sent to (see sendWriteRequest).
     */
    @Override
    public synchronized void writeComplete(int rc, long ledgerId, long entryId, BookieId addr, Object ctx) {
        int bookieIndex = (Integer) ctx;
        --pendingWriteRequests;

        if (!ensemble.get(bookieIndex).equals(addr)) {
            // ensemble has already changed, failure of this addr is immaterial
            if (LOG.isDebugEnabled()) {
                LOG.debug("Write did not succeed: " + ledgerId + ", " + entryId + ". But we have already fixed it.");
            }
            return;
        }

        // must record all acks, even if complete (completion can be undone by an ensemble change)
        boolean ackQuorum = false;
        if (BKException.Code.OK == rc) {
            ackQuorum = ackSet.completeBookieAndCheck(bookieIndex);
            addEntrySuccessBookies.add(ensemble.get(bookieIndex));
        }

        if (completed) {
            if (rc != BKException.Code.OK) {
                // Got an error after satisfying AQ. This means we are under replicated at the create itself.
                // Update the stat to reflect it.
                clientCtx.getClientStats().getAddOpUrCounter().inc();
                if (!clientCtx.getConf().disableEnsembleChangeFeature.isAvailable()
                        && !clientCtx.getConf().delayEnsembleChange) {
                    lh.notifyWriteFailed(bookieIndex, addr);
                }
            }
            // even the add operation is completed, but because we don't reset completed flag back to false when
            // #unsetSuccessAndSendWriteRequest doesn't break ack quorum constraint. we still have current pending
            // add op is completed but never callback. so do a check here to complete again.
            //
            // E.g. entry x is going to complete.
            //
            // 1) entry x + k hits a failure. lh.handleBookieFailure increases blockAddCompletions to 1, for ensemble
            //    change
            // 2) entry x receives all responses, sets completed to true but fails to send success callback because
            //    blockAddCompletions is 1
            // 3) ensemble change completed. lh unset success starting from x to x+k, but since the unset doesn't break
            //    ackSet constraint. #removeBookieAndCheck doesn't set completed back to false.
            // 4) so when the retry request on new bookie completes, it finds the pending op is already completed.
            //    we have to trigger #sendAddSuccessCallbacks
            //
            sendAddSuccessCallbacks();
            // I am already finished, ignore incoming responses.
            // otherwise, we might hit the following error handling logic, which might cause bad things.
            maybeRecycle();
            return;
        }

        switch (rc) {
            case BKException.Code.OK:
                // continue
                break;
            case BKException.Code.ClientClosedException:
                // bookie client is closed.
                lh.errorOutPendingAdds(rc);
                return;
            case BKException.Code.IllegalOpException:
                // illegal operation requested, like using unsupported feature in v2 protocol
                lh.handleUnrecoverableErrorDuringAdd(rc);
                return;
            case BKException.Code.LedgerFencedException:
                LOG.warn("Fencing exception on write: L{} E{} on {}",
                        ledgerId, entryId, addr);
                lh.handleUnrecoverableErrorDuringAdd(rc);
                return;
            case BKException.Code.UnauthorizedAccessException:
                LOG.warn("Unauthorized access exception on write: L{} E{} on {}",
                        ledgerId, entryId, addr);
                lh.handleUnrecoverableErrorDuringAdd(rc);
                return;
            default:
                if (clientCtx.getConf().delayEnsembleChange) {
                    // Delayed mode: only trigger an ensemble change once the
                    // failures actually break the ack quorum (or the bookie is
                    // read-only).
                    if (ackSet.failBookieAndCheck(bookieIndex, addr)
                            || rc == BKException.Code.WriteOnReadOnlyBookieException) {
                        Map<Integer, BookieId> failedBookies = ackSet.getFailedBookies();
                        LOG.warn("Failed to write entry ({}, {}) to bookies {}, handling failures.",
                                ledgerId, entryId, failedBookies);
                        // we can't meet ack quorum requirement, trigger ensemble change.
                        lh.handleBookieFailure(failedBookies);
                    } else if (LOG.isDebugEnabled()) {
                        LOG.debug("Failed to write entry ({}, {}) to bookie ({}, {}),"
                                        + " but it didn't break ack quorum, delaying ensemble change : {}",
                                ledgerId, entryId, bookieIndex, addr, BKException.getMessage(rc));
                    }
                } else {
                    // Eager mode: any write failure triggers an ensemble change.
                    LOG.warn("Failed to write entry ({}, {}) to bookie ({}, {}): {}",
                            ledgerId, entryId, bookieIndex, addr, BKException.getMessage(rc));
                    lh.handleBookieFailure(ImmutableMap.of(bookieIndex, addr));
                }
                return;
        }

        if (ackQuorum && !completed) {
            if (clientCtx.getConf().enforceMinNumFaultDomainsForWrite
                    && !(clientCtx.getPlacementPolicy()
                    .areAckedBookiesAdheringToPlacementPolicy(addEntrySuccessBookies,
                            lh.getLedgerMetadata().getWriteQuorumSize(),
                            lh.getLedgerMetadata().getAckQuorumSize()))) {
                LOG.warn("Write success for entry ID {} delayed, not acknowledged by bookies in enough fault domains",
                        entryId);

                // Increment to indicate write did not complete due to not enough fault domains
                clientCtx.getClientStats().getWriteDelayedDueToNotEnoughFaultDomains().inc();

                // Only do this for the first time.
                if (writeDelayedStartTime == -1) {
                    writeDelayedStartTime = MathUtils.nowInNano();
                }
            } else {
                completed = true;
                this.qwcLatency = MathUtils.elapsedNanos(requestTimeNanos);

                // If completion was delayed by fault-domain placement, record the extra latency.
                if (writeDelayedStartTime != -1) {
                    clientCtx.getClientStats()
                            .getWriteDelayedDueToNotEnoughFaultDomainsLatency()
                            .registerSuccessfulEvent(MathUtils.elapsedNanos(writeDelayedStartTime),
                                    TimeUnit.NANOSECONDS);
                }

                sendAddSuccessCallbacks();
            }
        }
    }

    // Asks the ledger handle to flush completion callbacks in entry-id order.
    void sendAddSuccessCallbacks() {
        lh.sendAddSuccessCallbacks();
    }

    /**
     * Delivers the final result to the application callback, records the add
     * latency stat, and marks the op eligible for recycling.
     */
    synchronized void submitCallback(final int rc) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Submit callback (lid:{}, eid: {}). rc:{}", lh.getId(), entryId, rc);
        }

        long latencyNanos = MathUtils.elapsedNanos(requestTimeNanos);
        if (rc != BKException.Code.OK) {
            clientCtx.getClientStats().getAddOpLogger().registerFailedEvent(latencyNanos, TimeUnit.NANOSECONDS);
            LOG.error("Write of ledger entry to quorum failed: L{} E{}",
                    lh.getId(), entryId);
        } else {
            clientCtx.getClientStats().getAddOpLogger().registerSuccessfulEvent(latencyNanos, TimeUnit.NANOSECONDS);
        }
        cb.addCompleteWithLatency(rc, lh, entryId, qwcLatency, ctx);
        callbackTriggered = true;

        maybeRecycle();
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("PendingAddOp(lid:").append(lh.ledgerId)
                .append(", eid:").append(entryId).append(", completed:")
                .append(completed).append(")");
        return sb.toString();
    }

    // Hash on the (truncated) entry id; consistent with equals() below.
    @Override
    public int hashCode() {
        return (int) entryId;
    }

    // Two ops are equal when they refer to the same entry id.
    @Override
    public boolean equals(Object o) {
        if (o instanceof PendingAddOp) {
            return (this.entryId == ((PendingAddOp) o).entryId);
        }
        return (this == o);
    }

    private final Handle<PendingAddOp> recyclerHandle;
    private static final Recycler<PendingAddOp> RECYCLER = new Recycler<PendingAddOp>() {
        @Override
        protected PendingAddOp newObject(Recycler.Handle<PendingAddOp> handle) {
            return new PendingAddOp(handle);
        }
    };

    private PendingAddOp(Handle<PendingAddOp> recyclerHandle) {
        this.recyclerHandle = recyclerHandle;
    }

    private synchronized void maybeRecycle() {
        /**
         * We have opportunity to recycle two objects here.
         * PendingAddOp#toSend and LedgerHandle#pendingAddOp
         *
         * A. LedgerHandle#pendingAddOp: This can be released after all 3 conditions are met.
         * - After the callback is run
         * - After safeRun finished by the executor
         * - Write responses are returned from all bookies
         * as BookieClient Holds a reference from the point the addEntry requests are sent.
         *
         * B. ByteBuf can be released after 2 of the conditions are met.
         * - After the callback is run as we will not retry the write after callback
         * - After safeRun finished by the executor
         * BookieClient takes and releases on this buffer immediately after sending the data.
         *
         * The object can only be recycled after the above conditions are met
         * otherwise we could end up recycling twice and all
         * joy that goes along with that.
         */
        if (hasRun && callbackTriggered) {
            ReferenceCountUtil.release(toSend);
            toSend = null;
        }

        // only recycle a pending add op after it has been run.
        if (hasRun && toSend == null && pendingWriteRequests == 0) {
            recyclePendAddOpObject();
        }
    }

    /**
     * Clears every field back to its default and returns this op to the
     * recycler pool. The addEntrySuccessBookies set is retained (cleared, not
     * nulled) so it can be reused by create().
     */
    public synchronized void recyclePendAddOpObject() {
        entryId = LedgerHandle.INVALID_ENTRY_ID;
        currentLedgerLength = -1;
        if (payload != null) {
            ReferenceCountUtil.release(payload);
            payload = null;
        }
        cb = null;
        ctx = null;
        ensemble = null;
        ackSet.recycle();
        ackSet = null;
        lh = null;
        clientCtx = null;
        isRecoveryAdd = false;
        completed = false;
        pendingWriteRequests = 0;
        callbackTriggered = false;
        hasRun = false;
        allowFailFast = false;
        writeFlags = null;
        addEntrySuccessBookies.clear();
        writeDelayedStartTime = -1;

        recyclerHandle.recycle(this);
    }
}
| 364 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandle.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkState;
import static org.apache.bookkeeper.client.api.BKException.Code.ClientClosedException;
import static org.apache.bookkeeper.client.api.BKException.Code.WriteException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.Iterators;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.RateLimiter;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.security.GeneralSecurityException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
import org.apache.bookkeeper.client.AsyncCallback.AddCallbackWithLatency;
import org.apache.bookkeeper.client.AsyncCallback.CloseCallback;
import org.apache.bookkeeper.client.AsyncCallback.ReadCallback;
import org.apache.bookkeeper.client.AsyncCallback.ReadLastConfirmedCallback;
import org.apache.bookkeeper.client.BKException.BKIncorrectParameterException;
import org.apache.bookkeeper.client.BKException.BKReadException;
import org.apache.bookkeeper.client.DistributionSchedule.WriteSet;
import org.apache.bookkeeper.client.SyncCallbackUtils.FutureReadLastConfirmed;
import org.apache.bookkeeper.client.SyncCallbackUtils.FutureReadLastConfirmedAndEntry;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncAddCallback;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncCloseCallback;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncReadCallback;
import org.apache.bookkeeper.client.SyncCallbackUtils.SyncReadLastConfirmedCallback;
import org.apache.bookkeeper.client.api.BKException.Code;
import org.apache.bookkeeper.client.api.LastConfirmedAndEntry;
import org.apache.bookkeeper.client.api.LedgerEntries;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.client.api.WriteFlag;
import org.apache.bookkeeper.client.api.WriteHandle;
import org.apache.bookkeeper.client.impl.LedgerEntryImpl;
import org.apache.bookkeeper.common.concurrent.FutureEventListener;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.common.util.MathUtils;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieProtocol;
import org.apache.bookkeeper.proto.checksum.DigestManager;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.versioning.Versioned;
import org.apache.commons.collections4.IteratorUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Ledger handle contains ledger metadata and is used to access the read and
* write operations to a ledger.
*/
public class LedgerHandle implements WriteHandle {
static final Logger LOG = LoggerFactory.getLogger(LedgerHandle.class);
private static final int STICKY_READ_BOOKIE_INDEX_UNSET = -1;
final ClientContext clientCtx;
final byte[] ledgerKey;
private Versioned<LedgerMetadata> versionedMetadata;
final long ledgerId;
final ExecutorService executor;
long lastAddPushed;
// Lifecycle state of this handle: OPEN until the handle is closed.
private enum HandleState {
    OPEN,
    CLOSED
}
private HandleState handleState = HandleState.OPEN;
private final CompletableFuture<Void> closePromise = new CompletableFuture<>();
/**
* Last entryId which has been confirmed to be written durably to the bookies.
* This value is used by readers, the LAC protocol
*/
volatile long lastAddConfirmed;
/**
* Next entryId which is expected to move forward during {@link #sendAddSuccessCallbacks() }. This is important
* in order to have an ordered sequence of addEntry acknowledged to the writer
*/
volatile long pendingAddsSequenceHead;
/**
* If bookie sticky reads are enabled, this will contain the index of the bookie
* selected as "sticky" for this ledger. The bookie is chosen at random when the
* LedgerHandle is created.
*
* <p>In case of failures, the bookie index will be updated (to the next bookie in
* the ensemble) to avoid continuing to attempt to read from a failed bookie.
*
* <p>If the index is -1, it means the sticky reads are disabled.
*/
private int stickyBookieIndex;
long length;
final DigestManager macManager;
final DistributionSchedule distributionSchedule;
final RateLimiter throttler;
final LoadingCache<BookieId, Long> bookieFailureHistory;
final BookiesHealthInfo bookiesHealthInfo;
final EnumSet<WriteFlag> writeFlags;
ScheduledFuture<?> timeoutFuture = null;
@VisibleForTesting
final Map<Integer, BookieId> delayedWriteFailedBookies =
new HashMap<Integer, BookieId>();
/**
* Invalid entry id. This value is returned from methods which
* should return an entry id but there is no valid entry available.
*/
public static final long INVALID_ENTRY_ID = BookieProtocol.INVALID_ENTRY_ID;
/**
* Invalid ledger id. Ledger IDs must be greater than or equal to 0.
* Large negative used to make it easy to spot in logs if erroneously used.
*/
public static final long INVALID_LEDGER_ID = -0xABCDABCDL;
final Object metadataLock = new Object();
boolean changingEnsemble = false;
final AtomicInteger numEnsembleChanges = new AtomicInteger(0);
Queue<PendingAddOp> pendingAddOps;
ExplicitLacFlushPolicy explicitLacFlushPolicy;
final Counter ensembleChangeCounter;
final Counter lacUpdateHitsCounter;
final Counter lacUpdateMissesCounter;
private final OpStatsLogger clientChannelWriteWaitStats;
/**
 * Builds a handle over {@code ledgerId}, deriving digest/key material from
 * the password and wiring up stats, sticky reads and write-handle state.
 *
 * @throws GeneralSecurityException if the digest manager cannot be instantiated
 */
LedgerHandle(ClientContext clientCtx,
             long ledgerId, Versioned<LedgerMetadata> versionedMetadata,
             BookKeeper.DigestType digestType, byte[] password,
             EnumSet<WriteFlag> writeFlags)
        throws GeneralSecurityException, NumberFormatException {
    this.clientCtx = clientCtx;
    this.versionedMetadata = versionedMetadata;
    this.pendingAddOps = new ConcurrentLinkedQueue<PendingAddOp>();
    this.writeFlags = writeFlags;

    LedgerMetadata metadata = versionedMetadata.getValue();
    if (metadata.isClosed()) {
        // A closed ledger's metadata is authoritative for LAC and length.
        lastAddConfirmed = lastAddPushed = metadata.getLastEntryId();
        length = metadata.getLength();
    } else {
        lastAddConfirmed = lastAddPushed = INVALID_ENTRY_ID;
        length = 0;
    }

    this.pendingAddsSequenceHead = lastAddConfirmed;

    this.ledgerId = ledgerId;

    // Pin all work for this ledger to a single worker thread.
    this.executor = clientCtx.getMainWorkerPool().chooseThread(ledgerId);

    // Sticky reads only make sense when every bookie has every entry
    // (ensemble size == write quorum size).
    if (clientCtx.getConf().enableStickyReads
            && getLedgerMetadata().getEnsembleSize() == getLedgerMetadata().getWriteQuorumSize()) {
        stickyBookieIndex = clientCtx.getPlacementPolicy().getStickyReadBookieIndex(metadata, Optional.empty());
    } else {
        stickyBookieIndex = STICKY_READ_BOOKIE_INDEX_UNSET;
    }

    if (clientCtx.getConf().throttleValue > 0) {
        this.throttler = RateLimiter.create(clientCtx.getConf().throttleValue);
    } else {
        this.throttler = null;
    }

    macManager = DigestManager.instantiate(ledgerId, password, BookKeeper.DigestType.toProtoDigestType(digestType),
            clientCtx.getByteBufAllocator(), clientCtx.getConf().useV2WireProtocol);

    // If the password is empty, pass the same random ledger key which is generated by the hash of the empty
    // password, so that the bookie can avoid processing the keys for each entry
    this.ledgerKey = DigestManager.generateMasterKey(password);
    distributionSchedule = new RoundRobinDistributionSchedule(
            metadata.getWriteQuorumSize(),
            metadata.getAckQuorumSize(),
            metadata.getEnsembleSize());
    // Failure history expires so a temporarily bad bookie is not penalized forever.
    this.bookieFailureHistory = CacheBuilder.newBuilder()
            .expireAfterWrite(clientCtx.getConf().bookieFailureHistoryExpirationMSec, TimeUnit.MILLISECONDS)
            .build(new CacheLoader<BookieId, Long>() {
                @Override
                public Long load(BookieId key) {
                    // -1 signals "no recorded failure".
                    return -1L;
                }
            });
    this.bookiesHealthInfo = new BookiesHealthInfo() {
        @Override
        public long getBookieFailureHistory(BookieId bookieSocketAddress) {
            Long lastFailure = bookieFailureHistory.getIfPresent(bookieSocketAddress);
            return lastFailure == null ? -1L : lastFailure;
        }

        @Override
        public long getBookiePendingRequests(BookieId bookieSocketAddress) {
            return clientCtx.getBookieClient().getNumPendingRequests(bookieSocketAddress, ledgerId);
        }
    };

    ensembleChangeCounter = clientCtx.getClientStats().getEnsembleChangeCounter();
    lacUpdateHitsCounter = clientCtx.getClientStats().getLacUpdateHitsCounter();
    lacUpdateMissesCounter = clientCtx.getClientStats().getLacUpdateMissesCounter();
    clientChannelWriteWaitStats = clientCtx.getClientStats().getClientChannelWriteWaitLogger();

    clientCtx.getClientStats().registerPendingAddsGauge(new Gauge<Integer>() {
        @Override
        public Integer getDefaultValue() {
            return 0;
        }

        @Override
        public Integer getSample() {
            return pendingAddOps.size();
        }
    });

    initializeWriteHandleState();
}
/**
 * Notify the LedgerHandle that a read operation was failed on a particular bookie.
 *
 * @param bookieIndex index, within the current ensemble, of the bookie the read failed on
 */
void recordReadErrorOnBookie(int bookieIndex) {
    // If sticky bookie reads are enabled, switch the sticky bookie to the
    // next bookie in the ensemble so that we avoid to keep reading from the
    // same failed bookie
    if (stickyBookieIndex != STICKY_READ_BOOKIE_INDEX_UNSET) {
        // This will be idempotent when we have multiple read errors on the
        // same bookie. The net result is that we just go to the next bookie
        stickyBookieIndex = clientCtx.getPlacementPolicy().getStickyReadBookieIndex(getLedgerMetadata(),
                Optional.of(bookieIndex));
    }
}
/**
 * Set up state used only by writable handles: the explicit-LAC flush policy and,
 * when an add-entry quorum timeout is configured, a periodic task monitoring
 * pending add operations.
 */
protected void initializeWriteHandleState() {
    if (clientCtx.getConf().explicitLacInterval > 0) {
        explicitLacFlushPolicy = new ExplicitLacFlushPolicy.ExplicitLacFlushPolicyImpl(
                this, clientCtx);
    } else {
        // Explicit LAC flushing disabled: install the no-op policy.
        explicitLacFlushPolicy = ExplicitLacFlushPolicy.VOID_EXPLICITLAC_FLUSH_POLICY;
    }
    if (clientCtx.getConf().addEntryQuorumTimeoutNanos > 0) {
        // Periodically check pending adds for quorum timeouts.
        this.timeoutFuture = clientCtx.getScheduler().scheduleAtFixedRate(
                () -> monitorPendingAddOps(),
                clientCtx.getConf().timeoutMonitorIntervalSec,
                clientCtx.getConf().timeoutMonitorIntervalSec,
                TimeUnit.SECONDS);
    }
}
/**
 * Undo {@link #initializeWriteHandleState()}: stop explicit-LAC flushing and
 * cancel the pending-add timeout monitor if one was scheduled.
 */
private void tearDownWriteHandleState() {
    explicitLacFlushPolicy.stopExplicitLacFlush();
    if (timeoutFuture != null) {
        // cancel(false): let an in-flight run finish, just stop future ones.
        timeoutFuture.cancel(false);
    }
}
/**
 * Get the id of the current ledger.
 *
 * @return the id of the ledger
 */
@Override
public long getId() {
    return ledgerId;
}
/**
 * Get the write flags this handle was opened with (visible for testing only).
 *
 * @return the set of {@link WriteFlag}s in effect for this handle
 */
@VisibleForTesting
public EnumSet<WriteFlag> getWriteFlags() {
    return writeFlags;
}
/**
 * {@inheritDoc}
 */
@Override
public synchronized long getLastAddConfirmed() {
    return lastAddConfirmed;
}
// Overwrite the locally cached LAC; synchronized to pair with getLastAddConfirmed().
synchronized void setLastAddConfirmed(long lac) {
    this.lastAddConfirmed = lac;
}
/**
 * {@inheritDoc}
 */
@Override
public synchronized long getLastAddPushed() {
    return lastAddPushed;
}
/**
 * Get the Ledger's key/password.
 *
 * <p>A defensive copy is returned so callers cannot mutate the handle's key.
 *
 * @return byte array for the ledger's key/password.
 */
public byte[] getLedgerKey() {
    // clone() on an array is a full-length copy, equivalent to
    // Arrays.copyOf(ledgerKey, ledgerKey.length).
    return ledgerKey.clone();
}
/**
 * {@inheritDoc}
 */
@Override
public LedgerMetadata getLedgerMetadata() {
    // Unwrap the current value from the versioned holder.
    return versionedMetadata.getValue();
}
// Expose the versioned metadata holder (used by CAS-style updates in setLedgerMetadata).
Versioned<LedgerMetadata> getVersionedLedgerMetadata() {
    return versionedMetadata;
}
/**
 * Atomically swap the handle's versioned metadata, compare-and-set style.
 *
 * @param expected the metadata object the caller believes is current (identity compare)
 * @param newMetadata the replacement metadata
 * @return true if the swap happened, false if {@code expected} was stale
 */
boolean setLedgerMetadata(Versioned<LedgerMetadata> expected, Versioned<LedgerMetadata> newMetadata) {
    synchronized (this) {
        // ensure that we only update the metadata if it is the object we expect it to be
        if (versionedMetadata != expected) {
            return false;
        }
        versionedMetadata = newMetadata;
        LedgerMetadata updated = newMetadata.getValue();
        if (updated.isClosed()) {
            // A closed ledger pins LAC/lastAddPushed and length to the final values.
            lastAddConfirmed = lastAddPushed = updated.getLastEntryId();
            length = updated.getLength();
        }
        return true;
    }
}
/**
 * Get this ledger's customMetadata map.
 *
 * @return map containing user provided customMetadata.
 */
public Map<String, byte[]> getCustomMetadata() {
    return getLedgerMetadata().getCustomMetadata();
}
/**
 * Get the number of fragments that makeup this ledger.
 *
 * <p>A fragment corresponds to one ensemble entry in the metadata.
 *
 * @return the count of fragments
 */
public synchronized long getNumFragments() {
    return getLedgerMetadata().getAllEnsembles().size();
}
/**
 * Get the count of unique bookies that own part of this ledger
 * by going over all the fragments of the ledger.
 *
 * @return count of unique bookies
 */
public synchronized long getNumBookies() {
    // Union all ensembles; the set removes duplicates across fragments.
    Set<BookieId> distinctBookies = Sets.newHashSet();
    getLedgerMetadata().getAllEnsembles().values().forEach(distinctBookies::addAll);
    return distinctBookies.size();
}
/**
 * Get the DigestManager.
 *
 * @return DigestManager for the LedgerHandle
 */
DigestManager getDigestManager() {
    return macManager;
}
/**
 * Add to the length of the ledger in bytes.
 *
 * @param delta number of bytes to add to the running length
 * @return the length of the ledger after the addition
 */
synchronized long addToLength(long delta) {
    this.length += delta;
    return this.length;
}
/**
 * Returns the length of the ledger in bytes.
 *
 * @return the length of the ledger in bytes
 */
@Override
public synchronized long getLength() {
    return this.length;
}
/**
 * Returns the ledger creation time.
 *
 * @return the ledger creation time
 */
public long getCtime() {
    return getLedgerMetadata().getCtime();
}
/**
 * Get the Distribution Schedule.
 *
 * @return DistributionSchedule for the LedgerHandle
 */
DistributionSchedule getDistributionSchedule() {
    return distributionSchedule;
}
/**
 * Get the health info for bookies for this ledger.
 *
 * @return BookiesHealthInfo for every bookie in the write set.
 */
BookiesHealthInfo getBookiesHealthInfo() {
    return bookiesHealthInfo;
}
/**
 * {@inheritDoc}
 */
@Override
public void close()
        throws InterruptedException, BKException {
    // Synchronous wrapper: block on the async close's future.
    SyncCallbackUtils.waitForResult(closeAsync());
}
/**
 * {@inheritDoc}
 */
@Override
public CompletableFuture<Void> closeAsync() {
    // Bridge the callback-style asyncClose onto a CompletableFuture.
    final CompletableFuture<Void> promise = new CompletableFuture<>();
    asyncClose(new SyncCloseCallback(promise), null);
    return promise;
}
/**
 * Asynchronous close, any adds in flight will return errors.
 *
 * <p>Closing a ledger will ensure that all clients agree on what the last entry
 * of the ledger is. This ensures that, once the ledger has been closed, all
 * reads from the ledger will return the same set of entries.
 *
 * @param cb
 *          callback implementation
 * @param ctx
 *          control object
 */
public void asyncClose(CloseCallback cb, Object ctx) {
    // Pending adds are failed with LedgerClosedException.
    asyncCloseInternal(cb, ctx, BKException.Code.LedgerClosedException);
}
/**
 * {@inheritDoc}
 */
@Override
public synchronized boolean isClosed() {
    return getLedgerMetadata().isClosed();
}
// Writable only if the ledger metadata is not closed AND this handle is still open.
boolean isHandleWritable() {
    return !getLedgerMetadata().isClosed() && handleState == HandleState.OPEN;
}
// Submit the close to the ordered executor; if the executor rejects the task
// (e.g. during client shutdown), fail the pending adds and the callback inline.
void asyncCloseInternal(final CloseCallback cb, final Object ctx, final int rc) {
    try {
        doAsyncCloseInternal(cb, ctx, rc);
    } catch (RejectedExecutionException re) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Failed to close ledger {} : ", ledgerId, re);
        }
        errorOutPendingAdds(BookKeeper.getReturnRc(clientCtx.getBookieClient(), rc));
        cb.closeComplete(BookKeeper.getReturnRc(clientCtx.getBookieClient(), BKException.Code.InterruptedException),
                this, ctx);
    }
}
/**
 * Same as public version of asyncClose except that this one takes an
 * additional parameter which is the return code to hand to all the pending
 * add ops.
 *
 * <p>Runs on the ordered executor. Drains pending adds, marks the handle
 * CLOSED, then (unless a previous close already ran) drives a metadata
 * update loop to record the final lastEntry/length in the ledger metadata.
 *
 * @param cb callback to fire once closePromise completes
 * @param ctx control object passed through to the callback
 * @param rc return code handed to all pending add ops
 */
void doAsyncCloseInternal(final CloseCallback cb, final Object ctx, final int rc) {
    executeOrdered(() -> {
        final HandleState prevHandleState;
        final List<PendingAddOp> pendingAdds;
        final long lastEntry;
        final long finalLength;
        // Register the callback first so it fires however closePromise completes
        // (including when a previous close already completed it).
        closePromise.whenComplete((ignore, ex) -> {
            if (ex != null) {
                cb.closeComplete(
                        BKException.getExceptionCode(ex, BKException.Code.UnexpectedConditionException),
                        LedgerHandle.this, ctx);
            } else {
                cb.closeComplete(BKException.Code.OK, LedgerHandle.this, ctx);
            }
        });
        synchronized (LedgerHandle.this) {
            prevHandleState = handleState;
            // drain pending adds first
            pendingAdds = drainPendingAddsAndAdjustLength();
            // taking the length must occur after draining, as draining changes the length
            lastEntry = lastAddPushed = LedgerHandle.this.lastAddConfirmed;
            finalLength = LedgerHandle.this.length;
            handleState = HandleState.CLOSED;
        }
        // error out all pending adds during closing, the callbacks shouldn't be
        // running under any bk locks.
        try {
            errorOutPendingAdds(rc, pendingAdds);
        } catch (Throwable e) {
            closePromise.completeExceptionally(e);
            return;
        }
        if (prevHandleState != HandleState.CLOSED) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Closing ledger: {} at entryId {} with {} bytes", getId(), lastEntry,
                        finalLength);
            }
            tearDownWriteHandleState();
            new MetadataUpdateLoop(
                    clientCtx.getLedgerManager(), getId(),
                    LedgerHandle.this::getVersionedLedgerMetadata,
                    (metadata) -> {
                        if (metadata.isClosed()) {
                            /* If the ledger has been closed with the same lastEntry
                             * and length that we planned to close with, we have nothing to do,
                             * so just return success */
                            if (lastEntry == metadata.getLastEntryId()
                                    && finalLength == metadata.getLength()) {
                                return false;
                            } else {
                                LOG.error("Metadata conflict when closing ledger {}."
                                        + " Another client may have recovered the ledger while "
                                        + "there"
                                        + " were writes outstanding. (local lastEntry:{} "
                                        + "length:{}) "
                                        + " (metadata lastEntry:{} length:{})",
                                        getId(), lastEntry, finalLength,
                                        metadata.getLastEntryId(), metadata.getLength());
                                throw new BKException.BKMetadataVersionException();
                            }
                        } else {
                            return true;
                        }
                    },
                    (metadata) -> {
                        return LedgerMetadataBuilder.from(metadata)
                                .withClosedState().withLastEntryId(lastEntry)
                                .withLength(finalLength).build();
                    },
                    LedgerHandle.this::setLedgerMetadata)
                    .run().whenComplete((metadata, ex) -> {
                        if (ex != null) {
                            closePromise.completeExceptionally(ex);
                        } else {
                            FutureUtils.complete(closePromise, null);
                        }
                    });
        }
    }
    );
}
/**
 * Read a sequence of entries synchronously.
 *
 * @param firstEntry
 *          id of first entry of sequence (included)
 * @param lastEntry
 *          id of last entry of sequence (included)
 *
 * @see #asyncReadEntries(long, long, ReadCallback, Object)
 */
public Enumeration<LedgerEntry> readEntries(long firstEntry, long lastEntry)
        throws InterruptedException, BKException {
    // Synchronous wrapper: block on the async read's future.
    CompletableFuture<Enumeration<LedgerEntry>> result = new CompletableFuture<>();
    asyncReadEntries(firstEntry, lastEntry, new SyncReadCallback(result), null);
    return SyncCallbackUtils.waitForResult(result);
}
/**
 * Read a sequence of entries synchronously, allowing to read after the LastAddConfirmed range.<br>
 * This is the same of
 * {@link #asyncReadUnconfirmedEntries(long, long, ReadCallback, Object) }
 *
 * @param firstEntry
 *          id of first entry of sequence (included)
 * @param lastEntry
 *          id of last entry of sequence (included)
 *
 * @see #readEntries(long, long)
 * @see #asyncReadUnconfirmedEntries(long, long, ReadCallback, java.lang.Object)
 * @see #asyncReadLastConfirmed(ReadLastConfirmedCallback, java.lang.Object)
 */
public Enumeration<LedgerEntry> readUnconfirmedEntries(long firstEntry, long lastEntry)
        throws InterruptedException, BKException {
    // Synchronous wrapper: block on the async read's future.
    CompletableFuture<Enumeration<LedgerEntry>> result = new CompletableFuture<>();
    asyncReadUnconfirmedEntries(firstEntry, lastEntry, new SyncReadCallback(result), null);
    return SyncCallbackUtils.waitForResult(result);
}
/**
 * Read a sequence of entries asynchronously.
 *
 * @param firstEntry
 *          id of first entry of sequence
 * @param lastEntry
 *          id of last entry of sequence
 * @param cb
 *          object implementing read callback interface
 * @param ctx
 *          control object
 */
public void asyncReadEntries(long firstEntry, long lastEntry, ReadCallback cb, Object ctx) {
    // Little sanity check
    if (firstEntry < 0 || firstEntry > lastEntry) {
        LOG.error("IncorrectParameterException on ledgerId:{} firstEntry:{} lastEntry:{}",
                ledgerId, firstEntry, lastEntry);
        cb.readComplete(BKException.Code.IncorrectParameterException, this, null, ctx);
        return;
    }
    // Unlike asyncReadUnconfirmedEntries, this path refuses reads past the
    // locally known LastAddConfirmed.
    if (lastEntry > lastAddConfirmed) {
        LOG.error("ReadEntries exception on ledgerId:{} firstEntry:{} lastEntry:{} lastAddConfirmed:{}",
                ledgerId, firstEntry, lastEntry, lastAddConfirmed);
        cb.readComplete(BKException.Code.ReadException, this, null, ctx);
        return;
    }
    asyncReadEntriesInternal(firstEntry, lastEntry, cb, ctx, false);
}
/**
 * Read a sequence of entries asynchronously, allowing to read after the LastAddConfirmed range.
 * <br>This is the same of
 * {@link #asyncReadEntries(long, long, ReadCallback, Object) }
 * but it lets the client read without checking the local value of LastAddConfirmed, so that it is possibile to
 * read entries for which the writer has not received the acknowledge yet. <br>
 * For entries which are within the range 0..LastAddConfirmed BookKeeper guarantees that the writer has successfully
 * received the acknowledge.<br>
 * For entries outside that range it is possible that the writer never received the acknowledge
 * and so there is the risk that the reader is seeing entries before the writer and this could result in
 * a consistency issue in some cases.<br>
 * With this method you can even read entries before the LastAddConfirmed and entries after it with one call,
 * the expected consistency will be as described above for each subrange of ids.
 *
 * @param firstEntry
 *          id of first entry of sequence
 * @param lastEntry
 *          id of last entry of sequence
 * @param cb
 *          object implementing read callback interface
 * @param ctx
 *          control object
 *
 * @see #asyncReadEntries(long, long, ReadCallback, Object)
 * @see #asyncReadLastConfirmed(ReadLastConfirmedCallback, Object)
 * @see #readUnconfirmedEntries(long, long)
 */
public void asyncReadUnconfirmedEntries(long firstEntry, long lastEntry, ReadCallback cb, Object ctx) {
    // Little sanity check; note: deliberately no LAC bound check here.
    if (firstEntry < 0 || firstEntry > lastEntry) {
        LOG.error("IncorrectParameterException on ledgerId:{} firstEntry:{} lastEntry:{}",
                ledgerId, firstEntry, lastEntry);
        cb.readComplete(BKException.Code.IncorrectParameterException, this, null, ctx);
        return;
    }
    asyncReadEntriesInternal(firstEntry, lastEntry, cb, ctx, false);
}
/**
 * Read a sequence of entries asynchronously.
 *
 * @param firstEntry
 *          id of first entry of sequence
 * @param lastEntry
 *          id of last entry of sequence
 * @return future yielding the entries, or failed with a BKException subtype
 */
@Override
public CompletableFuture<LedgerEntries> readAsync(long firstEntry, long lastEntry) {
    // Little sanity check
    if (firstEntry < 0 || firstEntry > lastEntry) {
        LOG.error("IncorrectParameterException on ledgerId:{} firstEntry:{} lastEntry:{}",
                ledgerId, firstEntry, lastEntry);
        return FutureUtils.exception(new BKIncorrectParameterException());
    }
    // Refuse reads past the locally known LastAddConfirmed (see readUnconfirmedAsync
    // for the unchecked variant).
    if (lastEntry > lastAddConfirmed) {
        LOG.error("ReadAsync exception on ledgerId:{} firstEntry:{} lastEntry:{} lastAddConfirmed:{}",
                ledgerId, firstEntry, lastEntry, lastAddConfirmed);
        return FutureUtils.exception(new BKReadException());
    }
    return readEntriesInternalAsync(firstEntry, lastEntry, false);
}
/**
 * Read a sequence of entries asynchronously, allowing to read after the LastAddConfirmed range.
 * <br>This is the same of
 * {@link #asyncReadEntries(long, long, ReadCallback, Object) }
 * but it lets the client read without checking the local value of LastAddConfirmed, so that it is possibile to
 * read entries for which the writer has not received the acknowledge yet. <br>
 * For entries which are within the range 0..LastAddConfirmed BookKeeper guarantees that the writer has successfully
 * received the acknowledge.<br>
 * For entries outside that range it is possible that the writer never received the acknowledge
 * and so there is the risk that the reader is seeing entries before the writer and this could result in
 * a consistency issue in some cases.<br>
 * With this method you can even read entries before the LastAddConfirmed and entries after it with one call,
 * the expected consistency will be as described above for each subrange of ids.
 *
 * @param firstEntry
 *          id of first entry of sequence
 * @param lastEntry
 *          id of last entry of sequence
 *
 * @see #asyncReadEntries(long, long, ReadCallback, Object)
 * @see #asyncReadLastConfirmed(ReadLastConfirmedCallback, Object)
 * @see #readUnconfirmedEntries(long, long)
 */
@Override
public CompletableFuture<LedgerEntries> readUnconfirmedAsync(long firstEntry, long lastEntry) {
    // Little sanity check; note: deliberately no LAC bound check here.
    if (firstEntry < 0 || firstEntry > lastEntry) {
        LOG.error("IncorrectParameterException on ledgerId:{} firstEntry:{} lastEntry:{}",
                ledgerId, firstEntry, lastEntry);
        return FutureUtils.exception(new BKIncorrectParameterException());
    }
    return readEntriesInternalAsync(firstEntry, lastEntry, false);
}
/**
 * Bridge the future-based internal read onto the legacy ReadCallback interface.
 *
 * <p>On success each LedgerEntryImpl is wrapped into a LedgerEntry and the
 * original is closed (releases its buffer); the callback runs on the ordered
 * worker thread for this ledger.
 *
 * @param isRecoveryRead whether this read is part of ledger recovery
 */
void asyncReadEntriesInternal(long firstEntry, long lastEntry, ReadCallback cb,
                              Object ctx, boolean isRecoveryRead) {
    if (!clientCtx.isClientClosed()) {
        readEntriesInternalAsync(firstEntry, lastEntry, isRecoveryRead)
                .whenCompleteAsync(new FutureEventListener<LedgerEntries>() {
                    @Override
                    public void onSuccess(LedgerEntries entries) {
                        cb.readComplete(
                                Code.OK,
                                LedgerHandle.this,
                                IteratorUtils.asEnumeration(
                                        Iterators.transform(entries.iterator(), le -> {
                                            LedgerEntry entry = new LedgerEntry((LedgerEntryImpl) le);
                                            // close() the impl after wrapping; the wrapper owns the data now.
                                            le.close();
                                            return entry;
                                        })),
                                ctx);
                    }

                    @Override
                    public void onFailure(Throwable cause) {
                        if (cause instanceof BKException) {
                            BKException bke = (BKException) cause;
                            cb.readComplete(bke.getCode(), LedgerHandle.this, null, ctx);
                        } else {
                            cb.readComplete(Code.UnexpectedConditionException, LedgerHandle.this, null, ctx);
                        }
                    }
                }, clientCtx.getMainWorkerPool().chooseThread(ledgerId));
    } else {
        cb.readComplete(Code.ClientClosedException, LedgerHandle.this, null, ctx);
    }
}
/*
 * Read the last entry in the ledger
 *
 * @param cb
 *            object implementing read callback interface
 * @param ctx
 *            control object
 */
public void asyncReadLastEntry(ReadCallback cb, Object ctx) {
    long lastEntryId = getLastAddConfirmed();
    if (lastEntryId < 0) {
        // Ledger was empty, so there is no last entry to read
        cb.readComplete(BKException.Code.NoSuchEntryException, this, null, ctx);
    } else {
        asyncReadEntriesInternal(lastEntryId, lastEntryId, cb, ctx, false);
    }
}
/**
 * Read the last confirmed entry synchronously.
 *
 * @return the entry at the locally known LastAddConfirmed position
 * @throws BKException.BKNoSuchEntryException if the ledger is empty (LAC &lt; 0)
 */
public LedgerEntry readLastEntry()
        throws InterruptedException, BKException {
    long lastEntryId = getLastAddConfirmed();
    if (lastEntryId < 0) {
        // Ledger was empty, so there is no last entry to read
        throw new BKException.BKNoSuchEntryException();
    } else {
        CompletableFuture<Enumeration<LedgerEntry>> result = new CompletableFuture<>();
        asyncReadEntries(lastEntryId, lastEntryId, new SyncReadCallback(result), null);
        return SyncCallbackUtils.waitForResult(result).nextElement();
    }
}
/**
 * Core read path: build a PendingReadOp for [firstEntry, lastEntry], optionally
 * wait for writable channels (fail-fast otherwise), then run it — ordered for
 * writable handles, inline for read-only ones.
 *
 * @param isRecoveryRead whether this read is part of ledger recovery
 * @return the op's future, failed with ClientClosedException if the client is closed
 */
CompletableFuture<LedgerEntries> readEntriesInternalAsync(long firstEntry,
                                                          long lastEntry,
                                                          boolean isRecoveryRead) {
    PendingReadOp op = new PendingReadOp(this, clientCtx,
            firstEntry, lastEntry, isRecoveryRead);
    if (!clientCtx.isClientClosed()) {
        // Waiting on the first one.
        // This is not very helpful if there are multiple ensembles or if bookie goes into unresponsive
        // state later after N requests sent.
        // Unfortunately it seems that alternatives are:
        // - send reads one-by-one (up to the app)
        // - rework LedgerHandle to send requests one-by-one (maybe later, potential perf impact)
        // - block worker pool (not good)
        // Even with this implementation one should be more concerned about OOME when all read responses arrive
        // or about overloading bookies with these requests then about submission of many small requests.
        // Naturally one of the solutions would be to submit smaller batches and in this case
        // current implementation will prevent next batch from starting when bookie is
        // unresponsive thus helpful enough.
        if (clientCtx.getConf().waitForWriteSetMs >= 0) {
            DistributionSchedule.WriteSet ws = distributionSchedule.getWriteSet(firstEntry);
            try {
                if (!waitForWritable(ws, ws.size() - 1, clientCtx.getConf().waitForWriteSetMs)) {
                    op.allowFailFastOnUnwritableChannel();
                }
            } finally {
                // WriteSet objects are pooled; always recycle.
                ws.recycle();
            }
        }

        if (isHandleWritable()) {
            // Ledger handle in read/write mode: submit to OSE for ordered execution.
            executeOrdered(op);
        } else {
            // Read-only ledger handle: bypass OSE and execute read directly in client thread.
            // This avoids a context-switch to OSE thread and thus reduces latency.
            op.run();
        }
    } else {
        op.future().completeExceptionally(BKException.create(ClientClosedException));
    }
    return op.future();
}
/**
 * Add entry synchronously to an open ledger.
 *
 * @param data
 *         array of bytes to be written to the ledger
 *         do not reuse the buffer, bk-client will release it appropriately
 * @return the entryId of the new inserted entry
 */
public long addEntry(byte[] data) throws InterruptedException, BKException {
    return addEntry(data, 0, data.length);
}
/**
 * {@inheritDoc}
 */
@Override
public CompletableFuture<Long> appendAsync(ByteBuf data) {
    // SyncAddCallback is itself a CompletableFuture<Long>, so it doubles
    // as both the callback and the returned future.
    SyncAddCallback callback = new SyncAddCallback();
    asyncAddEntry(data, callback, null);
    return callback;
}
/**
 * Add entry synchronously to an open ledger. This can be used only with
 * {@link LedgerHandleAdv} returned through ledgers created with {@link
 * BookKeeper#createLedgerAdv(int, int, int, BookKeeper.DigestType, byte[])}.
 *
 *
 * @param entryId
 *            entryId to be added
 * @param data
 *            array of bytes to be written to the ledger
 *            do not reuse the buffer, bk-client will release it appropriately
 * @return the entryId of the new inserted entry
 * @throws BKException always (IllegalOpException); overridden by LedgerHandleAdv
 */
public long addEntry(final long entryId, byte[] data) throws InterruptedException, BKException {
    LOG.error("To use this feature Ledger must be created with createLedgerAdv interface.");
    throw BKException.create(BKException.Code.IllegalOpException);
}
/**
 * Add entry synchronously to an open ledger.
 *
 * @param data
 *         array of bytes to be written to the ledger
 *         do not reuse the buffer, bk-client will release it appropriately
 * @param offset
 *          offset from which to take bytes from data
 * @param length
 *          number of bytes to take from data
 * @return the entryId of the new inserted entry
 */
public long addEntry(byte[] data, int offset, int length)
        throws InterruptedException, BKException {
    if (LOG.isDebugEnabled()) {
        // Log the payload size and target ledger; logging the byte[] itself
        // would only print its identity hash, which is useless for debugging.
        LOG.debug("Adding entry of {} bytes to ledger {}", length, ledgerId);
    }

    SyncAddCallback callback = new SyncAddCallback();
    asyncAddEntry(data, offset, length, callback, null);

    return SyncCallbackUtils.waitForResult(callback);
}
/**
 * Add entry synchronously to an open ledger. This can be used only with
 * {@link LedgerHandleAdv} returned through ledgers created with {@link
 * BookKeeper#createLedgerAdv(int, int, int, BookKeeper.DigestType, byte[])}.
 *
 * @param entryId
 *            entryId to be added.
 * @param data
 *            array of bytes to be written to the ledger
 *            do not reuse the buffer, bk-client will release it appropriately
 * @param offset
 *            offset from which to take bytes from data
 * @param length
 *            number of bytes to take from data
 * @return entryId
 * @throws BKException always (IllegalOpException); overridden by LedgerHandleAdv
 */
public long addEntry(final long entryId, byte[] data, int offset, int length) throws InterruptedException,
        BKException {
    LOG.error("To use this feature Ledger must be created with createLedgerAdv() interface.");
    throw BKException.create(BKException.Code.IllegalOpException);
}
/**
 * Add entry asynchronously to an open ledger.
 *
 * @param data
 *          array of bytes to be written
 *          do not reuse the buffer, bk-client will release it appropriately
 * @param cb
 *          object implementing callbackinterface
 * @param ctx
 *          some control object
 */
public void asyncAddEntry(final byte[] data, final AddCallback cb,
                          final Object ctx) {
    asyncAddEntry(data, 0, data.length, cb, ctx);
}
/**
 * Add entry asynchronously to an open ledger. This can be used only with
 * {@link LedgerHandleAdv} returned through ledgers created with {@link
 * BookKeeper#createLedgerAdv(int, int, int, BookKeeper.DigestType, byte[])}.
 *
 * <p>On a plain LedgerHandle this always fails the callback with
 * IllegalOpException; LedgerHandleAdv overrides it.
 *
 * @param entryId
 *          entryId to be added
 * @param data
 *          array of bytes to be written
 *          do not reuse the buffer, bk-client will release it appropriately
 * @param cb
 *          object implementing callbackinterface
 * @param ctx
 *          some control object
 */
public void asyncAddEntry(final long entryId, final byte[] data, final AddCallback cb, final Object ctx) {
    LOG.error("To use this feature Ledger must be created with createLedgerAdv() interface.");
    cb.addCompleteWithLatency(BKException.Code.IllegalOpException, LedgerHandle.this, entryId, 0, ctx);
}
/**
 * Add entry asynchronously to an open ledger, using an offset and range.
 *
 * @param data
 *          array of bytes to be written
 *          do not reuse the buffer, bk-client will release it appropriately
 * @param offset
 *          offset from which to take bytes from data
 * @param length
 *          number of bytes to take from data
 * @param cb
 *          object implementing callbackinterface
 * @param ctx
 *          some control object
 * @throws ArrayIndexOutOfBoundsException if offset or length is negative or
 *          offset and length sum to a value higher than the length of data.
 */
public void asyncAddEntry(final byte[] data, final int offset, final int length,
                          final AddCallback cb, final Object ctx) {
    // Overflow-safe bounds check: "offset + length > data.length" can wrap to a
    // negative int for huge values and slip past the check; comparing length
    // against the remaining capacity is algebraically equivalent but cannot overflow.
    if (offset < 0 || length < 0 || length > data.length - offset) {
        throw new ArrayIndexOutOfBoundsException(
                "Invalid values for offset(" + offset
                        + ") or length(" + length + ")");
    }

    asyncAddEntry(Unpooled.wrappedBuffer(data, offset, length), cb, ctx);
}
// Core async-add entry point: wrap the payload in a PendingAddOp bound to the
// current ensemble and hand it to doAsyncAddEntry for sequencing.
public void asyncAddEntry(ByteBuf data, final AddCallback cb, final Object ctx) {
    PendingAddOp op = PendingAddOp.create(this, clientCtx, getCurrentEnsemble(), data, writeFlags, cb, ctx);
    doAsyncAddEntry(op);
}
/**
 * Add entry asynchronously to an open ledger, using an offset and range.
 * This can be used only with {@link LedgerHandleAdv} returned through
 * ledgers created with
 * {@link BookKeeper#createLedgerAdv(int, int, int, BookKeeper.DigestType, byte[])}.
 *
 * <p>On a plain LedgerHandle this always fails the callback with
 * IllegalOpException; LedgerHandleAdv overrides it.
 *
 * @param entryId
 *          entryId of the entry to add.
 * @param data
 *          array of bytes to be written
 *          do not reuse the buffer, bk-client will release it appropriately
 * @param offset
 *          offset from which to take bytes from data
 * @param length
 *          number of bytes to take from data
 * @param cb
 *          object implementing callbackinterface
 * @param ctx
 *          some control object
 * @throws ArrayIndexOutOfBoundsException
 *          if offset or length is negative or offset and length sum to a
 *          value higher than the length of data.
 */
public void asyncAddEntry(final long entryId, final byte[] data, final int offset, final int length,
                          final AddCallback cb, final Object ctx) {
    LOG.error("To use this feature Ledger must be created with createLedgerAdv() interface.");
    cb.addCompleteWithLatency(BKException.Code.IllegalOpException, LedgerHandle.this, entryId, 0, ctx);
}
/**
 * Add entry asynchronously to an open ledger, using an offset and range.
 *
 * <p>On a plain LedgerHandle this always fails the callback with
 * IllegalOpException; LedgerHandleAdv overrides it.
 *
 * @param entryId
 *          entryId of the entry to add
 * @param data
 *          array of bytes to be written
 *          do not reuse the buffer, bk-client will release it appropriately
 * @param offset
 *          offset from which to take bytes from data
 * @param length
 *          number of bytes to take from data
 * @param cb
 *          object implementing callbackinterface
 * @param ctx
 *          some control object
 * @throws ArrayIndexOutOfBoundsException
 *          if offset or length is negative or offset and length sum to a
 *          value higher than the length of data.
 */
public void asyncAddEntry(final long entryId, final byte[] data, final int offset, final int length,
                          final AddCallbackWithLatency cb, final Object ctx) {
    LOG.error("To use this feature Ledger must be created with createLedgerAdv() interface.");
    cb.addCompleteWithLatency(BKException.Code.IllegalOpException, LedgerHandle.this, entryId, 0, ctx);
}
/**
 * Add entry asynchronously to an open ledger, using an offset and range.
 * This can be used only with {@link LedgerHandleAdv} returned through
 * ledgers created with {@link BookKeeper#createLedgerAdv(int, int, int, BookKeeper.DigestType, byte[])}.
 *
 * <p>On a plain LedgerHandle this always fails the callback with
 * IllegalOpException; LedgerHandleAdv overrides it.
 *
 * @param entryId
 *          entryId of the entry to add.
 * @param data
 *          io.netty.buffer.ByteBuf of bytes to be written
 *          do not reuse the buffer, bk-client will release it appropriately
 * @param cb
 *          object implementing callbackinterface
 * @param ctx
 *          some control object
 */
public void asyncAddEntry(final long entryId, ByteBuf data,
                          final AddCallbackWithLatency cb, final Object ctx) {
    LOG.error("To use this feature Ledger must be created with createLedgerAdv() interface.");
    cb.addCompleteWithLatency(BKException.Code.IllegalOpException, LedgerHandle.this, entryId, 0, ctx);
}
/**
 * {@inheritDoc}
 *
 * <p>Forces the bookies to persist all previously acknowledged adds. Fails with
 * BKLedgerClosedException if the handle is no longer writable; short-circuits to
 * success if no write has ever been issued. All callback completions are routed
 * through the ordered worker pool for this ledger.
 */
@Override
public CompletableFuture<Void> force() {
    CompletableFuture<Void> result = new CompletableFuture<>();
    ForceLedgerOp op = new ForceLedgerOp(this, clientCtx.getBookieClient(), getCurrentEnsemble(), result);
    boolean wasClosed = false;
    synchronized (this) {
        // synchronized on this to ensure that
        // the ledger isn't closed between checking and
        // updating lastAddPushed
        if (!isHandleWritable()) {
            wasClosed = true;
        }
    }
    if (wasClosed) {
        // make sure the callback is triggered in main worker pool
        try {
            executeOrdered(new Runnable() {
                @Override
                public void run() {
                    LOG.warn("Force() attempted on a closed ledger: {}", ledgerId);
                    result.completeExceptionally(new BKException.BKLedgerClosedException());
                }

                @Override
                public String toString() {
                    return String.format("force(lid=%d)", ledgerId);
                }
            });
        } catch (RejectedExecutionException e) {
            result.completeExceptionally(new BKException.BKInterruptedException());
        }
        return result;
    }
    // early exit: no write has been issued yet
    if (pendingAddsSequenceHead == INVALID_ENTRY_ID) {
        executeOrdered(new Runnable() {
            @Override
            public void run() {
                FutureUtils.complete(result, null);
            }

            @Override
            public String toString() {
                return String.format("force(lid=%d)", ledgerId);
            }
        });
        return result;
    }
    try {
        executeOrdered(op);
    } catch (RejectedExecutionException e) {
        result.completeExceptionally(new BKException.BKInterruptedException());
    }
    return result;
}
/**
 * Make a recovery add entry request. Recovery adds can add to a ledger even
 * if it has been fenced.
 *
 * <p>This is only valid for bookie and ledger recovery, which may need to replicate
 * entries to a quorum of bookies to ensure data safety.
 *
 * <p>Normal client should never call this method.
 */
void asyncRecoveryAddEntry(final byte[] data, final int offset, final int length,
                           final AddCallback cb, final Object ctx) {
    // enableRecoveryAdd() marks the op so bookies accept it despite fencing.
    PendingAddOp op = PendingAddOp.create(this, clientCtx, getCurrentEnsemble(),
                                          Unpooled.wrappedBuffer(data, offset, length),
                                          writeFlags, cb, ctx)
            .enableRecoveryAdd();
    doAsyncAddEntry(op);
}
/**
 * Check whether enough channels in the write set are currently writable,
 * tolerating up to {@code allowedNonWritableCount} non-writable ones.
 *
 * <p>NOTE(review): the early-exit uses {@code nonWritableCount >= allowedNonWritableCount},
 * which for any positive allowance returns false as soon as the count merely
 * *reaches* the allowance rather than exceeds it — looks like an off-by-one
 * relative to the parameter's name. Left as-is to preserve existing behavior;
 * confirm intent before changing.
 *
 * @param writeSet indices (into the current ensemble) of bookies to check
 * @param allowedNonWritableCount how many non-writable channels to tolerate (clamped to >= 0)
 * @return true if the write set is considered writable
 */
private boolean isWriteSetWritable(DistributionSchedule.WriteSet writeSet,
                                   int allowedNonWritableCount) {
    if (allowedNonWritableCount < 0) {
        allowedNonWritableCount = 0;
    }

    final int sz = writeSet.size();
    final int requiredWritable = sz - allowedNonWritableCount;

    int nonWritableCount = 0;
    List<BookieId> currentEnsemble = getCurrentEnsemble();
    for (int i = 0; i < sz; i++) {
        int writeBookieIndex = writeSet.get(i);
        // Indices beyond the ensemble size are treated as writable.
        if (writeBookieIndex < currentEnsemble.size()
                && !clientCtx.getBookieClient().isWritable(currentEnsemble.get(writeBookieIndex), ledgerId)) {
            nonWritableCount++;
            if (nonWritableCount >= allowedNonWritableCount) {
                return false;
            }
        } else {
            // Early success once enough channels are known writable.
            final int knownWritable = i - nonWritableCount;
            if (knownWritable >= requiredWritable) {
                return true;
            }
        }
    }
    return true;
}
/**
 * Poll until the write set becomes writable or {@code durationMs} elapses,
 * sleeping with a small linear backoff (capped) between checks. Records the
 * outcome in the client-channel write-wait stats.
 *
 * @param writeSet channels to check
 * @param allowedNonWritableCount tolerated non-writable channels (see isWriteSetWritable)
 * @param durationMs max time to wait; negative means "don't check, report writable"
 * @return true if the write set ended up writable within the deadline
 */
@VisibleForTesting
protected boolean waitForWritable(DistributionSchedule.WriteSet writeSet,
                                  int allowedNonWritableCount, long durationMs) {
    if (durationMs < 0) {
        return true;
    }

    final long startTime = MathUtils.nowInNano();
    boolean writableResult = isWriteSetWritable(writeSet, allowedNonWritableCount);

    if (!writableResult && durationMs > 0) {
        int backoff = 1;
        final int maxBackoff = 4;
        final long deadline = startTime + TimeUnit.MILLISECONDS.toNanos(durationMs);

        while (!(writableResult = isWriteSetWritable(writeSet, allowedNonWritableCount))) {
            if (MathUtils.nowInNano() < deadline) {
                // Sleep at most the time already elapsed (floored at 1ms),
                // bounded by the growing backoff.
                long maxSleep = MathUtils.elapsedMSec(startTime);
                if (maxSleep < 0) {
                    maxSleep = 1;
                }
                long sleepMs = Math.min(backoff, maxSleep);

                try {
                    TimeUnit.MILLISECONDS.sleep(sleepMs);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and do one last check.
                    Thread.currentThread().interrupt();
                    writableResult = isWriteSetWritable(writeSet, allowedNonWritableCount);
                    break;
                }
                if (backoff <= maxBackoff) {
                    backoff++;
                }
            } else {
                // Deadline passed: give up.
                writableResult = false;
                break;
            }
        }
        if (backoff > 1) {
            LOG.info("Spent {} ms waiting for {} writable channels, writable result {}",
                    MathUtils.elapsedMSec(startTime),
                    writeSet.size() - allowedNonWritableCount,
                    writableResult);
        }
    }

    if (writableResult) {
        clientChannelWriteWaitStats.registerSuccessfulEvent(
                MathUtils.elapsedNanos(startTime), TimeUnit.NANOSECONDS);
    } else {
        clientChannelWriteWaitStats.registerFailedEvent(
                MathUtils.elapsedNanos(startTime), TimeUnit.NANOSECONDS);
    }
    return writableResult;
}
/**
 * Sequence a PendingAddOp: apply optional rate limiting, assign the next entry
 * id and running ledger length under the handle lock, enqueue the op, and
 * initiate it. If the handle is no longer writable, fail the op's callback on
 * the ordered worker pool and recycle the op.
 *
 * @param op the pending add operation to sequence and initiate
 */
protected void doAsyncAddEntry(final PendingAddOp op) {
    if (throttler != null) {
        // Client-side add throttling (throttleValue config); blocks until a permit.
        throttler.acquire();
    }

    boolean wasClosed = false;
    synchronized (this) {
        // synchronized on this to ensure that
        // the ledger isn't closed between checking and
        // updating lastAddPushed
        if (isHandleWritable()) {
            long entryId = ++lastAddPushed;
            long currentLedgerLength = addToLength(op.payload.readableBytes());
            op.setEntryId(entryId);
            op.setLedgerLength(currentLedgerLength);
            pendingAddOps.add(op);
        } else {
            wasClosed = true;
        }
    }

    if (wasClosed) {
        // make sure the callback is triggered in main worker pool
        try {
            executeOrdered(new Runnable() {
                @Override
                public void run() {
                    LOG.warn("Attempt to add to closed ledger: {}", ledgerId);
                    op.cb.addCompleteWithLatency(BKException.Code.LedgerClosedException,
                            LedgerHandle.this, INVALID_ENTRY_ID, 0, op.ctx);
                    op.recyclePendAddOpObject();
                }

                @Override
                public String toString() {
                    return String.format("AsyncAddEntryToClosedLedger(lid=%d)", ledgerId);
                }
            });
        } catch (RejectedExecutionException e) {
            // Executor shut down: fail inline instead.
            op.cb.addCompleteWithLatency(BookKeeper.getReturnRc(clientCtx.getBookieClient(),
                            BKException.Code.InterruptedException),
                    LedgerHandle.this, INVALID_ENTRY_ID, 0, op.ctx);
            op.recyclePendAddOpObject();
        }
        return;
    }

    if (clientCtx.getConf().waitForWriteSetMs >= 0) {
        DistributionSchedule.WriteSet ws = distributionSchedule.getWriteSet(op.getEntryId());
        try {
            if (!waitForWritable(ws, 0, clientCtx.getConf().waitForWriteSetMs)) {
                op.allowFailFastOnUnwritableChannel();
            }
        } finally {
            // WriteSet objects are pooled; always recycle.
            ws.recycle();
        }
    }

    op.initiate();
}
/**
 * Fold a (lac, length) observation from a bookie into the handle's local
 * state, advancing lastAddConfirmed, lastAddPushed and length monotonically.
 */
synchronized void updateLastConfirmed(long lac, long len) {
    final boolean advanced = lac > lastAddConfirmed;
    if (advanced) {
        lastAddConfirmed = lac;
        // the observation actually moved our LAC forward
        lacUpdateHitsCounter.inc();
    } else {
        lacUpdateMissesCounter.inc();
    }
    if (lac > lastAddPushed) {
        lastAddPushed = lac;
    }
    if (len > length) {
        length = len;
    }
}
/**
 * Obtains asynchronously the last confirmed write from a quorum of bookies.
 * This call obtains the last add confirmed each bookie has received for this
 * ledger and returns the maximum. If the ledger has been closed, the value
 * returned may not correspond to the id of the last entry of the ledger,
 * since it reads the hint of bookies; in that case it may differ from
 * {@link #getLastAddConfirmed()}, which returns the local value of the
 * ledger handle.
 *
 * @see #getLastAddConfirmed()
 *
 * @param cb callback invoked with the result
 * @param ctx opaque context passed back to the callback
 */
public void asyncReadLastConfirmed(final ReadLastConfirmedCallback cb, final Object ctx) {
    final boolean v2Protocol = clientCtx.getConf().useV2WireProtocol;
    if (!v2Protocol) {
        asyncReadExplicitLastConfirmed(cb, ctx);
    } else {
        // the v2 wire protocol has no dedicated readLAC RPC, so read the
        // LAC piggy-backed on regular entry reads instead
        asyncReadPiggybackLastConfirmed(cb, ctx);
    }
}
/**
 * Read the LAC piggy-backed on entry reads from the current ensemble.
 * If the ledger is already closed, answers immediately from metadata.
 */
private void asyncReadPiggybackLastConfirmed(final ReadLastConfirmedCallback cb, final Object ctx) {
    boolean isClosed;
    long lastEntryId;
    synchronized (this) {
        // snapshot closed-state and last entry id consistently
        LedgerMetadata metadata = getLedgerMetadata();
        isClosed = metadata.isClosed();
        lastEntryId = metadata.getLastEntryId();
    }
    if (isClosed) {
        // closed ledger: the metadata's last entry id is authoritative
        cb.readLastConfirmedComplete(BKException.Code.OK, lastEntryId, ctx);
        return;
    }
    ReadLastConfirmedOp.LastConfirmedDataCallback innercb = new ReadLastConfirmedOp.LastConfirmedDataCallback() {
        @Override
        public void readLastConfirmedDataComplete(int rc, DigestManager.RecoveryData data) {
            if (rc == BKException.Code.OK) {
                // fold the quorum's answer into local state before replying
                updateLastConfirmed(data.getLastAddConfirmed(), data.getLength());
                cb.readLastConfirmedComplete(rc, data.getLastAddConfirmed(), ctx);
            } else {
                cb.readLastConfirmedComplete(rc, INVALID_ENTRY_ID, ctx);
            }
        }
    };
    new ReadLastConfirmedOp(clientCtx.getBookieClient(),
                            distributionSchedule,
                            macManager,
                            ledgerId,
                            getCurrentEnsemble(),
                            ledgerKey,
                            innercb).initiate();
}
/**
 * Obtains asynchronously the last confirmed write from a quorum of bookies.
 * It is similar as
 * {@link #asyncReadLastConfirmed(org.apache.bookkeeper.client.AsyncCallback.ReadLastConfirmedCallback, Object)},
 * but it doesn't wait all the responses from the quorum. It would callback
 * immediately if it received a LAC which is larger than current LAC.
 *
 * @see #asyncTryReadLastConfirmed(org.apache.bookkeeper.client.AsyncCallback.ReadLastConfirmedCallback, Object)
 *
 * @param cb
 *          callback to return read last confirmed
 * @param ctx
 *          callback context
 */
public void asyncTryReadLastConfirmed(final ReadLastConfirmedCallback cb, final Object ctx) {
    boolean isClosed;
    long lastEntryId;
    synchronized (this) {
        // snapshot closed-state and last entry id consistently
        LedgerMetadata metadata = getLedgerMetadata();
        isClosed = metadata.isClosed();
        lastEntryId = metadata.getLastEntryId();
    }
    if (isClosed) {
        // closed ledger: answer immediately from metadata
        cb.readLastConfirmedComplete(BKException.Code.OK, lastEntryId, ctx);
        return;
    }
    ReadLastConfirmedOp.LastConfirmedDataCallback innercb = new ReadLastConfirmedOp.LastConfirmedDataCallback() {
        // the op may deliver several responses; guard so the user callback
        // fires at most once
        AtomicBoolean completed = new AtomicBoolean(false);
        @Override
        public void readLastConfirmedDataComplete(int rc, DigestManager.RecoveryData data) {
            if (rc == BKException.Code.OK) {
                // always fold the data into local state, even if we already
                // completed the user callback
                updateLastConfirmed(data.getLastAddConfirmed(), data.getLength());
                if (completed.compareAndSet(false, true)) {
                    cb.readLastConfirmedComplete(rc, data.getLastAddConfirmed(), ctx);
                }
            } else {
                if (completed.compareAndSet(false, true)) {
                    cb.readLastConfirmedComplete(rc, INVALID_ENTRY_ID, ctx);
                }
            }
        }
    };
    new TryReadLastConfirmedOp(this, clientCtx.getBookieClient(), getCurrentEnsemble(),
                               innercb, getLastAddConfirmed()).initiate();
}
/**
 * {@inheritDoc}
 */
@Override
public CompletableFuture<Long> tryReadLastAddConfirmedAsync() {
    // bridge the callback-style API onto a future
    final FutureReadLastConfirmed promise = new FutureReadLastConfirmed();
    asyncTryReadLastConfirmed(promise, null);
    return promise;
}
/**
 * {@inheritDoc}
 */
@Override
public CompletableFuture<Long> readLastAddConfirmedAsync() {
    // bridge the callback-style API onto a future
    final FutureReadLastConfirmed promise = new FutureReadLastConfirmed();
    asyncReadLastConfirmed(promise, null);
    return promise;
}
/**
 * {@inheritDoc}
 */
@Override
public CompletableFuture<LastConfirmedAndEntry> readLastAddConfirmedAndEntryAsync(long entryId,
                                                                                  long timeOutInMillis,
                                                                                  boolean parallel) {
    // bridge the callback-style API onto a future
    final FutureReadLastConfirmedAndEntry promise = new FutureReadLastConfirmedAndEntry();
    asyncReadLastConfirmedAndEntry(entryId, timeOutInMillis, parallel, promise, null);
    return promise;
}
/**
 * Asynchronous read next entry and the latest last add confirmed.
 * If the next entryId is less than known last add confirmed, the call will read next entry directly.
 * If the next entryId is ahead of known last add confirmed, the call will issue a long poll read
 * to wait for the next entry <i>entryId</i>.
 *
 * <p>The callback will return the latest last add confirmed and next entry if it is available within timeout
 * period <i>timeOutInMillis</i>.
 *
 * @param entryId
 *          next entry id to read
 * @param timeOutInMillis
 *          timeout period to wait for the entry id to be available (for long poll only)
 * @param parallel
 *          whether to issue the long poll reads in parallel
 * @param cb
 *          callback to return the result
 * @param ctx
 *          callback context
 */
public void asyncReadLastConfirmedAndEntry(final long entryId,
                                           final long timeOutInMillis,
                                           final boolean parallel,
                                           final AsyncCallback.ReadLastConfirmedAndEntryCallback cb,
                                           final Object ctx) {
    boolean isClosed;
    long lac;
    synchronized (this) {
        // snapshot closed-state and last entry id consistently
        LedgerMetadata metadata = getLedgerMetadata();
        isClosed = metadata.isClosed();
        lac = metadata.getLastEntryId();
    }
    if (isClosed) {
        if (entryId > lac) {
            // closed ledger and the requested entry is past the end:
            // nothing will ever arrive, report the final LAC with no entry
            cb.readLastConfirmedAndEntryComplete(BKException.Code.OK, lac, null, ctx);
            return;
        }
    } else {
        // open ledger: use the handle's locally-known LAC instead
        lac = getLastAddConfirmed();
    }
    if (entryId <= lac) {
        // the entry is already confirmed; a plain read suffices
        asyncReadEntries(entryId, entryId, new ReadCallback() {
            @Override
            public void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq, Object ctx) {
                if (BKException.Code.OK == rc) {
                    if (seq.hasMoreElements()) {
                        cb.readLastConfirmedAndEntryComplete(rc, getLastAddConfirmed(), seq.nextElement(), ctx);
                    } else {
                        cb.readLastConfirmedAndEntryComplete(rc, getLastAddConfirmed(), null, ctx);
                    }
                } else {
                    cb.readLastConfirmedAndEntryComplete(rc, INVALID_ENTRY_ID, null, ctx);
                }
            }
        }, ctx);
        return;
    }
    // wait for entry <i>entryId</i>
    ReadLastConfirmedAndEntryOp.LastConfirmedAndEntryCallback innercb =
        new ReadLastConfirmedAndEntryOp.LastConfirmedAndEntryCallback() {
            // guard so the user callback fires at most once
            AtomicBoolean completed = new AtomicBoolean(false);
            @Override
            public void readLastConfirmedAndEntryComplete(int rc, long lastAddConfirmed, LedgerEntry entry) {
                if (rc == BKException.Code.OK) {
                    if (completed.compareAndSet(false, true)) {
                        cb.readLastConfirmedAndEntryComplete(rc, lastAddConfirmed, entry, ctx);
                    }
                } else {
                    if (completed.compareAndSet(false, true)) {
                        cb.readLastConfirmedAndEntryComplete(rc, INVALID_ENTRY_ID, null, ctx);
                    }
                }
            }
        };
    // long poll starts from the previous entry (entryId - 1)
    new ReadLastConfirmedAndEntryOp(this, clientCtx, getCurrentEnsemble(), innercb, entryId - 1, timeOutInMillis)
        .parallelRead(parallel)
        .initiate();
}
/**
 * Context objects for synchronous call to read last confirmed.
 * Callers synchronize on the instance and wait until {@link #ready()}
 * reports that a response (or error) has been recorded.
 */
static class LastConfirmedCtx {
    // sentinel meaning "no response received yet"
    static final long ENTRY_ID_PENDING = -10;
    long response;
    int rc;
    LastConfirmedCtx() {
        this.response = ENTRY_ID_PENDING;
    }
    void setLastConfirmed(long lastConfirmed) {
        this.response = lastConfirmed;
    }
    // NOTE: lower-case 'l' is a historical naming quirk kept for compatibility
    long getlastConfirmed() {
        return this.response;
    }
    void setRC(int rc) {
        this.rc = rc;
    }
    int getRC() {
        return this.rc;
    }
    // true once a last-confirmed value has been recorded
    boolean ready() {
        return (this.response != ENTRY_ID_PENDING);
    }
}
/**
 * Obtains synchronously the last confirmed write from a quorum of bookies. This call
 * obtains the last add confirmed each bookie has received for this ledger
 * and returns the maximum. If the ledger has been closed, the value returned by this
 * call may not correspond to the id of the last entry of the ledger, since it reads
 * the hint of bookies. Consequently, in the case the ledger has been closed, it may
 * return a different value than getLastAddConfirmed, which returns the local value
 * of the ledger handle.
 *
 * @see #getLastAddConfirmed()
 *
 * @return The entry id of the last confirmed write or {@link #INVALID_ENTRY_ID INVALID_ENTRY_ID}
 *         if no entry has been confirmed
 * @throws InterruptedException
 * @throws BKException
 */
public long readLastConfirmed()
        throws InterruptedException, BKException {
    final LastConfirmedCtx lcCtx = new LastConfirmedCtx();
    asyncReadLastConfirmed(new SyncReadLastConfirmedCallback(), lcCtx);
    // block until the async callback records a result
    synchronized (lcCtx) {
        while (!lcCtx.ready()) {
            lcCtx.wait();
        }
    }
    final int rc = lcCtx.getRC();
    if (BKException.Code.OK != rc) {
        throw BKException.create(rc);
    }
    return lcCtx.getlastConfirmed();
}
/**
 * Obtains synchronously the last confirmed write from a quorum of bookies.
 * It is similar to {@link #readLastConfirmed()}, but it does not wait for all
 * responses from the quorum: it returns as soon as a LAC larger than the
 * current one is received.
 *
 * @see #readLastConfirmed()
 *
 * @return The entry id of the last confirmed write or {@link #INVALID_ENTRY_ID INVALID_ENTRY_ID}
 *         if no entry has been confirmed
 * @throws InterruptedException
 * @throws BKException
 */
public long tryReadLastConfirmed() throws InterruptedException, BKException {
    final LastConfirmedCtx lcCtx = new LastConfirmedCtx();
    asyncTryReadLastConfirmed(new SyncReadLastConfirmedCallback(), lcCtx);
    // block until the async callback records a result
    synchronized (lcCtx) {
        while (!lcCtx.ready()) {
            lcCtx.wait();
        }
    }
    final int rc = lcCtx.getRC();
    if (BKException.Code.OK != rc) {
        throw BKException.create(rc);
    }
    return lcCtx.getlastConfirmed();
}
/**
 * Obtains asynchronously the explicit last add confirmed from a quorum of
 * bookies. This call obtains Explicit LAC value and piggy-backed LAC value (just like
 * {@link #asyncReadLastConfirmed(ReadLastConfirmedCallback, Object)}) from each
 * bookie in the ensemble and returns the maximum.
 * If in the write LedgerHandle, explicitLAC feature is not enabled then this call behavior
 * will be similar to {@link #asyncReadLastConfirmed(ReadLastConfirmedCallback, Object)}.
 * If the read explicit lastaddconfirmed is greater than getLastAddConfirmed, then it updates the
 * lastAddConfirmed of this ledgerhandle. If the ledger has been closed, it
 * returns the value of the last add confirmed from the metadata.
 *
 * @see #getLastAddConfirmed()
 *
 * @param cb
 *          callback to return read explicit last confirmed
 * @param ctx
 *          callback context
 */
public void asyncReadExplicitLastConfirmed(final ReadLastConfirmedCallback cb, final Object ctx) {
    boolean isClosed;
    synchronized (this) {
        LedgerMetadata metadata = getLedgerMetadata();
        isClosed = metadata.isClosed();
        if (isClosed) {
            // closed ledger: adopt the final values from metadata
            lastAddConfirmed = metadata.getLastEntryId();
            length = metadata.getLength();
        }
    }
    if (isClosed) {
        cb.readLastConfirmedComplete(BKException.Code.OK, lastAddConfirmed, ctx);
        return;
    }
    PendingReadLacOp.LacCallback innercb = new PendingReadLacOp.LacCallback() {
        @Override
        public void getLacComplete(int rc, long lac) {
            if (rc == BKException.Code.OK) {
                // here we are trying to update lac only but not length
                updateLastConfirmed(lac, 0);
                cb.readLastConfirmedComplete(rc, lac, ctx);
            } else {
                cb.readLastConfirmedComplete(rc, INVALID_ENTRY_ID, ctx);
            }
        }
    };
    new PendingReadLacOp(this, clientCtx.getBookieClient(), getCurrentEnsemble(), innercb).initiate();
}
/**
 * Obtains synchronously the explicit last add confirmed from a quorum of
 * bookies. This call obtains the Explicit LAC value and the piggy-backed LAC
 * value (just like {@link #readLastConfirmed()}) from each bookie in the
 * ensemble and returns the maximum.
 * If the explicitLAC feature is not enabled in the write LedgerHandle, this
 * call behaves like {@link #readLastConfirmed()}.
 * If the read explicit lastaddconfirmed is greater than getLastAddConfirmed,
 * it updates the lastAddConfirmed of this ledger handle. If the ledger has
 * been closed, it returns the last add confirmed from the metadata.
 *
 * @see #getLastAddConfirmed()
 *
 * @return The entry id of the explicit last confirmed write or
 *         {@link #INVALID_ENTRY_ID INVALID_ENTRY_ID} if no entry has been
 *         confirmed.
 * @throws InterruptedException
 * @throws BKException
 */
public long readExplicitLastConfirmed() throws InterruptedException, BKException {
    final LastConfirmedCtx lcCtx = new LastConfirmedCtx();
    asyncReadExplicitLastConfirmed(new SyncReadLastConfirmedCallback(), lcCtx);
    // block until the async callback records a result
    synchronized (lcCtx) {
        while (!lcCtx.ready()) {
            lcCtx.wait();
        }
    }
    final int rc = lcCtx.getRC();
    if (BKException.Code.OK != rc) {
        throw BKException.create(rc);
    }
    return lcCtx.getlastConfirmed();
}
// Close the ledger and fail every add still in the pipeline with rc.
void handleUnrecoverableErrorDuringAdd(int rc) {
    final boolean inRecovery =
            getLedgerMetadata().getState() == LedgerMetadata.State.IN_RECOVERY;
    if (inRecovery) {
        // we should not close ledger if ledger is recovery mode
        // otherwise we may lose entry.
        errorOutPendingAdds(rc);
        return;
    }
    LOG.error("Closing ledger {} due to {}", ledgerId, BKException.codeLogger(rc));
    asyncCloseInternal(NoopCloseCallback.instance, null, rc);
}
// Give every queued add a chance to time itself out; log how many did.
private void monitorPendingAddOps() {
    int timedOutCount = 0;
    for (PendingAddOp pending : pendingAddOps) {
        if (pending.maybeTimeout()) {
            ++timedOutCount;
        }
    }
    if (timedOutCount > 0) {
        LOG.info("Timed out {} add ops", timedOutCount);
    }
}
// Fail all queued adds with the given result code, after draining them
// from the pending queue and rolling their bytes out of the ledger length.
void errorOutPendingAdds(int rc) {
    errorOutPendingAdds(rc, drainPendingAddsAndAdjustLength());
}
// Atomically empty the pending-add queue, deducting each op's byte count
// from the running ledger length; returns the drained ops in queue order.
synchronized List<PendingAddOp> drainPendingAddsAndAdjustLength() {
    final List<PendingAddOp> drained = new ArrayList<PendingAddOp>(pendingAddOps.size());
    for (PendingAddOp op = pendingAddOps.poll(); op != null; op = pendingAddOps.poll()) {
        addToLength(-op.entryLength);
        drained.add(op);
    }
    return drained;
}
// Complete each drained add operation with the given (error) result code.
void errorOutPendingAdds(int rc, List<PendingAddOp> ops) {
    ops.forEach(op -> op.submitCallback(rc));
}
/**
 * Complete, in order, the contiguous prefix of pending adds whose
 * responses have all arrived, advancing the local LAC as we go.
 * Stops while an ensemble change is in flight.
 */
void sendAddSuccessCallbacks() {
    // Start from the head of the queue and proceed while there are
    // entries that have had all their responses come back
    PendingAddOp pendingAddOp;
    while ((pendingAddOp = pendingAddOps.peek()) != null
           && !changingEnsemble) {
        if (!pendingAddOp.completed) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("pending add not completed: {}", pendingAddOp);
            }
            return;
        }
        // Check if it is the next entry in the sequence.
        if (pendingAddOp.entryId != 0 && pendingAddOp.entryId != pendingAddsSequenceHead + 1) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Head of the queue entryId: {} is not the expected value: {}", pendingAddOp.entryId,
                        pendingAddsSequenceHead + 1);
            }
            return;
        }
        pendingAddOps.remove();
        explicitLacFlushPolicy.updatePiggyBackedLac(lastAddConfirmed);
        pendingAddsSequenceHead = pendingAddOp.entryId;
        if (!writeFlags.contains(WriteFlag.DEFERRED_SYNC)) {
            // DEFERRED_SYNC writers do not advance the local LAC here
            this.lastAddConfirmed = pendingAddsSequenceHead;
        }
        pendingAddOp.submitCallback(BKException.Code.OK);
    }
}
// Test hook: whether any write failures are queued awaiting handling.
@VisibleForTesting
boolean hasDelayedWriteFailedBookies() {
    return !delayedWriteFailedBookies.isEmpty();
}
// Record a write failure for the bookie at ensemble position {@code index};
// the failure is handled later by maybeHandleDelayedWriteBookieFailure().
void notifyWriteFailed(int index, BookieId addr) {
    synchronized (metadataLock) {
        delayedWriteFailedBookies.put(index, addr);
    }
}
/**
 * Drain any accumulated delayed write failures and trigger the regular
 * bookie-failure handling (ensemble change) for them.
 */
void maybeHandleDelayedWriteBookieFailure() {
    synchronized (metadataLock) {
        if (delayedWriteFailedBookies.isEmpty()) {
            return;
        }
        // take a snapshot and clear, so failures recorded while we handle
        // this batch are processed on the next call
        Map<Integer, BookieId> toReplace = new HashMap<>(delayedWriteFailedBookies);
        delayedWriteFailedBookies.clear();
        // Original intent of this change is to do a best-effort ensemble change.
        // But this is not possible until the local metadata is completely immutable.
        // Until the feature "Make LedgerMetadata Immutable #610" Is complete we will use
        // handleBookieFailure() to handle delayed writes as regular bookie failures.
        handleBookieFailure(toReplace);
    }
}
/**
 * React to failed writes on the given bookies (keyed by ensemble index).
 * Depending on configuration, either retries against the same bookies,
 * fails the handle (DEFERRED_SYNC), or kicks off an ensemble change.
 * Only one ensemble change runs at a time; concurrent failures are queued
 * in delayedWriteFailedBookies.
 */
void handleBookieFailure(final Map<Integer, BookieId> failedBookies) {
    if (clientCtx.getConf().disableEnsembleChangeFeature.isAvailable()) {
        // ensemble change disabled: just re-send to the same bookies
        if (LOG.isDebugEnabled()) {
            LOG.debug("Ensemble change is disabled. Retry sending to failed bookies {} for ledger {}.",
                failedBookies, ledgerId);
        }
        executeOrdered(() ->
                unsetSuccessAndSendWriteRequest(getCurrentEnsemble(), failedBookies.keySet()));
        return;
    }
    if (writeFlags.contains(WriteFlag.DEFERRED_SYNC)) {
        // DEFERRED_SYNC is incompatible with ensemble change; give up on the handle
        if (LOG.isDebugEnabled()) {
            LOG.debug("Cannot perform ensemble change with write flags {}. "
                    + "Failed bookies {} for ledger {}.",
                writeFlags, failedBookies, ledgerId);
        }
        handleUnrecoverableErrorDuringAdd(WriteException);
        return;
    }
    boolean triggerLoop = false;
    Map<Integer, BookieId> toReplace = null;
    List<BookieId> origEnsemble = null;
    synchronized (metadataLock) {
        if (changingEnsemble) {
            // an ensemble change is already running; queue these failures
            delayedWriteFailedBookies.putAll(failedBookies);
        } else {
            // claim the change and fold in any previously-queued failures
            changingEnsemble = true;
            triggerLoop = true;
            toReplace = new HashMap<>(delayedWriteFailedBookies);
            delayedWriteFailedBookies.clear();
            toReplace.putAll(failedBookies);
            origEnsemble = getCurrentEnsemble();
        }
    }
    if (triggerLoop) {
        // run the change loop outside the lock
        ensembleChangeLoop(origEnsemble, toReplace);
    }
}
/**
 * Replace the given failed bookies in the current ensemble via a
 * compare-and-swap metadata update loop, then re-issue the affected writes.
 * Closes the handle if the allowed number of ensemble changes is exceeded,
 * or if the metadata turns out to be closed / in recovery.
 */
void ensembleChangeLoop(List<BookieId> origEnsemble, Map<Integer, BookieId> failedBookies) {
    int ensembleChangeId = numEnsembleChanges.incrementAndGet();
    String logContext = String.format("[EnsembleChange(ledger:%d, change-id:%010d)]", ledgerId, ensembleChangeId);
    // when the ensemble changes are too frequent, close handle
    if (ensembleChangeId > clientCtx.getConf().maxAllowedEnsembleChanges) {
        LOG.info("{} reaches max allowed ensemble change number {}",
                 logContext, clientCtx.getConf().maxAllowedEnsembleChanges);
        handleUnrecoverableErrorDuringAdd(WriteException);
        return;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("{} Replacing {} in {}", logContext, failedBookies, origEnsemble);
    }
    AtomicInteger attempts = new AtomicInteger(0);
    new MetadataUpdateLoop(
        clientCtx.getLedgerManager(), getId(),
        this::getVersionedLedgerMetadata,
        // only keep retrying while the ledger is open and at least one of the
        // failed bookies is still present in the last ensemble
        (metadata) -> metadata.getState() == LedgerMetadata.State.OPEN
                      && failedBookies.entrySet().stream().anyMatch(
                              e -> LedgerMetadataUtils.getLastEnsembleValue(metadata)
                                       .get(e.getKey()).equals(e.getValue())),
        // transform: build new metadata with the failed bookies replaced
        (metadata) -> {
            attempts.incrementAndGet();
            List<BookieId> currentEnsemble = getCurrentEnsemble();
            List<BookieId> newEnsemble = EnsembleUtils.replaceBookiesInEnsemble(
                    clientCtx.getBookieWatcher(), metadata, currentEnsemble, failedBookies, logContext);
            Long lastEnsembleKey = LedgerMetadataUtils.getLastEnsembleKey(metadata);
            LedgerMetadataBuilder builder = LedgerMetadataBuilder.from(metadata);
            long newEnsembleStartEntry = getLastAddConfirmed() + 1;
            checkState(lastEnsembleKey <= newEnsembleStartEntry,
                       "New ensemble must either replace the last ensemble, or add a new one");
            if (LOG.isDebugEnabled()) {
                LOG.debug("{}[attempt:{}] changing ensemble from: {} to: {} starting at entry: {}",
                          logContext, attempts.get(), currentEnsemble, newEnsemble, newEnsembleStartEntry);
            }
            // if the last ensemble starts exactly at the new start entry,
            // replace it in place; otherwise append a new ensemble fragment
            if (lastEnsembleKey.equals(newEnsembleStartEntry)) {
                return builder.replaceEnsembleEntry(newEnsembleStartEntry, newEnsemble).build();
            } else {
                return builder.newEnsembleEntry(newEnsembleStartEntry, newEnsemble).build();
            }
        },
        this::setLedgerMetadata)
        .run().whenCompleteAsync((metadata, ex) -> {
                if (ex != null) {
                    LOG.warn("{}[attempt:{}] Exception changing ensemble", logContext, attempts.get(), ex);
                    handleUnrecoverableErrorDuringAdd(BKException.getExceptionCode(ex, WriteException));
                } else if (metadata.getValue().isClosed()) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("{}[attempt:{}] Metadata closed during attempt to replace bookie."
                                  + " Another client must have recovered the ledger.", logContext, attempts.get());
                    }
                    handleUnrecoverableErrorDuringAdd(BKException.Code.LedgerClosedException);
                } else if (metadata.getValue().getState() == LedgerMetadata.State.IN_RECOVERY) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("{}[attempt:{}] Metadata marked as in-recovery during attempt to replace bookie."
                                  + " Another client must be recovering the ledger.", logContext, attempts.get());
                    }
                    handleUnrecoverableErrorDuringAdd(BKException.Code.LedgerFencedException);
                } else {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("{}[attempt:{}] Success updating metadata.", logContext, attempts.get());
                    }
                    List<BookieId> newEnsemble = null;
                    Set<Integer> replaced = null;
                    synchronized (metadataLock) {
                        if (!delayedWriteFailedBookies.isEmpty()) {
                            // more failures arrived while we were changing the
                            // ensemble; run another change round for them
                            Map<Integer, BookieId> toReplace = new HashMap<>(delayedWriteFailedBookies);
                            delayedWriteFailedBookies.clear();
                            ensembleChangeLoop(origEnsemble, toReplace);
                        } else {
                            newEnsemble = getCurrentEnsemble();
                            replaced = EnsembleUtils.diffEnsemble(origEnsemble, newEnsemble);
                            LOG.info("New Ensemble: {} for ledger: {}", newEnsemble, ledgerId);
                            changingEnsemble = false;
                        }
                    }
                    if (newEnsemble != null) { // unsetSuccess outside of lock
                        unsetSuccessAndSendWriteRequest(newEnsemble, replaced);
                    }
                }
            }, clientCtx.getMainWorkerPool().chooseThread(ledgerId));
}
// Re-issue every pending add to each replaced bookie index in the
// (possibly new) ensemble, clearing any stale success state first.
void unsetSuccessAndSendWriteRequest(List<BookieId> ensemble, final Set<Integer> bookies) {
    for (PendingAddOp pending : pendingAddOps) {
        for (Integer replacedIndex : bookies) {
            pending.unsetSuccessAndSendWriteRequest(ensemble, replacedIndex);
        }
    }
}
// Record (bookie, entryId) in the failure history when tracking is enabled.
void registerOperationFailureOnBookie(BookieId bookie, long entryId) {
    final boolean trackingEnabled = clientCtx.getConf().enableBookieFailureTracking;
    if (trackingEnabled) {
        bookieFailureHistory.put(bookie, entryId);
    }
}
/**
 * Close callback that only logs failures; used when the result of the
 * close does not need to be propagated to a caller.
 */
static class NoopCloseCallback implements CloseCallback {
    static NoopCloseCallback instance = new NoopCloseCallback();
    @Override
    public void closeComplete(int rc, LedgerHandle lh, Object ctx) {
        if (rc != BKException.Code.OK) {
            LOG.warn("Close failed: {}", BKException.codeLogger(rc));
        }
        // noop
    }
}
/**
 * Get the current ensemble from the ensemble list. The current ensemble
 * is the last ensemble in the list. The ledger handle uses this ensemble when
 * triggering operations which work on the end of the ledger, such as adding new
 * entries or reading the last add confirmed.
 *
 * <p>This method is also used by ReadOnlyLedgerHandle during recovery, and when
 * tailing a ledger.
 *
 * <p>Generally, this method should only be called by LedgerHandle and not by the
 * operations themselves, to avoid adding more dependencies between the classes.
 * There are too many already.
 *
 * @return the ensemble of the last (current) fragment of the ledger
 */
List<BookieId> getCurrentEnsemble() {
    // Getting current ensemble from the metadata is only a temporary
    // thing until metadata is immutable. At that point, current ensemble
    // becomes a property of the LedgerHandle itself.
    return LedgerMetadataUtils.getCurrentEnsemble(versionedMetadata.getValue());
}
/**
 * Return a {@link WriteSet} suitable for reading a particular entry.
 * This will include all bookies that are expected to store the entry,
 * per the ledger's distribution schedule (or a fixed bookie when sticky
 * reads are enabled).
 */
WriteSet getWriteSetForReadOperation(long entryId) {
    if (stickyBookieIndex != STICKY_READ_BOOKIE_INDEX_UNSET) {
        // When sticky reads are enabled we want to make sure to take
        // advantage of read-ahead (or, anyway, from efficiencies in
        // reading sequential data from disk through the page cache).
        // For this, all the entries that a given bookie prefetches,
        // should read from that bookie.
        // For example, with e=2, w=2, a=2 we would have
        // B-1 B-2
        // e-0 X X
        // e-1 X X
        // e-2 X X
        //
        // In this case we want all the requests to be issued to B-1 (by
        // preference), so that cache hits will be maximized.
        //
        // We can only enable sticky reads if the ensemble==writeQuorum
        // otherwise the same bookie will not have all the entries
        // stored
        return distributionSchedule.getWriteSet(stickyBookieIndex);
    } else {
        return distributionSchedule.getWriteSet(entryId);
    }
}
/**
 * Execute the callback in the thread pinned to the ledger.
 *
 * @param runnable task to run on the ledger's executor
 * @throws RejectedExecutionException if the executor cannot accept the task
 */
void executeOrdered(Runnable runnable) throws RejectedExecutionException {
    executor.execute(runnable);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.bookkeeper.client.WeightedRandomSelection.WeightedObject;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieClient;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GetBookieInfoCallback;
import org.apache.bookkeeper.proto.BookkeeperProtocol;
import org.apache.commons.collections4.CollectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A utility class to read {@link BookieInfo} from bookies.
 *
 * <p>NOTE: This class is intended to be used by this project only. External users should not rely on it directly.
 */
public class BookieInfoReader {
private static final Logger LOG = LoggerFactory.getLogger(BookieInfoReader.class);
// Request both total capacity and free space in a single getBookieInfo call.
private static final long GET_BOOKIE_INFO_REQUEST_FLAGS =
        BookkeeperProtocol.GetBookieInfoRequest.Flags.TOTAL_DISK_CAPACITY_VALUE
        | BookkeeperProtocol.GetBookieInfoRequest.Flags.FREE_DISK_SPACE_VALUE;
// Executor used for both the periodic scan and on-demand scans.
private final ScheduledExecutorService scheduler;
private final BookKeeper bk;
private final ClientConfiguration conf;
/**
 * A class representing the information (e.g. disk usage, load) of a bookie.
 *
 * <p>NOTE: This class is intended to be used by this project only. External users should not rely on it directly.
 */
public static class BookieInfo implements WeightedObject {
    // Capacity figures reported by the bookie, in bytes.
    private final long freeBytes;
    private final long totalBytes;

    public BookieInfo() {
        this(0L, 0L);
    }

    public BookieInfo(long totalDiskSpace, long freeDiskSpace) {
        this.totalBytes = totalDiskSpace;
        this.freeBytes = freeDiskSpace;
    }

    public long getFreeDiskSpace() {
        return freeBytes;
    }

    public long getTotalDiskSpace() {
        return totalBytes;
    }

    @Override
    public long getWeight() {
        // weighted selection uses free space as the weight
        return freeBytes;
    }

    @Override
    public String toString() {
        return "FreeDiskSpace: " + this.freeBytes + " TotalDiskCapacity: " + this.totalBytes;
    }
}
/**
 * Tracks the most recently reported set of bookies from BookieWatcher as well
 * as current BookieInfo for bookies we've successfully queried.
 */
private static class BookieInfoMap {
    /**
     * Contains the most recently obtained information on the contained bookies.
     * When an error happens querying a bookie, the entry is removed.
     */
    private final Map<BookieId, BookieInfo> infoMap = new HashMap<>();
    /**
     * Contains the most recently reported set of bookies from BookieWatcher
     * A partial query consists of every member of mostRecentlyReportedBookies
     * minus the entries in bookieInfoMap.
     */
    private Collection<BookieId> mostRecentlyReportedBookies = new ArrayList<>();

    /**
     * Replace the known bookie set, dropping cached info for bookies that
     * are no longer reported.
     */
    public void updateBookies(Collection<BookieId> updatedBookieSet) {
        if (LOG.isDebugEnabled()) {
            LOG.debug(
                    "updateBookies: current: {}, new: {}",
                    mostRecentlyReportedBookies, updatedBookieSet);
        }
        infoMap.keySet().retainAll(updatedBookieSet);
        mostRecentlyReportedBookies = updatedBookieSet;
    }

    // Bookies we know about but have no cached info for.
    @SuppressWarnings("unchecked")
    public Collection<BookieId> getPartialScanTargets() {
        return CollectionUtils.subtract(mostRecentlyReportedBookies, infoMap.keySet());
    }

    // Every known bookie, regardless of cached info.
    public Collection<BookieId> getFullScanTargets() {
        return mostRecentlyReportedBookies;
    }

    /**
     * Returns info for bookie, null if not known.
     *
     * @param bookie bookie for which to get info
     * @return Info for bookie, null otherwise
     */
    public BookieInfo getInfo(BookieId bookie) {
        return infoMap.get(bookie);
    }

    /**
     * Removes bookie from bookieInfoMap.
     *
     * @param bookie bookie on which we observed an error
     */
    public void clearInfo(BookieId bookie) {
        infoMap.remove(bookie);
    }

    /**
     * Report new info on bookie.
     *
     * @param bookie bookie for which we obtained new info
     * @param info the new info
     */
    public void gotInfo(BookieId bookie, BookieInfo info) {
        infoMap.put(bookie, info);
    }

    /**
     * Get bookie info map.
     */
    public Map<BookieId, BookieInfo> getBookieMap() {
        return infoMap;
    }
}
private final BookieInfoMap bookieInfoMap = new BookieInfoMap();

/**
 * Tracks whether there is an execution in progress as well as whether
 * another is pending.
 */
// UNQUEUED: no scan queued; PARTIAL: scan only un-cached bookies;
// FULL: scan every known bookie.
public enum State { UNQUEUED, PARTIAL, FULL }
// State machine coordinating scans: at most one scan runs at a time, and
// the strongest requested scan type (FULL > PARTIAL) is queued while one runs.
private static class InstanceState {
    private boolean running = false;
    private State queuedType = State.UNQUEUED;

    // Claim the "running" slot; returns true only for the claimant.
    private boolean shouldStart() {
        if (running) {
            return false;
        } else {
            running = true;
            return true;
        }
    }

    /**
     * Mark pending operation FULL and return true if there is no in-progress operation.
     *
     * @return True if we should execute a scan, False if there is already one running
     */
    public boolean tryStartFull() {
        queuedType = State.FULL;
        return shouldStart();
    }

    /**
     * Mark pending operation PARTIAL if not full and return true if there is no in-progress operation.
     *
     * @return True if we should execute a scan, False if there is already one running
     */
    public boolean tryStartPartial() {
        // never downgrade a queued FULL scan to PARTIAL
        if (queuedType == State.UNQUEUED) {
            queuedType = State.PARTIAL;
        }
        return shouldStart();
    }

    /**
     * Gets and clears queuedType.
     */
    public State getAndClearQueuedType() {
        State ret = queuedType;
        queuedType = State.UNQUEUED;
        return ret;
    }

    /**
     * If queuedType != UNQUEUED, returns true, leaves running equal to true
     * Otherwise, returns false and sets running to false.
     */
    public boolean completeUnlessQueued() {
        if (queuedType == State.UNQUEUED) {
            running = false;
            return false;
        } else {
            return true;
        }
    }
}
private final InstanceState instanceState = new InstanceState();

/**
 * @param bk client whose bookie watcher / bookie client are used for scans
 * @param conf client configuration (scan intervals, retry intervals)
 * @param scheduler executor for periodic and on-demand scans
 */
BookieInfoReader(BookKeeper bk,
                 ClientConfiguration conf,
                 ScheduledExecutorService scheduler) {
    this.bk = bk;
    this.conf = conf;
    this.scheduler = scheduler;
}
/**
 * Begin watching writable-bookie changes and schedule the periodic full
 * bookie-info scan at the configured interval.
 */
public void start() {
    this.bk
        .getMetadataClientDriver()
        .getRegistrationClient()
        .watchWritableBookies(bookies -> availableBookiesChanged(bookies.getValue()));
    scheduler.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            synchronized (BookieInfoReader.this) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Running periodic BookieInfo scan");
                }
                try {
                    Collection<BookieId> updatedBookies = bk.bookieWatcher.getBookies();
                    bookieInfoMap.updateBookies(updatedBookies);
                } catch (BKException e) {
                    // couldn't get the bookie list; re-schedule this same
                    // runnable once after the retry interval and bail out
                    LOG.info("Got exception while querying bookies from watcher, rerunning after {}s",
                            conf.getGetBookieInfoRetryIntervalSeconds(), e);
                    scheduler.schedule(this, conf.getGetBookieInfoRetryIntervalSeconds(), TimeUnit.SECONDS);
                    return;
                }
                // queue a FULL scan; skipped if one is already running
                if (instanceState.tryStartFull()) {
                    getReadWriteBookieInfo();
                }
            }
        }
    }, 0, conf.getGetBookieInfoIntervalSeconds(), TimeUnit.SECONDS);
}
// Run a scan on the scheduler as soon as possible.
private void submitTask() {
    scheduler.submit(this::getReadWriteBookieInfo);
}

// Run a scan on the scheduler after the given delay.
private void submitTaskWithDelay(int delaySeconds) {
    scheduler.schedule(this::getReadWriteBookieInfo, delaySeconds, TimeUnit.SECONDS);
}
synchronized void availableBookiesChanged(Set<BookieId> updatedBookiesList) {
if (LOG.isInfoEnabled()) {
LOG.info("Scheduling bookie info read due to changes in available bookies.");
}
bookieInfoMap.updateBookies(updatedBookiesList);
if (instanceState.tryStartPartial()) {
submitTask();
}
}
/**
* Method to allow tests to block until bookie info is available.
*
* @param bookie to lookup
* @return None if absent, free disk space if present
*/
synchronized Optional<Long> getFreeDiskSpace(BookieId bookie) {
BookieInfo bookieInfo = bookieInfoMap.getInfo(bookie);
if (bookieInfo != null) {
return Optional.of(bookieInfo.getFreeDiskSpace());
} else {
return Optional.empty();
}
}
/* State to track scan execution progress as callbacks come in */
private int totalSent = 0;
private int completedCnt = 0;
private int errorCnt = 0;
    /**
     * Performs scan described by instanceState using the cached bookie information
     * in bookieInfoMap.
     *
     * <p>Runs on the scheduler thread with the instance lock held; the shared
     * counters (totalSent/completedCnt/errorCnt) are reset here and mutated by
     * the callbacks below, which re-acquire the same lock. When the last
     * callback arrives — or when nothing was sent at all — {@link #onExit()}
     * publishes the results and decides whether another pass is needed.
     */
    synchronized void getReadWriteBookieInfo() {
        State queuedType = instanceState.getAndClearQueuedType();
        Collection<BookieId> toScan;
        if (queuedType == State.FULL) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Doing full scan");
            }
            toScan = bookieInfoMap.getFullScanTargets();
        } else if (queuedType == State.PARTIAL) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Doing partial scan");
            }
            toScan = bookieInfoMap.getPartialScanTargets();
        } else {
            // Callers must only invoke this after tryStartFull/tryStartPartial queued a scan.
            if (LOG.isErrorEnabled()) {
                LOG.error("Invalid state, queuedType cannot be UNQUEUED in getReadWriteBookieInfo");
            }
            assert(queuedType != State.UNQUEUED);
            return;
        }
        BookieClient bkc = bk.getBookieClient();
        // Request both capacity and free-space fields in one round trip.
        final long requested = BookkeeperProtocol.GetBookieInfoRequest.Flags.TOTAL_DISK_CAPACITY_VALUE
                | BookkeeperProtocol.GetBookieInfoRequest.Flags.FREE_DISK_SPACE_VALUE;
        totalSent = 0;
        completedCnt = 0;
        errorCnt = 0;
        if (LOG.isDebugEnabled()) {
            LOG.debug("Getting bookie info for: {}", toScan);
        }
        for (BookieId b : toScan) {
            bkc.getBookieInfo(b, requested,
                    new GetBookieInfoCallback() {
                        // Runs on the scheduler thread (see getBookieInfoComplete) so that
                        // counter updates are serialized with the loop above.
                        void processReadInfoComplete(int rc, BookieInfo bInfo, Object ctx) {
                            synchronized (BookieInfoReader.this) {
                                BookieId b = (BookieId) ctx;
                                if (rc != BKException.Code.OK) {
                                    if (LOG.isErrorEnabled()) {
                                        LOG.error("Reading bookie info from bookie {} failed due to {}",
                                                b, BKException.codeLogger(rc));
                                    }
                                    // We reread bookies missing from the map each time, so remove to ensure
                                    // we get to it on the next scan
                                    bookieInfoMap.clearInfo(b);
                                    errorCnt++;
                                } else {
                                    if (LOG.isDebugEnabled()) {
                                        LOG.debug("Bookie Info for bookie {} is {}", b, bInfo);
                                    }
                                    bookieInfoMap.gotInfo(b, bInfo);
                                }
                                completedCnt++;
                                // Last outstanding response for this pass: finish up.
                                if (totalSent == completedCnt) {
                                    onExit();
                                }
                            }
                        }
                        @Override
                        public void getBookieInfoComplete(final int rc, final BookieInfo bInfo, final Object ctx) {
                            // Hop from the network thread back to the scheduler thread.
                            scheduler.submit(
                                    new Runnable() {
                                        @Override
                                        public void run() {
                                            processReadInfoComplete(rc, bInfo, ctx);
                                        }
                                    });
                        }
                    }, b);
            totalSent++;
        }
        // Nothing to scan: still run the completion path so queued work isn't lost.
        if (totalSent == 0) {
            onExit();
        }
    }
void onExit() {
bk.placementPolicy.updateBookieInfo(bookieInfoMap.getBookieMap());
if (errorCnt > 0) {
if (LOG.isInfoEnabled()) {
LOG.info("Rescheduling in {}s due to errors", conf.getGetBookieInfoIntervalSeconds());
}
instanceState.tryStartPartial();
submitTaskWithDelay(conf.getGetBookieInfoRetryIntervalSeconds());
} else if (instanceState.completeUnlessQueued()) {
if (LOG.isInfoEnabled()) {
LOG.info("Rescheduling, another scan is pending");
}
submitTask();
}
}
    /**
     * Synchronously queries every known bookie (writable and read-only) for its
     * total disk capacity and free disk space.
     *
     * <p>Unlike the periodic scan, this does not consult or update the cache in
     * bookieInfoMap; it blocks on a latch until every outstanding request has
     * completed.
     *
     * @return map from bookie id to the info it returned; bookies whose request
     *         failed are simply absent from the map
     * @throws BKException if the bookie list cannot be fetched from the watcher
     * @throws InterruptedException if interrupted while waiting for responses
     */
    Map<BookieId, BookieInfo> getBookieInfo() throws BKException, InterruptedException {
        BookieClient bkc = bk.getBookieClient();
        final AtomicInteger totalSent = new AtomicInteger();
        final AtomicInteger totalCompleted = new AtomicInteger();
        final ConcurrentMap<BookieId, BookieInfo> map =
            new ConcurrentHashMap<BookieId, BookieInfo>();
        final CountDownLatch latch = new CountDownLatch(1);
        long requested = BookkeeperProtocol.GetBookieInfoRequest.Flags.TOTAL_DISK_CAPACITY_VALUE
                | BookkeeperProtocol.GetBookieInfoRequest.Flags.FREE_DISK_SPACE_VALUE;
        Collection<BookieId> bookies;
        // NOTE(review): assumes getBookies() returns a mutable collection that
        // can absorb the read-only set — TODO confirm against BookieWatcher.
        bookies = bk.bookieWatcher.getBookies();
        bookies.addAll(bk.bookieWatcher.getReadOnlyBookies());
        if (bookies.isEmpty()) {
            return map;
        }
        // Set the expected count before sending so a fast callback can't race
        // past the completion check below.
        totalSent.set(bookies.size());
        for (BookieId b : bookies) {
            bkc.getBookieInfo(b, requested, new GetBookieInfoCallback() {
                @Override
                public void getBookieInfoComplete(int rc, BookieInfo bInfo, Object ctx) {
                    BookieId b = (BookieId) ctx;
                    if (rc != BKException.Code.OK) {
                        if (LOG.isErrorEnabled()) {
                            LOG.error("Reading bookie info from bookie {} failed due to {}",
                                    b, BKException.codeLogger(rc));
                        }
                    } else {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Free disk space on bookie {} is {}.", b, bInfo.getFreeDiskSpace());
                        }
                        map.put(b, bInfo);
                    }
                    if (totalCompleted.incrementAndGet() == totalSent.get()) {
                        latch.countDown();
                    }
                }
            }, b);
        }
        try {
            latch.await();
        } catch (InterruptedException e) {
            // Restore the interrupt flag before propagating.
            Thread.currentThread().interrupt();
            LOG.error("Received InterruptedException ", e);
            throw e;
        }
        return map;
    }
}
| 366 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/ITopologyAwareEnsemblePlacementPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import java.util.List;
import java.util.Set;
import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException;
import org.apache.bookkeeper.common.annotation.InterfaceAudience;
import org.apache.bookkeeper.common.annotation.InterfaceStability;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.BookieNode;
import org.apache.bookkeeper.net.Node;
/**
* Interface for topology aware ensemble placement policy.
*
* <p>All the implementations of this interface are using {@link org.apache.bookkeeper.net.NetworkTopology}
* for placing ensembles.
*
* @see EnsemblePlacementPolicy
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface ITopologyAwareEnsemblePlacementPolicy<T extends Node> extends EnsemblePlacementPolicy {
    /**
     * Predicate used when choosing an ensemble.
     */
    interface Predicate<T extends Node> {
        /**
         * Decide whether a candidate node may join the ensemble chosen so far.
         *
         * @param candidate candidate bookie node
         * @param chosenBookies the ensemble built up so far
         * @return true if the candidate is acceptable
         */
        boolean apply(T candidate, Ensemble<T> chosenBookies);
    }
    /**
     * Ensemble used to hold the result of an ensemble selected for placement.
     */
    interface Ensemble<T extends Node> {
        /**
         * Append the new bookie node to the ensemble only if the ensemble doesnt
         * already contain the same bookie.
         *
         * @param node
         *          new candidate bookie node.
         * @return
         *          true if the node was added
         */
        boolean addNode(T node);
        /**
         * @return list of addresses representing the ensemble
         */
        List<BookieId> toList();
        /**
         * Validates if an ensemble is valid.
         *
         * @return true if the ensemble is valid; false otherwise
         */
        boolean validate();
    }
    /**
     * Create an ensemble with parent ensemble.
     *
     * @param ensembleSize
     *          ensemble size
     * @param writeQuorumSize
     *          write quorum size
     * @param ackQuorumSize
     *          ack quorum size
     * @param excludeBookies
     *          exclude bookies
     * @param parentEnsemble
     *          parent ensemble
     * @param parentPredicate
     *          predicate applied against the parent ensemble while selecting
     * @return list of bookies forming the ensemble
     * @throws BKException.BKNotEnoughBookiesException
     *          if not enough suitable bookies are available
     */
    PlacementResult<List<BookieId>> newEnsemble(
            int ensembleSize,
            int writeQuorumSize,
            int ackQuorumSize,
            Set<BookieId> excludeBookies,
            Ensemble<T> parentEnsemble,
            Predicate<T> parentPredicate)
            throws BKException.BKNotEnoughBookiesException;
    /**
     * Select a node from a given network location.
     *
     * @param networkLoc
     *          network location
     * @param excludeBookies
     *          exclude bookies set
     * @param predicate
     *          predicate to apply
     * @param ensemble
     *          ensemble
     * @param fallbackToRandom
     *          fallbackToRandom
     * @return the selected bookie.
     * @throws BKException.BKNotEnoughBookiesException
     *          if no suitable bookie can be selected
     */
    T selectFromNetworkLocation(String networkLoc,
                                Set<Node> excludeBookies,
                                Predicate<T> predicate,
                                Ensemble<T> ensemble,
                                boolean fallbackToRandom)
            throws BKException.BKNotEnoughBookiesException;
    /**
     * Select a node from cluster excluding excludeBookies and bookie nodes of
     * excludeRacks. If there isn't a BookieNode excluding those racks and
     * nodes, then if fallbackToRandom is set to true then pick a random node
     * from cluster just excluding excludeBookies.
     *
     * <p>NOTE(review): this overload (and the next) takes
     * {@code Predicate<BookieNode>}/{@code Ensemble<BookieNode>} rather than the
     * type parameter {@code T} used elsewhere — presumably an oversight, but
     * changing it would break existing implementers; confirm before touching.
     *
     * @param excludeRacks racks to exclude entirely
     * @param excludeBookies bookies to exclude
     * @param predicate predicate to apply to each candidate
     * @param ensemble ensemble being built
     * @param fallbackToRandom whether to fall back to a random pick
     * @return the selected bookie node
     * @throws BKException.BKNotEnoughBookiesException
     *          if no suitable bookie can be selected
     */
    T selectFromNetworkLocation(Set<String> excludeRacks,
                                Set<Node> excludeBookies,
                                Predicate<BookieNode> predicate,
                                Ensemble<BookieNode> ensemble,
                                boolean fallbackToRandom)
            throws BKException.BKNotEnoughBookiesException;
    /**
     * Select a node from networkLoc rack excluding excludeBookies. If there
     * isn't any node in 'networkLoc', then it will try to get a node from
     * cluster excluding excludeRacks and excludeBookies. If fallbackToRandom is
     * set to true then it will get a random bookie from cluster excluding
     * excludeBookies if it couldn't find a bookie
     *
     * @param networkLoc preferred rack
     * @param excludeRacks racks to avoid when falling back
     * @param excludeBookies bookies to exclude
     * @param predicate predicate to apply to each candidate
     * @param ensemble ensemble being built
     * @param fallbackToRandom whether to fall back to a random pick
     * @return the selected bookie node
     * @throws BKNotEnoughBookiesException
     *          if no suitable bookie can be selected
     */
    T selectFromNetworkLocation(String networkLoc,
                                Set<String> excludeRacks,
                                Set<Node> excludeBookies,
                                Predicate<BookieNode> predicate,
                                Ensemble<BookieNode> ensemble,
                                boolean fallbackToRandom)
            throws BKNotEnoughBookiesException;
    /**
     * Handle bookies that left.
     *
     * @param leftBookies
     *          bookies that left
     */
    void handleBookiesThatLeft(Set<BookieId> leftBookies);
    /**
     * Handle bookies that joined.
     *
     * @param joinedBookies
     *          bookies that joined.
     */
    void handleBookiesThatJoined(Set<BookieId> joinedBookies);
    /**
     * Handle rack change for the bookies.
     *
     * @param bookieAddressList bookies whose rack placement changed
     */
    void onBookieRackChange(List<BookieId> bookieAddressList);
}
| 367 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/MetadataUpdateLoop.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import com.google.common.util.concurrent.RateLimiter;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.function.Supplier;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Versioned;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Mechanism to safely update the metadata of a ledger.
*
* <p>The loop takes the following steps:
* 1. Check if the metadata needs to be changed.
* 2. Make a copy of the metadata and modify it.
* 3. Write the modified copy to zookeeper.
* 3.1 If the write succeeds, go to 6.
* 3.2 If the write fails because of a failed compare and swap, go to 4.
* 4. Read the metadata back from the store
* 5. Update the local copy of the metadata with the metadata read in 4, go to 1.
* 6. Update the local copy of the metadata with the metadata which has just been written.
*
* <p>All mutating operations are compare and swap operation. If the compare fails, another
* iteration of the loop begins.
*/
class MetadataUpdateLoop {
    static final Logger LOG = LoggerFactory.getLogger(MetadataUpdateLoop.class);
    private final LedgerManager lm;
    private final long ledgerId;
    // Supplier of the caller's current local view of the metadata.
    private final Supplier<Versioned<LedgerMetadata>> currentLocalValue;
    private final NeedsUpdatePredicate needsTransformation;
    private final MetadataTransform transform;
    private final LocalValueUpdater updateLocalValue;
    // Optional limiter on metadata writes per second; may be null.
    private final RateLimiter throttler;
    private final String logContext;
    // Iteration counter, used only for debug logging; bumped atomically
    // because callbacks may run on different threads.
    private volatile int writeLoopCount = 0;
    private static final AtomicIntegerFieldUpdater<MetadataUpdateLoop> WRITE_LOOP_COUNT_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(MetadataUpdateLoop.class, "writeLoopCount");
    /** Decides whether the metadata still needs this update; may throw to abort. */
    interface NeedsUpdatePredicate {
        boolean needsUpdate(LedgerMetadata metadata) throws Exception;
    }
    /** Produces a modified copy of the metadata without mutating the input. */
    interface MetadataTransform {
        LedgerMetadata transform(LedgerMetadata metadata) throws Exception;
    }
    /** Compare-and-swap on the caller's local metadata reference. */
    interface LocalValueUpdater {
        boolean updateValue(Versioned<LedgerMetadata> oldValue, Versioned<LedgerMetadata> newValue);
    }
    MetadataUpdateLoop(LedgerManager lm,
                       long ledgerId,
                       Supplier<Versioned<LedgerMetadata>> currentLocalValue,
                       NeedsUpdatePredicate needsTransformation,
                       MetadataTransform transform,
                       LocalValueUpdater updateLocalValue) {
        this(lm, ledgerId, currentLocalValue, needsTransformation, transform, updateLocalValue, null);
    }
    /**
     * Construct the loop. This takes a set of functions which may be called multiple times
     * during the loop.
     *
     * @param lm the ledger manager used for reading and writing metadata
     * @param ledgerId the id of the ledger we will be operating on
     * @param currentLocalValue should return the current local value of the metadata
     * @param needsTransformation should return true, if the metadata needs to be modified.
     *                            should throw an exception, if this update doesn't make sense.
     * @param transform takes a metadata objects, transforms, and returns it, without modifying
     *                  the original
     * @param updateLocalValue if the local value matches the first parameter, update it to the
     *                         second parameter and return true, return false otherwise
     * @param throttler optional rate limiter applied before each store write; may be null
     */
    MetadataUpdateLoop(LedgerManager lm,
                       long ledgerId,
                       Supplier<Versioned<LedgerMetadata>> currentLocalValue,
                       NeedsUpdatePredicate needsTransformation,
                       MetadataTransform transform,
                       LocalValueUpdater updateLocalValue,
                       RateLimiter throttler) {
        this.lm = lm;
        this.ledgerId = ledgerId;
        this.currentLocalValue = currentLocalValue;
        this.needsTransformation = needsTransformation;
        this.transform = transform;
        this.updateLocalValue = updateLocalValue;
        this.throttler = throttler;
        this.logContext = String.format("UpdateLoop(ledgerId=%d,loopId=%08x)", ledgerId, System.identityHashCode(this));
    }
    /**
     * Start the loop.
     *
     * @return a future that completes with the metadata as finally written (or
     *         the unchanged local value if no update was needed), or completes
     *         exceptionally on unrecoverable error
     */
    CompletableFuture<Versioned<LedgerMetadata>> run() {
        CompletableFuture<Versioned<LedgerMetadata>> promise = new CompletableFuture<>();
        writeLoop(currentLocalValue.get(), promise);
        return promise;
    }
    // One iteration of the compare-and-swap write loop; re-invokes itself on
    // local-value races and on store version conflicts (after re-reading).
    private void writeLoop(Versioned<LedgerMetadata> currentLocal,
                           CompletableFuture<Versioned<LedgerMetadata>> promise) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("{} starting write loop iteration, attempt {}",
                    logContext, WRITE_LOOP_COUNT_UPDATER.incrementAndGet(this));
        }
        try {
            if (needsTransformation.needsUpdate(currentLocal.getValue())) {
                LedgerMetadata transformed = transform.transform(currentLocal.getValue());
                if (throttler != null) {
                    // throttler to control updates per second
                    throttler.acquire();
                }
                lm.writeLedgerMetadata(ledgerId, transformed, currentLocal.getVersion())
                    .whenComplete((writtenMetadata, ex) -> {
                            if (ex == null) {
                                if (updateLocalValue.updateValue(currentLocal, writtenMetadata)) {
                                    if (LOG.isDebugEnabled()) {
                                        LOG.debug("{} success", logContext);
                                    }
                                    promise.complete(writtenMetadata);
                                } else {
                                    if (LOG.isDebugEnabled()) {
                                        LOG.debug("{} local value changed while we were writing, try again",
                                                logContext);
                                    }
                                    writeLoop(currentLocalValue.get(), promise);
                                }
                            } else if (ex instanceof BKException.BKMetadataVersionException) {
                                LOG.info("{} conflict writing metadata to store, update local value and try again",
                                        logContext);
                                updateLocalValueFromStore(ledgerId).whenComplete((readMetadata, readEx) -> {
                                        if (readEx == null) {
                                            writeLoop(readMetadata, promise);
                                        } else {
                                            promise.completeExceptionally(readEx);
                                        }
                                    });
                            } else {
                                LOG.error("{} Error writing metadata to store", logContext, ex);
                                promise.completeExceptionally(ex);
                            }
                        });
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("{} Update not needed, completing", logContext);
                }
                promise.complete(currentLocal);
            }
        } catch (Exception e) {
            // needsUpdate/transform may throw to abort the whole operation.
            LOG.error("{} Exception updating", logContext, e);
            promise.completeExceptionally(e);
        }
    }
    // Refresh the caller's local value from the store (steps 4-5 of the loop).
    private CompletableFuture<Versioned<LedgerMetadata>> updateLocalValueFromStore(long ledgerId) {
        CompletableFuture<Versioned<LedgerMetadata>> promise = new CompletableFuture<>();
        readLoop(ledgerId, promise);
        return promise;
    }
    private void readLoop(long ledgerId, CompletableFuture<Versioned<LedgerMetadata>> promise) {
        Versioned<LedgerMetadata> current = currentLocalValue.get();
        lm.readLedgerMetadata(ledgerId).whenComplete(
                (read, exception) -> {
                    if (exception != null) {
                        LOG.error("{} Failed to read metadata from store",
                                logContext, exception);
                        promise.completeExceptionally(exception);
                    } else if (current.getVersion().compare(read.getVersion()) == Version.Occurred.CONCURRENTLY) {
                        // no update needed, these are the same in the immutable world
                        promise.complete(current);
                    } else if (updateLocalValue.updateValue(current, read)) {
                        // updated local value successfully
                        promise.complete(read);
                    } else {
                        // local value changed while we were reading,
                        // look at new value, and try to read again
                        readLoop(ledgerId, promise);
                    }
                });
    }
}
| 368 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/SyncCounter.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import java.util.Enumeration;
/**
* Implements objects to help with the synchronization of asynchronous calls.
*
*/
class SyncCounter {
    // Count of operations currently outstanding.
    int i;
    // Return code reported by the last callback.
    int rc;
    // Total number of operations ever started (never decremented).
    int total;
    // Sequence of entries handed back by a read callback, if any.
    Enumeration<LedgerEntry> seq = null;
    // Ledger handle handed back by an open/create callback, if any.
    LedgerHandle lh = null;
    /** Record that one more asynchronous operation has started. */
    synchronized void inc() {
        i++;
        total++;
    }
    /** Record completion of one operation and wake any blocked waiters. */
    synchronized void dec() {
        i--;
        notifyAll();
    }
    /**
     * Block until at most {@code limit} operations remain outstanding.
     *
     * <p>Gives up early if a wakeup arrives without the outstanding count
     * changing — presumably a guard against waiting forever on a stray
     * notify; NOTE(review): confirm this early-exit is intended.
     */
    synchronized void block(int limit) throws InterruptedException {
        while (i > limit) {
            int prev = i;
            wait();
            if (i == prev) {
                break;
            }
        }
    }
    /** @return total operations started since construction. */
    synchronized int total() {
        return total;
    }
    void setrc(int rc) {
        this.rc = rc;
    }
    int getrc() {
        return rc;
    }
    void setSequence(Enumeration<LedgerEntry> seq) {
        this.seq = seq;
    }
    Enumeration<LedgerEntry> getSequence() {
        return seq;
    }
    void setLh(LedgerHandle lh) {
        this.lh = lh;
    }
    LedgerHandle getLh() {
        return lh;
    }
}
| 369 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/SyncCallbackUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import java.util.Enumeration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.client.api.LastConfirmedAndEntry;
import org.apache.bookkeeper.client.impl.LastConfirmedAndEntryImpl;
/**
* Utility for callbacks.
*
*/
@Slf4j
class SyncCallbackUtils {
    /**
     * Wait for a result. This is convenience method to implement callbacks
     *
     * @param <T>
     * @param future
     * @return the future's value, or null in the (practically impossible) case
     *         that the ~292-year timeout elapses
     * @throws InterruptedException
     * @throws BKException if the future failed with one, or wrapped in
     *         UnexpectedConditionException otherwise
     */
    public static <T> T waitForResult(CompletableFuture<T> future) throws InterruptedException, BKException {
        try {
            try {
                /*
                 * CompletableFuture.get() in JDK8 spins before blocking and wastes CPU time.
                 * CompletableFuture.get(long, TimeUnit) blocks immediately (if the result is
                 * not yet available). While the implementation of get() has changed in JDK9
                 * (not spinning any more), using CompletableFuture.get(long, TimeUnit) allows
                 * us to avoid spinning for all current JDK versions.
                 */
                return future.get(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (TimeoutException eignore) {
                // it's ok to return null if we timeout after 292 years (2^63 nanos)
                return null;
            }
        } catch (ExecutionException err) {
            if (err.getCause() instanceof BKException) {
                throw (BKException) err.getCause();
            } else {
                // Preserve the original cause on the wrapper exception.
                BKException unexpectedConditionException =
                    BKException.create(BKException.Code.UnexpectedConditionException);
                unexpectedConditionException.initCause(err.getCause());
                throw unexpectedConditionException;
            }
        }
    }
    /**
     * Handle the Response Code and transform it to a BKException.
     *
     * @param <T>
     * @param rc
     * @param result
     * @param future
     */
    public static <T> void finish(int rc, T result, CompletableFuture<? super T> future) {
        if (rc != BKException.Code.OK) {
            future.completeExceptionally(BKException.create(rc).fillInStackTrace());
        } else {
            future.complete(result);
        }
    }
    /** Adapts the async create callback onto a CompletableFuture. */
    static class SyncCreateCallback implements AsyncCallback.CreateCallback {
        private final CompletableFuture<? super LedgerHandle> future;
        public SyncCreateCallback(CompletableFuture<? super LedgerHandle> future) {
            this.future = future;
        }
        /**
         * Create callback implementation for synchronous create call.
         *
         * @param rc return code
         * @param lh ledger handle object
         * @param ctx optional control object
         */
        @Override
        public void createComplete(int rc, LedgerHandle lh, Object ctx) {
            finish(rc, lh, future);
        }
    }
    /** Like SyncCreateCallback but requires the handle to be a LedgerHandleAdv. */
    static class SyncCreateAdvCallback implements AsyncCallback.CreateCallback {
        private final CompletableFuture<? super LedgerHandleAdv> future;
        public SyncCreateAdvCallback(CompletableFuture<? super LedgerHandleAdv> future) {
            this.future = future;
        }
        /**
         * Create callback implementation for synchronous create call.
         *
         * @param rc return code
         * @param lh ledger handle object
         * @param ctx optional control object
         */
        @Override
        public void createComplete(int rc, LedgerHandle lh, Object ctx) {
            if (lh == null || lh instanceof LedgerHandleAdv) {
                finish(rc, (LedgerHandleAdv) lh, future);
            } else {
                // A non-adv handle here means an internal invariant was broken.
                finish(BKException.Code.UnexpectedConditionException, null, future);
            }
        }
    }
    /** Adapts the async open callback onto a CompletableFuture. */
    static class SyncOpenCallback implements AsyncCallback.OpenCallback {
        private final CompletableFuture<? super LedgerHandle> future;
        public SyncOpenCallback(CompletableFuture<? super LedgerHandle> future) {
            this.future = future;
        }
        /**
         * Callback method for synchronous open operation.
         *
         * @param rc
         *          return code
         * @param lh
         *          ledger handle
         * @param ctx
         *          optional control object
         */
        @Override
        public void openComplete(int rc, LedgerHandle lh, Object ctx) {
            finish(rc, lh, future);
        }
    }
    /** Adapts the async delete callback onto a CompletableFuture. */
    static class SyncDeleteCallback implements AsyncCallback.DeleteCallback {
        private final CompletableFuture<Void> future;
        public SyncDeleteCallback(CompletableFuture<Void> future) {
            this.future = future;
        }
        /**
         * Delete callback implementation for synchronous delete call.
         *
         * @param rc
         *            return code
         * @param ctx
         *            optional control object
         */
        @Override
        public void deleteComplete(int rc, Object ctx) {
            finish(rc, null, future);
        }
    }
    /** Fire-and-forget LAC-update callback; only logs the outcome. */
    static class LastAddConfirmedCallback implements AsyncCallback.AddLacCallback {
        static final LastAddConfirmedCallback INSTANCE = new LastAddConfirmedCallback();
        /**
         * Implementation of callback interface for synchronous read method.
         *
         * @param rc
         *          return code
         * @param lh
         *          ledger identifier
         * @param ctx
         *          control object
         */
        @Override
        public void addLacComplete(int rc, LedgerHandle lh, Object ctx) {
            if (rc != BKException.Code.OK) {
                log.warn("LastAddConfirmedUpdate failed: {} ", BKException.getMessage(rc));
            } else if (log.isDebugEnabled()) {
                log.debug("Callback LAC Updated for: {} ", lh.getId());
            }
        }
    }
    /** Adapts the async read callback onto a CompletableFuture of entries. */
    static class SyncReadCallback implements AsyncCallback.ReadCallback {
        private final CompletableFuture<Enumeration<LedgerEntry>> future;
        public SyncReadCallback(CompletableFuture<Enumeration<LedgerEntry>> future) {
            this.future = future;
        }
        /**
         * Implementation of callback interface for synchronous read method.
         *
         * @param rc
         *          return code
         * @param lh
         *          ledger handle
         * @param seq
         *          sequence of entries
         * @param ctx
         *          control object
         */
        @Override
        public void readComplete(int rc, LedgerHandle lh,
                                 Enumeration<LedgerEntry> seq, Object ctx) {
            finish(rc, seq, future);
        }
    }
    /** Add callback that is itself the future yielding the new entry id. */
    static class SyncAddCallback extends CompletableFuture<Long> implements AsyncCallback.AddCallback {
        /**
         * Implementation of callback interface for synchronous read method.
         *
         * @param rc
         *          return code
         * @param lh
         *          ledger handle
         * @param entry
         *          entry identifier
         * @param ctx
         *          control object
         */
        @Override
        public void addComplete(int rc, LedgerHandle lh, long entry, Object ctx) {
            finish(rc, entry, this);
        }
    }
    /** ReadLastConfirmed callback that is itself the future yielding the LAC. */
    static class FutureReadLastConfirmed extends CompletableFuture<Long>
        implements AsyncCallback.ReadLastConfirmedCallback {
        @Override
        public void readLastConfirmedComplete(int rc, long lastConfirmed, Object ctx) {
            finish(rc, lastConfirmed, this);
        }
    }
    /** Wait/notify-based adapter for the read-last-confirmed callback. */
    static class SyncReadLastConfirmedCallback implements AsyncCallback.ReadLastConfirmedCallback {
        /**
         * Implementation of callback interface for synchronous read last confirmed method.
         */
        @Override
        public void readLastConfirmedComplete(int rc, long lastConfirmed, Object ctx) {
            LedgerHandle.LastConfirmedCtx lcCtx = (LedgerHandle.LastConfirmedCtx) ctx;
            synchronized (lcCtx) {
                lcCtx.setRC(rc);
                lcCtx.setLastConfirmed(lastConfirmed);
                // Single waiter per ctx, so notify() (not notifyAll()) suffices here.
                lcCtx.notify();
            }
        }
    }
    /** Adapts the async close callback onto a CompletableFuture. */
    static class SyncCloseCallback implements AsyncCallback.CloseCallback {
        private final CompletableFuture<Void> future;
        public SyncCloseCallback(CompletableFuture<Void> future) {
            this.future = future;
        }
        /**
         * Close callback method.
         *
         * @param rc
         * @param lh
         * @param ctx
         */
        @Override
        public void closeComplete(int rc, LedgerHandle lh, Object ctx) {
            finish(rc, null, future);
        }
    }
    /** Callback/future pair for read-last-confirmed-and-entry. */
    static class FutureReadLastConfirmedAndEntry
        extends CompletableFuture<LastConfirmedAndEntry> implements AsyncCallback.ReadLastConfirmedAndEntryCallback {
        @Override
        public void readLastConfirmedAndEntryComplete(int rc, long lastConfirmed, LedgerEntry entry, Object ctx) {
            LastConfirmedAndEntry result = LastConfirmedAndEntryImpl.create(lastConfirmed, entry);
            finish(rc, result, this);
        }
    }
}
| 370 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerMetadataImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkArgument;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.util.Arrays;
import java.util.Base64;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Optional;
import java.util.TreeMap;
import java.util.stream.Collectors;
import lombok.EqualsAndHashCode;
import org.apache.bookkeeper.client.api.DigestType;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.client.api.LedgerMetadata.State;
import org.apache.bookkeeper.net.BookieId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class encapsulates all the ledger metadata that is persistently stored
* in metadata store.
*
* <p>It provides parsing and serialization methods of such metadata.
*/
@EqualsAndHashCode(exclude =
"ledgerId" // ledgerId is not serialized inside ZK node data
)
class LedgerMetadataImpl implements LedgerMetadata {
static final Logger LOG = LoggerFactory.getLogger(LedgerMetadataImpl.class);
private final long ledgerId;
private final int metadataFormatVersion;
private final int ensembleSize;
private final int writeQuorumSize;
private final int ackQuorumSize;
private final State state;
private final long length;
private final long lastEntryId;
private final long ctime;
final boolean storeCtime; // non-private so builder can access for copy
private final NavigableMap<Long, ImmutableList<BookieId>> ensembles;
private final ImmutableList<BookieId> currentEnsemble;
private final boolean hasPassword;
private final DigestType digestType;
private final byte[] password;
private final Map<String, byte[]> customMetadata;
private long cToken;
    /**
     * Create an immutable metadata instance.
     *
     * <p>Validates the cross-field invariants (closed ledgers must carry length
     * and lastEntryId, open ones must not; password and digest type come as a
     * pair) and deep-copies all mutable inputs into immutable form.
     *
     * @throws IllegalArgumentException if any invariant is violated
     */
    LedgerMetadataImpl(long ledgerId,
                       int metadataFormatVersion,
                       int ensembleSize,
                       int writeQuorumSize,
                       int ackQuorumSize,
                       State state,
                       Optional<Long> lastEntryId,
                       Optional<Long> length,
                       Map<Long, List<BookieId>> ensembles,
                       Optional<DigestType> digestType,
                       Optional<byte[]> password,
                       long ctime,
                       boolean storeCtime,
                       long cToken,
                       Map<String, byte[]> customMetadata) {
        checkArgument(ensembles.size() > 0, "There must be at least one ensemble in the ledger");
        if (state == State.CLOSED) {
            checkArgument(length.isPresent(), "Closed ledger must have a length");
            checkArgument(lastEntryId.isPresent(), "Closed ledger must have a last entry");
        } else {
            checkArgument(!length.isPresent(), "Non-closed ledger must not have a length");
            checkArgument(!lastEntryId.isPresent(), "Non-closed ledger must not have a last entry");
        }
        checkArgument((digestType.isPresent() && password.isPresent())
                      || (!digestType.isPresent() && !password.isPresent()),
                      "Either both password and digest type must be set, or neither");
        this.ledgerId = ledgerId;
        this.metadataFormatVersion = metadataFormatVersion;
        this.ensembleSize = ensembleSize;
        this.writeQuorumSize = writeQuorumSize;
        this.ackQuorumSize = ackQuorumSize;
        this.state = state;
        this.lastEntryId = lastEntryId.orElse(LedgerHandle.INVALID_ENTRY_ID);
        this.length = length.orElse(0L);
        // Deep-copy the ensembles into an unmodifiable sorted map of immutable lists.
        this.ensembles = Collections.unmodifiableNavigableMap(
                ensembles.entrySet().stream().collect(TreeMap::new,
                                                      (m, e) -> m.put(e.getKey(),
                                                                      ImmutableList.copyOf(e.getValue())),
                                                      TreeMap::putAll));
        // Only open ledgers have a writable "current" ensemble (the last fragment).
        if (state != State.CLOSED) {
            currentEnsemble = this.ensembles.lastEntry().getValue();
        } else {
            currentEnsemble = null;
        }
        if (password.isPresent()) {
            this.password = password.get();
            this.digestType = digestType.get();
            this.hasPassword = true;
        } else {
            this.password = null;
            this.hasPassword = false;
            this.digestType = null;
        }
        this.ctime = ctime;
        this.storeCtime = storeCtime;
        this.cToken = cToken;
        this.customMetadata = ImmutableMap.copyOf(customMetadata);
    }
@Override
public long getLedgerId() {
return ledgerId;
}
@Override
public NavigableMap<Long, ? extends List<BookieId>> getAllEnsembles() {
return ensembles;
}
@Override
public int getEnsembleSize() {
return ensembleSize;
}
@Override
public int getWriteQuorumSize() {
return writeQuorumSize;
}
@Override
public int getAckQuorumSize() {
return ackQuorumSize;
}
@Override
public long getCtime() {
return ctime;
}
/**
* In versions 4.1.0 and below, the digest type and password were not
* stored in the metadata.
*
* @return whether the password has been stored in the metadata
*/
@Override
public boolean hasPassword() {
return hasPassword;
}
@Override
public byte[] getPassword() {
if (!hasPassword()) {
return new byte[0];
} else {
return Arrays.copyOf(password, password.length);
}
}
@Override
public DigestType getDigestType() {
if (!hasPassword()) {
return null;
} else {
return digestType;
}
}
@Override
public long getLastEntryId() {
return lastEntryId;
}
@Override
public long getLength() {
return length;
}
@Override
public boolean isClosed() {
return state == State.CLOSED;
}
@Override
public State getState() {
return state;
}
@Override
public List<BookieId> getEnsembleAt(long entryId) {
// the head map cannot be empty, since we insert an ensemble for
// entry-id 0, right when we start
return ensembles.get(ensembles.headMap(entryId + 1).lastKey());
}
@Override
public Map<String, byte[]> getCustomMetadata() {
return this.customMetadata;
}
@Override
public String toString() {
return toStringRepresentation(true);
}
/**
* Returns a string representation of this LedgerMetadata object by
* filtering out the password field.
*
* @return a string representation of the object without password field in
* it.
*/
@Override
public String toSafeString() {
return toStringRepresentation(false);
}
private String toStringRepresentation(boolean withPassword) {
MoreObjects.ToStringHelper helper = MoreObjects.toStringHelper("LedgerMetadata");
helper.add("formatVersion", metadataFormatVersion)
.add("ensembleSize", ensembleSize)
.add("writeQuorumSize", writeQuorumSize)
.add("ackQuorumSize", ackQuorumSize)
.add("state", state);
if (state == State.CLOSED) {
helper.add("length", length)
.add("lastEntryId", lastEntryId);
}
if (hasPassword()) {
helper.add("digestType", digestType);
if (withPassword) {
helper.add("password", "base64:" + Base64.getEncoder().encodeToString(password));
} else {
helper.add("password", "OMITTED");
}
}
helper.add("ensembles", ensembles.toString());
helper.add("customMetadata",
customMetadata.entrySet().stream().collect(
Collectors.toMap(e -> e.getKey(),
e -> "base64:" + Base64.getEncoder().encodeToString(e.getValue()))));
return helper.toString();
}
@Override
public int getMetadataFormatVersion() {
return metadataFormatVersion;
}
boolean shouldStoreCtime() {
return storeCtime;
}
@Override
public long getCToken() {
return cToken;
}
}
| 371 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/package-info.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
/**
* BookKeeper Client.
*/
package org.apache.bookkeeper.client;
| 372 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/ForceLedgerOp.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Preconditions.checkState;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieClient;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.ForceLedgerCallback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This represents a request to sync the ledger on every bookie.
*/
/**
 * This represents a request to sync the ledger on every bookie.
 */
class ForceLedgerOp implements Runnable, ForceLedgerCallback {
    private static final Logger LOG = LoggerFactory.getLogger(ForceLedgerOp.class);

    final CompletableFuture<Void> cb;
    DistributionSchedule.AckSet ackSet;
    boolean completed = false;
    boolean errored = false;
    int lastSeenError = BKException.Code.WriteException;
    final List<BookieId> currentEnsemble;
    long currentNonDurableLastAddConfirmed = LedgerHandle.INVALID_ENTRY_ID;
    final LedgerHandle lh;
    final BookieClient bookieClient;

    ForceLedgerOp(LedgerHandle lh, BookieClient bookieClient,
                  List<BookieId> ensemble,
                  CompletableFuture<Void> cb) {
        this.lh = lh;
        this.bookieClient = bookieClient;
        this.currentEnsemble = ensemble;
        this.cb = cb;
    }

    // Issue a single forceLedger request; the bookie index travels as the callback ctx.
    void sendForceLedgerRequest(int bookieIndex) {
        bookieClient.forceLedger(currentEnsemble.get(bookieIndex), lh.ledgerId, this, bookieIndex);
    }

    @Override
    public void run() {
        initiate();
    }

    void initiate() {
        // Snapshot the client-side non-durable LAC. We are running inside the
        // OrderedExecutor, which gives a strict ordering on these events.
        this.currentNonDurableLastAddConfirmed = lh.pendingAddsSequenceHead;
        if (LOG.isDebugEnabled()) {
            LOG.debug("force {} clientNonDurableLac {}", lh.ledgerId, currentNonDurableLastAddConfirmed);
        }
        // the request must reach every bookie in the ensemble
        this.ackSet = lh.distributionSchedule.getEnsembleAckSet();
        final DistributionSchedule.WriteSet ensembleSet = lh.getDistributionSchedule()
                .getEnsembleSet(currentNonDurableLastAddConfirmed);
        try {
            for (int idx = 0; idx < ensembleSet.size(); idx++) {
                sendForceLedgerRequest(ensembleSet.get(idx));
            }
        } finally {
            ensembleSet.recycle();
        }
    }

    @Override
    public void forceLedgerComplete(int rc, long ledgerId, BookieId addr, Object ctx) {
        final int bookieIndex = (Integer) ctx;
        checkState(!completed, "We are waiting for all the bookies, it is not expected an early exit");
        if (errored) {
            // a failure was already reported; never fire error callbacks twice
            return;
        }
        if (BKException.Code.OK != rc) {
            lastSeenError = rc;
            // a single failed bookie fails the whole operation, since we are
            // waiting for every bookie in the ensemble
            LOG.error("ForceLedger did not succeed: Ledger {} on {}", ledgerId, addr);
            errored = true;
            FutureUtils.completeExceptionally(cb, BKException.create(lastSeenError));
            return;
        }
        if (ackSet.completeBookieAndCheck(bookieIndex)) {
            completed = true;
            // every bookie has sync'd its own journal for every acknowledged
            // entry issued before the force() call
            if (LOG.isDebugEnabled()) {
                LOG.debug("After force on ledger {} updating LastAddConfirmed to {} ",
                        ledgerId, currentNonDurableLastAddConfirmed);
            }
            lh.updateLastConfirmed(currentNonDurableLastAddConfirmed, lh.getLength());
            FutureUtils.complete(cb, null);
        }
    }
}
| 373 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookieAddressResolverDisabled.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.BookieSocketAddress;
import org.apache.bookkeeper.proto.BookieAddressResolver;
/**
* Resolve legacy style BookieIDs to Network addresses.
*/
@Slf4j
public final class BookieAddressResolverDisabled implements BookieAddressResolver {

    public BookieAddressResolverDisabled() {
    }

    /**
     * Resolve a legacy-style bookie id by parsing the host/port encoded in it.
     */
    @Override
    public BookieSocketAddress resolve(BookieId bookieId) {
        final BookieSocketAddress legacyAddress = BookieSocketAddress.resolveLegacyBookieId(bookieId);
        return legacyAddress;
    }
}
| 374 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/ClientContext.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import io.netty.buffer.ByteBufAllocator;
import org.apache.bookkeeper.common.util.OrderedExecutor;
import org.apache.bookkeeper.common.util.OrderedScheduler;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.proto.BookieClient;
/**
* Collection of client objects used by LedgerHandle to interact with
* the outside world. Normally these are instantiated by the BookKeeper object
* but they are present to the LedgerHandle through this interface to allow
* tests to easily inject mocked versions.
*/
public interface ClientContext {

    /** Client configuration derived from the user-supplied settings. */
    ClientInternalConf getConf();

    /** Accessor to the ledger metadata store. */
    LedgerManager getLedgerManager();

    /** Watcher tracking which bookies are available. */
    BookieWatcher getBookieWatcher();

    /** Policy used to choose and reorder bookie ensembles. */
    EnsemblePlacementPolicy getPlacementPolicy();

    /** Network client used to talk to individual bookies. */
    BookieClient getBookieClient();

    /** Allocator used for entry payload buffers. */
    ByteBufAllocator getByteBufAllocator();

    /** Executor on which per-ledger ordered work is submitted. */
    OrderedExecutor getMainWorkerPool();

    /** Scheduler for timed/speculative tasks. */
    OrderedScheduler getScheduler();

    /** Sink for client-side metrics. */
    BookKeeperClientStats getClientStats();

    /** Whether the owning client has been closed. */
    boolean isClientClosed();
}
| 375 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/ReadLastConfirmedAndEntryOp.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import com.google.common.util.concurrent.ListenableFuture;
import io.netty.buffer.ByteBuf;
import java.util.BitSet;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.client.impl.LedgerEntryImpl;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieProtocol;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks;
import org.apache.bookkeeper.proto.ReadLastConfirmedAndEntryContext;
import org.apache.bookkeeper.proto.checksum.DigestManager;
import org.apache.bookkeeper.util.MathUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Long poll read operation.
*/
class ReadLastConfirmedAndEntryOp implements BookkeeperInternalCallbacks.ReadEntryCallback,
        SpeculativeRequestExecutor {

    static final Logger LOG = LoggerFactory.getLogger(ReadLastConfirmedAndEntryOp.class);

    // The single in-flight request (parallel or sequential flavor).
    ReadLACAndEntryRequest request;
    // Bookies from which we have received any response at all.
    final BitSet heardFromHostsBitSet;
    // Bookies that responded with no entry (lac not advanced).
    final BitSet emptyResponsesFromHostsBitSet;
    // Max reads that may miss the entry before we give up on it.
    final int maxMissedReadsAllowed;
    boolean parallelRead = false;
    // Guards the terminal callback: flips to true exactly once.
    final AtomicBoolean requestComplete = new AtomicBoolean(false);

    final long requestTimeNano;
    private final LedgerHandle lh;
    private final ClientContext clientCtx;
    private final LastConfirmedAndEntryCallback cb;

    // Outstanding bookie responses; only mutated on the ordered executor.
    private int numResponsesPending;
    private final int numEmptyResponsesAllowed;
    private volatile boolean hasValidResponse = false;
    // Entry id the caller has already seen; we long-poll for prevEntryId + 1.
    private final long prevEntryId;
    private long lastAddConfirmed;
    private long timeOutInMillis;
    private final List<BookieId> currentEnsemble;
    private ScheduledFuture<?> speculativeTask = null;

    /**
     * Base for a single long-poll attempt against the ensemble; tracks
     * per-bookie errors and completes at most once.
     */
    abstract class ReadLACAndEntryRequest implements AutoCloseable {

        final AtomicBoolean complete = new AtomicBoolean(false);

        int rc = BKException.Code.OK;
        int firstError = BKException.Code.OK;
        int numMissedEntryReads = 0;

        final List<BookieId> ensemble;
        final DistributionSchedule.WriteSet writeSet;
        final DistributionSchedule.WriteSet orderedEnsemble;
        final LedgerEntryImpl entryImpl;

        ReadLACAndEntryRequest(List<BookieId> ensemble, long lId, long eId) {
            this.entryImpl = LedgerEntryImpl.create(lId, eId);
            this.ensemble = ensemble;
            this.writeSet = lh.getDistributionSchedule().getEnsembleSet(eId);
            // optionally reorder the read sequence based on bookie health
            if (clientCtx.getConf().enableReorderReadSequence) {
                this.orderedEnsemble = clientCtx.getPlacementPolicy().reorderReadLACSequence(ensemble,
                        lh.getBookiesHealthInfo(), writeSet.copy());
            } else {
                this.orderedEnsemble = writeSet.copy();
            }
        }

        @Override
        public void close() {
            entryImpl.close();
        }

        synchronized int getFirstError() {
            return firstError;
        }

        /**
         * Execute the read request.
         */
        abstract void read();

        /**
         * Complete the read request from <i>host</i>.
         *
         * @param bookieIndex
         *          bookie index
         * @param host
         *          host that respond the read
         * @param buffer
         *          the data buffer
         * @return return true if we managed to complete the entry;
         *         otherwise return false if the read entry is not complete or it is already completed before
         */
        boolean complete(int bookieIndex, BookieId host, final ByteBuf buffer, long entryId) {
            ByteBuf content;
            try {
                content = lh.getDigestManager().verifyDigestAndReturnData(entryId, buffer);
            } catch (BKException.BKDigestMatchException e) {
                logErrorAndReattemptRead(bookieIndex, host, "Mac mismatch", BKException.Code.DigestMatchException);
                return false;
            }
            // first successful response wins; later ones are ignored
            if (!complete.getAndSet(true)) {
                writeSet.recycle();
                orderedEnsemble.recycle();
                rc = BKException.Code.OK;
                /*
                 * The length is a long and it is the last field of the metadata of an entry.
                 * Consequently, we have to subtract 8 from METADATA_LENGTH to get the length.
                 */
                entryImpl.setLength(buffer.getLong(DigestManager.METADATA_LENGTH - 8));
                entryImpl.setEntryBuf(content);
                return true;
            } else {
                return false;
            }
        }

        /**
         * Fail the request with given result code <i>rc</i>.
         *
         * @param rc
         *          result code to fail the request.
         * @return true if we managed to fail the entry; otherwise return false if it already failed or completed.
         */
        boolean fail(int rc) {
            if (complete.compareAndSet(false, true)) {
                writeSet.recycle();
                orderedEnsemble.recycle();
                this.rc = rc;
                translateAndSetFirstError(rc);
                completeRequest();
                return true;
            } else {
                return false;
            }
        }

        // Keep the most meaningful error: "not found" style errors are
        // overwritten by any other failure, which may indicate a valid read
        // that simply failed.
        private synchronized void translateAndSetFirstError(int rc) {
            if (BKException.Code.OK == firstError
                    || BKException.Code.NoSuchEntryException == firstError
                    || BKException.Code.NoSuchLedgerExistsException == firstError) {
                firstError = rc;
            } else if (BKException.Code.BookieHandleNotAvailableException == firstError
                    && BKException.Code.NoSuchEntryException != rc
                    && BKException.Code.NoSuchLedgerExistsException != rc) {
                // if other exception rather than NoSuchEntryException is returned
                // we need to update firstError to indicate that it might be a valid read but just failed.
                firstError = rc;
            }
        }

        /**
         * Log error <i>errMsg</i> and reattempt read from <i>host</i>.
         *
         * @param bookieIndex
         *          bookie index
         * @param host
         *          host that just respond
         * @param errMsg
         *          error msg to log
         * @param rc
         *          read result code
         */
        synchronized void logErrorAndReattemptRead(int bookieIndex, BookieId host, String errMsg, int rc) {
            translateAndSetFirstError(rc);
            if (BKException.Code.NoSuchEntryException == rc || BKException.Code.NoSuchLedgerExistsException == rc) {
                // Since we send all long poll requests to every available node, we should only
                // treat these errors as failures if the node from which we received this is part of
                // the writeSet
                if (this.writeSet.contains(bookieIndex)) {
                    lh.registerOperationFailureOnBookie(host, entryImpl.getEntryId());
                }
                ++numMissedEntryReads;
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("{} while reading entry: {} ledgerId: {} from bookie: {}", errMsg, entryImpl.getEntryId(),
                        lh.getId(), host);
            }
        }

        /**
         * Send to next replica speculatively, if required and possible.
         * This returns the host we may have sent to for unit testing.
         *
         * @param heardFromHostsBitSet
         *      the set of hosts that we already received responses.
         * @return host we sent to if we sent. null otherwise.
         */
        abstract BookieId maybeSendSpeculativeRead(BitSet heardFromHostsBitSet);

        /**
         * Whether the read request completed.
         *
         * @return true if the read request is completed.
         */
        boolean isComplete() {
            return complete.get();
        }

        /**
         * Get result code of this entry.
         *
         * @return result code.
         */
        int getRc() {
            return rc;
        }

        @Override
        public String toString() {
            return String.format("L%d-E%d", entryImpl.getLedgerId(), entryImpl.getEntryId());
        }
    }

    /**
     * Flavor that issues the read to every bookie in the ensemble at once.
     */
    class ParallelReadRequest extends ReadLACAndEntryRequest {

        int numPendings;

        ParallelReadRequest(List<BookieId> ensemble, long lId, long eId) {
            super(ensemble, lId, eId);
            numPendings = orderedEnsemble.size();
        }

        @Override
        void read() {
            for (int i = 0; i < orderedEnsemble.size(); i++) {
                BookieId to = ensemble.get(orderedEnsemble.get(i));
                try {
                    sendReadTo(orderedEnsemble.get(i), to, this);
                } catch (InterruptedException ie) {
                    LOG.error("Interrupted reading entry {} : ", this, ie);
                    Thread.currentThread().interrupt();
                    fail(BKException.Code.InterruptedException);
                    return;
                }
            }
        }

        @Override
        synchronized void logErrorAndReattemptRead(int bookieIndex, BookieId host, String errMsg, int rc) {
            super.logErrorAndReattemptRead(bookieIndex, host, errMsg, rc);
            --numPendings;
            // if received all responses or this entry doesn't meet quorum write, complete the request.
            if (numMissedEntryReads > maxMissedReadsAllowed || numPendings == 0) {
                if (BKException.Code.BookieHandleNotAvailableException == firstError
                        && numMissedEntryReads > maxMissedReadsAllowed) {
                    firstError = BKException.Code.NoSuchEntryException;
                }
                fail(firstError);
            }
        }

        @Override
        BookieId maybeSendSpeculativeRead(BitSet heardFromHostsBitSet) {
            // no speculative read
            return null;
        }
    }

    /**
     * Flavor that reads one replica at a time, advancing to the next replica
     * on error or when a speculative-read timer fires.
     */
    class SequenceReadRequest extends ReadLACAndEntryRequest {

        static final int NOT_FOUND = -1;

        int nextReplicaIndexToReadFrom = 0;

        final BitSet sentReplicas;
        final BitSet erroredReplicas;
        final BitSet emptyResponseReplicas;

        SequenceReadRequest(List<BookieId> ensemble, long lId, long eId) {
            super(ensemble, lId, eId);
            this.sentReplicas = new BitSet(orderedEnsemble.size());
            this.erroredReplicas = new BitSet(orderedEnsemble.size());
            this.emptyResponseReplicas = new BitSet(orderedEnsemble.size());
        }

        private synchronized int getNextReplicaIndexToReadFrom() {
            return nextReplicaIndexToReadFrom;
        }

        // Map a bookie (ensemble) index back to its position in the ordered read sequence.
        private int getReplicaIndex(int bookieIndex) {
            return orderedEnsemble.indexOf(bookieIndex);
        }

        // Translate the replica-order "sent" bits into ensemble-index bits.
        private BitSet getSentToBitSet() {
            BitSet b = new BitSet(ensemble.size());
            for (int i = 0; i < sentReplicas.length(); i++) {
                if (sentReplicas.get(i)) {
                    b.set(orderedEnsemble.get(i));
                }
            }
            return b;
        }

        // True while at least one sent request has neither errored nor come back empty.
        private boolean readsOutstanding() {
            return (sentReplicas.cardinality() - erroredReplicas.cardinality()
                    - emptyResponseReplicas.cardinality()) > 0;
        }

        /**
         * Send to next replica speculatively, if required and possible.
         * This returns the host we may have sent to for unit testing.
         * @return host we sent to if we sent. null otherwise.
         */
        @Override
        synchronized BookieId maybeSendSpeculativeRead(BitSet heardFrom) {
            if (nextReplicaIndexToReadFrom >= getLedgerMetadata().getEnsembleSize()) {
                return null;
            }
            BitSet sentTo = getSentToBitSet();
            sentTo.and(heardFrom);
            // only send another read, if we have had no response at all (even for other entries)
            // from any of the other bookies we have sent the request to
            if (sentTo.cardinality() == 0) {
                return sendNextRead();
            } else {
                return null;
            }
        }

        @Override
        void read() {
            sendNextRead();
        }

        synchronized BookieId sendNextRead() {
            if (nextReplicaIndexToReadFrom >= getLedgerMetadata().getEnsembleSize()) {
                // we are done, the read has failed from all replicas, just fail the
                // read
                // Do it a bit pessimistically, only when finished trying all replicas
                // to check whether we received more missed reads than requiredBookiesMissingEntryForRecovery
                if (BKException.Code.BookieHandleNotAvailableException == firstError
                        && numMissedEntryReads > maxMissedReadsAllowed) {
                    firstError = BKException.Code.NoSuchEntryException;
                }
                fail(firstError);
                return null;
            }
            int replica = nextReplicaIndexToReadFrom;
            int bookieIndex = orderedEnsemble.get(nextReplicaIndexToReadFrom);
            nextReplicaIndexToReadFrom++;
            try {
                BookieId to = ensemble.get(bookieIndex);
                sendReadTo(bookieIndex, to, this);
                sentReplicas.set(replica);
                return to;
            } catch (InterruptedException ie) {
                LOG.error("Interrupted reading entry " + this, ie);
                Thread.currentThread().interrupt();
                fail(BKException.Code.InterruptedException);
                return null;
            }
        }

        @Override
        synchronized void logErrorAndReattemptRead(int bookieIndex, BookieId host, String errMsg, int rc) {
            super.logErrorAndReattemptRead(bookieIndex, host, errMsg, rc);
            int replica = getReplicaIndex(bookieIndex);
            if (replica == NOT_FOUND) {
                LOG.error("Received error from a host which is not in the ensemble {} {}.", host, ensemble);
                return;
            }
            // rc == OK here means an "empty" (lac-not-advanced) response
            if (BKException.Code.OK == rc) {
                emptyResponseReplicas.set(replica);
            } else {
                erroredReplicas.set(replica);
            }
            if (!readsOutstanding()) {
                sendNextRead();
            }
        }

        @Override
        boolean complete(int bookieIndex, BookieId host, ByteBuf buffer, long entryId) {
            boolean completed = super.complete(bookieIndex, host, buffer, entryId);
            if (completed) {
                int numReplicasTried = getNextReplicaIndexToReadFrom();
                // Check if any speculative reads were issued and mark any bookies before the
                // first speculative read as slow
                for (int i = 0; i < numReplicasTried; i++) {
                    int slowBookieIndex = orderedEnsemble.get(i);
                    BookieId slowBookieSocketAddress = ensemble.get(slowBookieIndex);
                    clientCtx.getPlacementPolicy().registerSlowBookie(slowBookieSocketAddress, entryId);
                }
            }
            return completed;
        }
    }

    ReadLastConfirmedAndEntryOp(LedgerHandle lh,
                                ClientContext clientCtx,
                                List<BookieId> ensemble,
                                LastConfirmedAndEntryCallback cb,
                                long prevEntryId,
                                long timeOutInMillis) {
        this.lh = lh;
        this.clientCtx = clientCtx;
        this.cb = cb;
        this.prevEntryId = prevEntryId;
        this.lastAddConfirmed = lh.getLastAddConfirmed();
        this.timeOutInMillis = timeOutInMillis;
        this.numResponsesPending = 0;
        this.currentEnsemble = ensemble;

        // since long poll is effectively reading lac with waits, lac can be potentially
        // be advanced in different write quorums, so we need to make sure to cover enough
        // bookies before claiming lac is not advanced.
        this.numEmptyResponsesAllowed = getLedgerMetadata().getEnsembleSize()
                - getLedgerMetadata().getAckQuorumSize() + 1;
        this.requestTimeNano = MathUtils.nowInNano();

        maxMissedReadsAllowed = getLedgerMetadata().getEnsembleSize()
                - getLedgerMetadata().getAckQuorumSize();
        heardFromHostsBitSet = new BitSet(getLedgerMetadata().getEnsembleSize());
        emptyResponsesFromHostsBitSet = new BitSet(getLedgerMetadata().getEnsembleSize());
    }

    protected LedgerMetadata getLedgerMetadata() {
        return lh.getLedgerMetadata();
    }

    // Builder-style toggle between parallel and sequential read strategies.
    ReadLastConfirmedAndEntryOp parallelRead(boolean enabled) {
        this.parallelRead = enabled;
        return this;
    }

    protected void cancelSpeculativeTask(boolean mayInterruptIfRunning) {
        if (speculativeTask != null) {
            speculativeTask.cancel(mayInterruptIfRunning);
            speculativeTask = null;
        }
    }

    /**
     * Speculative Read Logic.
     */
    @Override
    public ListenableFuture<Boolean> issueSpeculativeRequest() {
        // run on the per-ledger ordered executor so state access is serialized
        return clientCtx.getMainWorkerPool().submitOrdered(lh.getId(), new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                if (!requestComplete.get() && !request.isComplete()
                        && (null != request.maybeSendSpeculativeRead(heardFromHostsBitSet))) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Send speculative ReadLAC {} for ledger {} (previousLAC: {}). Hosts heard are {}.",
                                request, lh.getId(), lastAddConfirmed, heardFromHostsBitSet);
                    }
                    return true;
                }
                return false;
            }
        });
    }

    // Start the long poll for entry (prevEntryId + 1); for sequential reads,
    // also arm the speculative-read timer if one is configured.
    public void initiate() {
        if (parallelRead) {
            request = new ParallelReadRequest(currentEnsemble, lh.getId(), prevEntryId + 1);
        } else {
            request = new SequenceReadRequest(currentEnsemble, lh.getId(), prevEntryId + 1);
        }
        request.read();

        if (!parallelRead && clientCtx.getConf().readLACSpeculativeRequestPolicy.isPresent()) {
            speculativeTask = clientCtx.getConf().readLACSpeculativeRequestPolicy.get()
                    .initiateSpeculativeRequest(clientCtx.getScheduler(), this);
        }
    }

    void sendReadTo(int bookieIndex, BookieId to, ReadLACAndEntryRequest entry) throws InterruptedException {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Calling Read LAC and Entry with {} and long polling interval {} on Bookie {} - Parallel {}",
                    prevEntryId, timeOutInMillis, to, parallelRead);
        }
        clientCtx.getBookieClient().readEntryWaitForLACUpdate(to,
                lh.getId(),
                BookieProtocol.LAST_ADD_CONFIRMED,
                prevEntryId,
                timeOutInMillis,
                true,
                this, new ReadLastConfirmedAndEntryContext(bookieIndex, to));
        this.numResponsesPending++;
    }

    /**
     * Wrapper to get all recovered data from the request.
     */
    interface LastConfirmedAndEntryCallback {
        void readLastConfirmedAndEntryComplete(int rc, long lastAddConfirmed, LedgerEntry entry);
    }

    // Deliver the terminal result to the caller, recording op latency stats.
    private void submitCallback(int rc) {
        long latencyMicros = MathUtils.elapsedMicroSec(requestTimeNano);
        LedgerEntry entry;
        cancelSpeculativeTask(true);
        if (BKException.Code.OK != rc) {
            clientCtx.getClientStats().getReadLacAndEntryOpLogger()
                    .registerFailedEvent(latencyMicros, TimeUnit.MICROSECONDS);
            entry = null;
        } else {
            // could have received an advanced lac, with no entry
            clientCtx.getClientStats().getReadLacAndEntryOpLogger()
                    .registerSuccessfulEvent(latencyMicros, TimeUnit.MICROSECONDS);
            if (request.entryImpl.getEntryBuffer() != null) {
                entry = new LedgerEntry(request.entryImpl);
            } else {
                entry = null;
            }
        }
        request.close();
        cb.readLastConfirmedAndEntryComplete(rc, lastAddConfirmed, entry);
    }

    @Override
    public void readEntryComplete(int rc, long ledgerId, long entryId, ByteBuf buffer, Object ctx) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("{} received response for (lid={}, eid={}) : {}",
                    getClass().getName(), ledgerId, entryId, rc);
        }
        ReadLastConfirmedAndEntryContext rCtx = (ReadLastConfirmedAndEntryContext) ctx;
        BookieId bookie = rCtx.getBookieAddress();
        numResponsesPending--;
        if (BKException.Code.OK == rc) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Received lastAddConfirmed (lac={}) from bookie({}) for (lid={}).",
                        rCtx.getLastAddConfirmed(), bookie, ledgerId);
            }
            if (rCtx.getLastAddConfirmed() > lastAddConfirmed) {
                lastAddConfirmed = rCtx.getLastAddConfirmed();
                lh.updateLastConfirmed(rCtx.getLastAddConfirmed(), 0L);
            }
            hasValidResponse = true;
            if (entryId != BookieProtocol.LAST_ADD_CONFIRMED) {
                // the bookie returned an actual entry alongside the lac
                buffer.retain();
                if (!requestComplete.get() && request.complete(rCtx.getBookieIndex(), bookie, buffer, entryId)) {
                    // callback immediately
                    if (rCtx.getLacUpdateTimestamp().isPresent()) {
                        long elapsedMicros = TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis()
                                - rCtx.getLacUpdateTimestamp().get());
                        elapsedMicros = Math.max(elapsedMicros, 0);
                        clientCtx.getClientStats().getReadLacAndEntryRespLogger()
                                .registerSuccessfulEvent(elapsedMicros, TimeUnit.MICROSECONDS);
                    }
                    // if the request has already completed, the buffer is not going to be used anymore, release it.
                    if (!completeRequest()) {
                        buffer.release();
                    }
                    heardFromHostsBitSet.set(rCtx.getBookieIndex(), true);
                } else {
                    buffer.release();
                }
            } else {
                // empty response: the bookie's lac did not advance past prevEntryId
                emptyResponsesFromHostsBitSet.set(rCtx.getBookieIndex(), true);
                if (lastAddConfirmed > prevEntryId) {
                    // received advanced lac
                    completeRequest();
                } else if (emptyResponsesFromHostsBitSet.cardinality() >= numEmptyResponsesAllowed) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Completed readLACAndEntry(lid = {}, previousEntryId = {}) "
                                        + "after received {} empty responses ('{}').",
                                ledgerId, prevEntryId, emptyResponsesFromHostsBitSet.cardinality(),
                                emptyResponsesFromHostsBitSet);
                    }
                    completeRequest();
                } else {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Received empty response for readLACAndEntry(lid = {}, previousEntryId = {}) from"
                                        + " bookie {} @ {}, reattempting reading next bookie : lac = {}",
                                ledgerId, prevEntryId, rCtx.getBookieAddress(),
                                rCtx.getBookieAddress(), lastAddConfirmed);
                    }
                    request.logErrorAndReattemptRead(rCtx.getBookieIndex(), bookie, "Empty Response", rc);
                }
                return;
            }
        } else if (BKException.Code.UnauthorizedAccessException == rc && !requestComplete.get()) {
            // unauthorized: fail immediately, no point retrying other bookies
            submitCallback(rc);
            requestComplete.set(true);
        } else {
            request.logErrorAndReattemptRead(rCtx.getBookieIndex(), bookie, "Error: " + BKException.getMessage(rc), rc);
            return;
        }
        if (numResponsesPending <= 0) {
            completeRequest();
        }
    }

    // Flip requestComplete and fire the callback exactly once; returns true
    // only for the invocation that performed the transition.
    private boolean completeRequest() {
        boolean requestCompleted = requestComplete.compareAndSet(false, true);
        if (requestCompleted) {
            if (!hasValidResponse) {
                // no success called
                submitCallback(request.getFirstError());
            } else {
                // callback
                submitCallback(BKException.Code.OK);
            }
        }
        return requestCompleted;
    }

    @Override
    public String toString() {
        return String.format("ReadLastConfirmedAndEntryOp(lid=%d, prevEntryId=%d])", lh.getId(), prevEntryId);
    }
}
| 376 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/impl/BookKeeperClientStatsImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.client.impl;
import static org.apache.bookkeeper.client.BookKeeperClientStats.CATEGORY_CLIENT;
import static org.apache.bookkeeper.client.BookKeeperClientStats.CLIENT_SCOPE;
import org.apache.bookkeeper.client.BookKeeperClientStats;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
/**
 * The default implementation of {@link BookKeeperClientStats}.
 *
 * <p>Every logger/counter is resolved eagerly from the supplied {@link StatsLogger} in the
 * constructor, so hot-path accessors are plain field reads. The only per-call lookup is
 * {@link #getEnsembleBookieDistributionCounter(String)}, which is scoped by bookie label.
 */
@StatsDoc(
    name = CLIENT_SCOPE,
    category = CATEGORY_CLIENT,
    help = "BookKeeper client stats"
)
public class BookKeeperClientStatsImpl implements BookKeeperClientStats {

    // Kept for label-scoped and gauge registrations that cannot be pre-resolved.
    private final StatsLogger stats;

    @StatsDoc(
        name = CREATE_OP,
        help = "operation stats of creating ledgers"
    )
    private final OpStatsLogger createOpLogger;
    @StatsDoc(
        name = DELETE_OP,
        help = "operation stats of deleting ledgers"
    )
    private final OpStatsLogger deleteOpLogger;
    @StatsDoc(
        name = OPEN_OP,
        help = "operation stats of opening ledgers"
    )
    private final OpStatsLogger openOpLogger;
    @StatsDoc(
        name = RECOVER_OP,
        help = "operation stats of recovering ledgers"
    )
    private final OpStatsLogger recoverOpLogger;
    @StatsDoc(
        name = READ_OP,
        help = "operation stats of reading entries requests"
    )
    private final OpStatsLogger readOpLogger;
    @StatsDoc(
        name = READ_OP_DM,
        help = "the number of read entries hitting DigestMismatch errors"
    )
    private final Counter readOpDmCounter;
    @StatsDoc(
        name = READ_LAST_CONFIRMED_AND_ENTRY,
        help = "operation stats of read_last_confirmed_and_entry requests"
    )
    private final OpStatsLogger readLacAndEntryOpLogger;
    @StatsDoc(
        name = READ_LAST_CONFIRMED_AND_ENTRY_RESPONSE,
        help = "operation stats of read_last_confirmed_and_entry responses"
    )
    private final OpStatsLogger readLacAndEntryRespLogger;
    @StatsDoc(
        name = ADD_OP,
        help = "operation stats of adding entries requests"
    )
    private final OpStatsLogger addOpLogger;
    @StatsDoc(
        name = FORCE_OP,
        help = "operation stats of force requests"
    )
    private final OpStatsLogger forceOpLogger;
    @StatsDoc(
        name = ADD_OP_UR,
        help = "the number of add entries under replication"
    )
    private final Counter addOpUrCounter;
    @StatsDoc(
        name = WRITE_LAC_OP,
        help = "operation stats of write_lac requests"
    )
    private final OpStatsLogger writeLacOpLogger;
    @StatsDoc(
        name = READ_LAC_OP,
        help = "operation stats of read_lac requests"
    )
    private final OpStatsLogger readLacOpLogger;
    @StatsDoc(
        name = LEDGER_RECOVER_ADD_ENTRIES,
        help = "the distribution of entries written in ledger recovery requests"
    )
    private final OpStatsLogger recoverAddEntriesStats;
    @StatsDoc(
        name = LEDGER_RECOVER_READ_ENTRIES,
        help = "the distribution of entries read in ledger recovery requests"
    )
    private final OpStatsLogger recoverReadEntriesStats;
    @StatsDoc(
        name = ENSEMBLE_CHANGES,
        help = "The number of ensemble changes"
    )
    private final Counter ensembleChangeCounter;
    @StatsDoc(
        name = LAC_UPDATE_HITS,
        help = "The number of successful lac updates on piggybacked responses"
    )
    private final Counter lacUpdateHitsCounter;
    @StatsDoc(
        name = LAC_UPDATE_MISSES,
        help = "The number of unsuccessful lac updates on piggybacked responses"
    )
    private final Counter lacUpdateMissesCounter;
    @StatsDoc(
        name = CLIENT_CHANNEL_WRITE_WAIT,
        help = " The latency distribution of waiting time on channel being writable"
    )
    private final OpStatsLogger clientChannelWriteWaitStats;
    @StatsDoc(
        name = SPECULATIVE_READ_COUNT,
        help = "The number of speculative read requests"
    )
    private final Counter speculativeReadCounter;
    @StatsDoc(
        name = WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS_LATENCY,
        help = "The delay in write completion because min number of fault domains was not reached"
    )
    private final OpStatsLogger writeDelayedDueToNotEnoughFaultDomainsLatency;
    @StatsDoc(
        name = WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS,
        help = "The number of times write completion was delayed because min number of fault domains was not reached"
    )
    private final Counter writeDelayedDueToNotEnoughFaultDomains;
    @StatsDoc(
        name = WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS,
        help = "The number of times write completion timed out because min number of fault domains was not reached"
    )
    private final Counter writeTimedOutDueToNotEnoughFaultDomains;

    public BookKeeperClientStatsImpl(StatsLogger stats) {
        this.stats = stats;
        this.createOpLogger = stats.getOpStatsLogger(CREATE_OP);
        this.deleteOpLogger = stats.getOpStatsLogger(DELETE_OP);
        this.openOpLogger = stats.getOpStatsLogger(OPEN_OP);
        this.recoverOpLogger = stats.getOpStatsLogger(RECOVER_OP);
        this.readOpLogger = stats.getOpStatsLogger(READ_OP);
        this.readOpDmCounter = stats.getCounter(READ_OP_DM);
        this.readLacAndEntryOpLogger = stats.getOpStatsLogger(READ_LAST_CONFIRMED_AND_ENTRY);
        this.readLacAndEntryRespLogger = stats.getOpStatsLogger(READ_LAST_CONFIRMED_AND_ENTRY_RESPONSE);
        this.addOpLogger = stats.getOpStatsLogger(ADD_OP);
        this.forceOpLogger = stats.getOpStatsLogger(FORCE_OP);
        this.addOpUrCounter = stats.getCounter(ADD_OP_UR);
        this.writeLacOpLogger = stats.getOpStatsLogger(WRITE_LAC_OP);
        this.readLacOpLogger = stats.getOpStatsLogger(READ_LAC_OP);
        this.recoverAddEntriesStats = stats.getOpStatsLogger(LEDGER_RECOVER_ADD_ENTRIES);
        this.recoverReadEntriesStats = stats.getOpStatsLogger(LEDGER_RECOVER_READ_ENTRIES);
        this.ensembleChangeCounter = stats.getCounter(ENSEMBLE_CHANGES);
        this.lacUpdateHitsCounter = stats.getCounter(LAC_UPDATE_HITS);
        this.lacUpdateMissesCounter = stats.getCounter(LAC_UPDATE_MISSES);
        this.clientChannelWriteWaitStats = stats.getOpStatsLogger(CLIENT_CHANNEL_WRITE_WAIT);
        // Consistency fix: use the same explicit "this." style as every other assignment above.
        this.speculativeReadCounter = stats.getCounter(SPECULATIVE_READ_COUNT);
        this.writeDelayedDueToNotEnoughFaultDomainsLatency =
            stats.getOpStatsLogger(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS_LATENCY);
        this.writeDelayedDueToNotEnoughFaultDomains = stats.getCounter(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS);
        this.writeTimedOutDueToNotEnoughFaultDomains =
            stats.getCounter(WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS);
    }

    @Override
    public OpStatsLogger getCreateOpLogger() {
        return createOpLogger;
    }

    @Override
    public OpStatsLogger getOpenOpLogger() {
        return openOpLogger;
    }

    @Override
    public OpStatsLogger getDeleteOpLogger() {
        return deleteOpLogger;
    }

    @Override
    public OpStatsLogger getRecoverOpLogger() {
        return recoverOpLogger;
    }

    @Override
    public OpStatsLogger getReadOpLogger() {
        return readOpLogger;
    }

    @Override
    public OpStatsLogger getReadLacAndEntryOpLogger() {
        return readLacAndEntryOpLogger;
    }

    @Override
    public OpStatsLogger getReadLacAndEntryRespLogger() {
        return readLacAndEntryRespLogger;
    }

    @Override
    public OpStatsLogger getAddOpLogger() {
        return addOpLogger;
    }

    @Override
    public OpStatsLogger getForceOpLogger() {
        return forceOpLogger;
    }

    @Override
    public OpStatsLogger getWriteLacOpLogger() {
        return writeLacOpLogger;
    }

    @Override
    public OpStatsLogger getReadLacOpLogger() {
        return readLacOpLogger;
    }

    @Override
    public OpStatsLogger getRecoverAddCountLogger() {
        return recoverAddEntriesStats;
    }

    @Override
    public OpStatsLogger getRecoverReadCountLogger() {
        return recoverReadEntriesStats;
    }

    @Override
    public Counter getReadOpDmCounter() {
        return readOpDmCounter;
    }

    @Override
    public Counter getAddOpUrCounter() {
        return addOpUrCounter;
    }

    @Override
    public Counter getSpeculativeReadCounter() {
        return speculativeReadCounter;
    }

    @Override
    public Counter getEnsembleChangeCounter() {
        return ensembleChangeCounter;
    }

    @Override
    public Counter getLacUpdateHitsCounter() {
        return lacUpdateHitsCounter;
    }

    @Override
    public Counter getLacUpdateMissesCounter() {
        return lacUpdateMissesCounter;
    }

    @Override
    public OpStatsLogger getClientChannelWriteWaitLogger() {
        return clientChannelWriteWaitStats;
    }

    @Override
    public Counter getEnsembleBookieDistributionCounter(String bookie) {
        // Scoped per bookie label; resolved lazily because the label set is unbounded.
        return stats.scopeLabel(BOOKIE_LABEL, bookie).getCounter(LEDGER_ENSEMBLE_BOOKIE_DISTRIBUTION);
    }

    @Override
    public OpStatsLogger getWriteDelayedDueToNotEnoughFaultDomainsLatency() {
        return writeDelayedDueToNotEnoughFaultDomainsLatency;
    }

    @Override
    public Counter getWriteDelayedDueToNotEnoughFaultDomains() {
        return writeDelayedDueToNotEnoughFaultDomains;
    }

    @Override
    public Counter getWriteTimedOutDueToNotEnoughFaultDomains() {
        return writeTimedOutDueToNotEnoughFaultDomains;
    }

    @Override
    public void registerPendingAddsGauge(Gauge<Integer> gauge) {
        stats.registerGauge(PENDING_ADDS, gauge);
    }
}
| 377 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/impl/BookKeeperBuilderImpl.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.impl;
import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.EventLoopGroup;
import io.netty.util.HashedWheelTimer;
import java.io.IOException;
import org.apache.bookkeeper.client.api.BKException;
import org.apache.bookkeeper.client.api.BookKeeper;
import org.apache.bookkeeper.client.api.BookKeeperBuilder;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.feature.FeatureProvider;
import org.apache.bookkeeper.net.DNSToSwitchMapping;
import org.apache.bookkeeper.stats.StatsLogger;
/**
 * Internal builder for {@link org.apache.bookkeeper.client.api.BookKeeper} client.
 *
 * <p>Thin adapter that forwards every setter to the legacy
 * {@link org.apache.bookkeeper.client.BookKeeper.Builder}.
 *
 * @since 4.6
 */
public class BookKeeperBuilderImpl implements BookKeeperBuilder {

    /** Legacy builder that actually assembles the client. */
    private final org.apache.bookkeeper.client.BookKeeper.Builder builder;

    public BookKeeperBuilderImpl(ClientConfiguration conf) {
        this.builder = org.apache.bookkeeper.client.BookKeeper.forConfig(conf);
    }

    @Override
    public BookKeeperBuilder eventLoopGroup(EventLoopGroup eventLoopGroup) {
        builder.eventLoopGroup(eventLoopGroup);
        return this;
    }

    @Override
    public BookKeeperBuilder allocator(ByteBufAllocator allocator) {
        builder.allocator(allocator);
        return this;
    }

    @Override
    public BookKeeperBuilder statsLogger(StatsLogger statsLogger) {
        builder.statsLogger(statsLogger);
        return this;
    }

    @Override
    public BookKeeperBuilder dnsResolver(DNSToSwitchMapping dnsResolver) {
        builder.dnsResolver(dnsResolver);
        return this;
    }

    // Fixed misspelled parameter name ("requeestTimer" -> "requestTimer").
    @Override
    public BookKeeperBuilder requestTimer(HashedWheelTimer requestTimer) {
        builder.requestTimer(requestTimer);
        return this;
    }

    @Override
    public BookKeeperBuilder featureProvider(FeatureProvider featureProvider) {
        builder.featureProvider(featureProvider);
        return this;
    }

    @Override
    public BookKeeper build() throws InterruptedException, BKException, IOException {
        return builder.build();
    }
}
| 378 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/impl/OpenBuilderBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client.impl;
import java.util.Arrays;
import org.apache.bookkeeper.client.LedgerHandle;
import org.apache.bookkeeper.client.api.BKException.Code;
import org.apache.bookkeeper.client.api.DigestType;
import org.apache.bookkeeper.client.api.OpenBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Base class for open builders which does the mundane builder stuff.
 *
 * <p>Holds the open parameters with their defaults (non-recovery open, CRC32 digest,
 * no ledger id yet) and offers a basic {@link #validate()} hook for subclasses.
 */
public abstract class OpenBuilderBase implements OpenBuilder {

    static final Logger LOG = LoggerFactory.getLogger(OpenBuilderBase.class);

    protected boolean recovery = false;
    protected long ledgerId = LedgerHandle.INVALID_LEDGER_ID;
    protected byte[] password;
    protected DigestType digestType = DigestType.CRC32;

    @Override
    public OpenBuilder withLedgerId(long ledgerId) {
        this.ledgerId = ledgerId;
        return this;
    }

    @Override
    public OpenBuilder withRecovery(boolean recovery) {
        this.recovery = recovery;
        return this;
    }

    @Override
    public OpenBuilder withPassword(byte[] password) {
        // Defensive copy so later caller-side mutation cannot leak into the builder.
        this.password = Arrays.copyOf(password, password.length);
        return this;
    }

    @Override
    public OpenBuilder withDigestType(DigestType digestType) {
        this.digestType = digestType;
        return this;
    }

    /**
     * Validates the configured parameters.
     *
     * @return {@link Code#OK} when valid, otherwise the error code describing the problem
     */
    protected int validate() {
        if (ledgerId >= 0) {
            return Code.OK;
        }
        LOG.error("invalid ledgerId {} < 0", ledgerId);
        return Code.NoSuchLedgerExistsOnMetadataServerException;
    }
}
| 379 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/impl/LastConfirmedAndEntryImpl.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.impl;
import io.netty.util.Recycler;
import io.netty.util.Recycler.Handle;
import org.apache.bookkeeper.client.api.LastConfirmedAndEntry;
import org.apache.bookkeeper.client.api.LedgerEntry;
/**
 * This contains LastAddConfirmed entryId and a LedgerEntry wanted to read.
 * It is used for readLastAddConfirmedAndEntry.
 *
 * <p>Instances are pooled via a Netty {@link Recycler}; obtain them with
 * {@link #create(long, org.apache.bookkeeper.client.LedgerEntry)} and return them with
 * {@link #close()}.
 */
public class LastConfirmedAndEntryImpl implements LastConfirmedAndEntry {

    private static final Recycler<LastConfirmedAndEntryImpl> RECYCLER = new Recycler<LastConfirmedAndEntryImpl>() {
        @Override
        protected LastConfirmedAndEntryImpl newObject(Handle<LastConfirmedAndEntryImpl> handle) {
            return new LastConfirmedAndEntryImpl(handle);
        }
    };

    /**
     * Obtains a (pooled) instance carrying {@code lac} and, when {@code entry} is non-null,
     * a {@link LedgerEntryImpl} view of it.
     *
     * @param lac the last-add-confirmed entry id
     * @param entry the entry read alongside the LAC, or {@code null} if none was returned
     * @return a pooled instance; caller must {@link #close()} it when done
     */
    public static LastConfirmedAndEntryImpl create(long lac, org.apache.bookkeeper.client.LedgerEntry entry) {
        LastConfirmedAndEntryImpl entryImpl = RECYCLER.get();
        entryImpl.lac = lac;
        if (null == entry) {
            entryImpl.entry = null;
        } else {
            entryImpl.entry = LedgerEntryImpl.create(
                entry.getLedgerId(),
                entry.getEntryId(),
                entry.getLength(),
                entry.getEntryBuffer());
        }
        return entryImpl;
    }

    private final Handle<LastConfirmedAndEntryImpl> recycleHandle;
    // Primitive long (was boxed Long): avoids autoboxing/unboxing on every
    // getLastAddConfirmed() call; create() always assigns it, close() resets to -1.
    private long lac = -1L;
    private LedgerEntry entry;

    public LastConfirmedAndEntryImpl(Handle<LastConfirmedAndEntryImpl> handle) {
        this.recycleHandle = handle;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public long getLastAddConfirmed() {
        return lac;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean hasEntry() {
        return entry != null;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public LedgerEntry getEntry() {
        return entry;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void close() {
        // Reset state, release the wrapped entry (and its buffer), return to the pool.
        this.lac = -1L;
        if (null != entry) {
            entry.close();
            entry = null;
        }
        recycleHandle.recycle(this);
    }
}
| 380 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/impl/LedgerEntriesImpl.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.impl;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import io.netty.util.Recycler;
import java.util.Iterator;
import java.util.List;
import org.apache.bookkeeper.client.api.LedgerEntries;
import org.apache.bookkeeper.client.api.LedgerEntry;
/**
 * Ledger entries implementation. It is a simple wrap of a list of ledger entries.
 *
 * <p>Instances are pooled via a Netty {@link Recycler}; {@link #close()} releases every
 * wrapped entry and returns the wrapper to the pool.
 */
public class LedgerEntriesImpl implements LedgerEntries {

    private static final Recycler<LedgerEntriesImpl> RECYCLER = new Recycler<LedgerEntriesImpl>() {
        @Override
        protected LedgerEntriesImpl newObject(Recycler.Handle<LedgerEntriesImpl> handle) {
            return new LedgerEntriesImpl(handle);
        }
    };

    private List<LedgerEntry> entries;
    private final Recycler.Handle<LedgerEntriesImpl> recyclerHandle;

    private LedgerEntriesImpl(Recycler.Handle<LedgerEntriesImpl> handle) {
        this.recyclerHandle = handle;
    }

    /**
     * Create ledger entries.
     *
     * @param entries the entries with ordering
     * @return the LedgerEntriesImpl
     */
    public static LedgerEntriesImpl create(List<LedgerEntry> entries) {
        checkArgument(!entries.isEmpty(), "entries for create should not be empty.");
        LedgerEntriesImpl wrapped = RECYCLER.get();
        wrapped.entries = entries;
        return wrapped;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public LedgerEntry getEntry(long entryId) {
        checkNotNull(entries, "entries has been recycled");
        // NOTE(review): index math assumes the list holds consecutive entry ids — confirm callers.
        final long firstId = entries.get(0).getEntryId();
        final long lastId = entries.get(entries.size() - 1).getEntryId();
        if (entryId >= firstId && entryId <= lastId) {
            return entries.get((int) (entryId - firstId));
        }
        throw new IndexOutOfBoundsException("required index: " + entryId
                + " is out of bounds: [ " + firstId + ", " + lastId + " ].");
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Iterator<LedgerEntry> iterator() {
        checkNotNull(entries, "entries has been recycled");
        return entries.iterator();
    }

    @Override
    public void close() {
        // Release buffers, drop the list, then return this wrapper to the pool
        // (the original split this across recycle()/releaseByteBuf() helpers).
        List<LedgerEntry> held = entries;
        if (held != null) {
            held.forEach(LedgerEntry::close);
            held.clear();
            entries = null;
        }
        recyclerHandle.recycle(this);
    }
}
| 381 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/impl/LedgerEntryImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client.impl;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufUtil;
import io.netty.util.Recycler;
import io.netty.util.Recycler.Handle;
import io.netty.util.ReferenceCountUtil;
import java.nio.ByteBuffer;
import org.apache.bookkeeper.client.api.LedgerEntry;
/**
 * Ledger entry. It is a simple tuple containing the ledger id, the entry-id, and
 * the entry content.
 *
 * <p>Instances are pooled via a Netty Recycler: obtain them through one of the static
 * {@code create}/{@code duplicate} factories and return them with {@link #close()}.
 * The instance owns one reference to its {@code entryBuf}; close()/setEntryBuf()
 * release it.
 */
public class LedgerEntryImpl implements LedgerEntry {
    private static final Recycler<LedgerEntryImpl> RECYCLER = new Recycler<LedgerEntryImpl>() {
        @Override
        protected LedgerEntryImpl newObject(Handle<LedgerEntryImpl> handle) {
            return new LedgerEntryImpl(handle);
        }
    };
    // Factory for an entry with ids only; length/entryBuf stay at their recycled values
    // until setLength()/setEntryBuf() are called.
    public static LedgerEntryImpl create(long ledgerId,
                                         long entryId) {
        LedgerEntryImpl entry = RECYCLER.get();
        entry.ledgerId = ledgerId;
        entry.entryId = entryId;
        return entry;
    }
    // Factory for a fully-populated entry. Takes ownership of (one reference to) buf;
    // it does NOT retain, so the caller transfers its reference here.
    public static LedgerEntryImpl create(long ledgerId,
                                         long entryId,
                                         long length,
                                         ByteBuf buf) {
        LedgerEntryImpl entry = RECYCLER.get();
        entry.ledgerId = ledgerId;
        entry.entryId = entryId;
        entry.length = length;
        entry.entryBuf = buf;
        return entry;
    }
    // Deep-ish copy: shares the source's buffer memory via retainedSlice(), so the
    // duplicate holds its own reference and can be closed independently.
    public static LedgerEntryImpl duplicate(LedgerEntry entry) {
        return create(
            entry.getLedgerId(),
            entry.getEntryId(),
            entry.getLength(),
            entry.getEntryBuffer().retainedSlice());
    }
    private final Handle<LedgerEntryImpl> recycleHandle;
    private long ledgerId;
    private long entryId;
    private long length;
    private ByteBuf entryBuf;
    private LedgerEntryImpl(Handle<LedgerEntryImpl> handle) {
        this.recycleHandle = handle;
    }
    public void setEntryId(long entryId) {
        this.entryId = entryId;
    }
    public void setLength(long length) {
        this.length = length;
    }
    // Replaces the payload, releasing the previously-held buffer (null-safe).
    public void setEntryBuf(ByteBuf buf) {
        ReferenceCountUtil.release(entryBuf);
        this.entryBuf = buf;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public long getLedgerId() {
        return ledgerId;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public long getEntryId() {
        return entryId;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public long getLength() {
        return length;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public byte[] getEntryBytes() {
        // Copying read (last arg 'copy=false' refers to ByteBufUtil's internal fast path);
        // does not advance the buffer's reader index. NPE if no buffer was ever attached.
        return ByteBufUtil.getBytes(entryBuf, entryBuf.readerIndex(), entryBuf.readableBytes(), false);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public ByteBuf getEntryBuffer() {
        // Returns the internal buffer WITHOUT retaining — caller must not release it.
        return entryBuf;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public ByteBuffer getEntryNioBuffer() {
        return entryBuf.nioBuffer();
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public LedgerEntryImpl duplicate() {
        return duplicate(this);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void close() {
        recycle();
    }
    // Resets all fields, releases the buffer reference, and returns this object to the pool.
    private void recycle() {
        this.ledgerId = -1L;
        this.entryId = -1L;
        this.length = -1L;
        ReferenceCountUtil.release(entryBuf);
        this.entryBuf = null;
        recycleHandle.recycle(this);
    }
}
| 382 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/impl/package-info.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
/**
* BookKeeper Client implementation package.
*
* @since 4.6
*/
package org.apache.bookkeeper.client.impl;
| 383 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/BKException.java | /*
* Copyright 2017 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client.api;
import java.lang.reflect.Field;
import java.util.function.Function;
import org.apache.bookkeeper.client.LedgerHandleAdv;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
/**
* Super class for all errors which occur using BookKeeper client.
*
* @since 4.6
*/
@Public
@Unstable
public class BKException extends Exception {
    // Normalizes an arbitrary Throwable into a BKException: null passes through,
    // BKExceptions are returned as-is, anything else is wrapped as
    // UnexpectedConditionException with the original as its cause.
    static final Function<Throwable, BKException> HANDLER = cause -> {
        if (cause == null) {
            return null;
        }
        if (cause instanceof BKException) {
            return (BKException) cause;
        } else {
            BKException ex = new BKException(Code.UnexpectedConditionException);
            ex.initCause(cause);
            return ex;
        }
    };
    // The Code.* value identifying this error; immutable for the exception's lifetime.
    protected final int code;
    // Shared pool of lazy per-code log formatters backing codeLogger(int).
    private static final LogMessagePool logMessagePool = new LogMessagePool();
    /**
     * Create a new exception. The exception message is derived from the code
     * via {@link #getMessage(int)}.
     *
     * @param code the error code
     *
     * @see Code
     */
    public BKException(int code) {
        super(getMessage(code));
        this.code = code;
    }
    /**
     * Create a new exception with the <tt>cause</tt>. The exception message is
     * derived from the code via {@link #getMessage(int)}.
     *
     * @param code exception code
     * @param cause the exception cause
     */
    public BKException(int code, Throwable cause) {
        super(getMessage(code), cause);
        this.code = code;
    }
    /**
     * Get the return code for the exception.
     *
     * @return the error code
     *
     * @see Code
     */
    public final int getCode() {
        return this.code;
    }
    /**
     * Returns a lazy error code formatter suitable to pass to log functions:
     * the code-to-message rendering only happens if/when the logger actually
     * formats the message.
     *
     * @param code the error code value
     *
     * @return lazy error code log formatter
     */
    public static Object codeLogger(int code) {
        return logMessagePool.get(code);
    }
    /**
     * Describe an error code.
     *
     * <p>Unknown codes (including {@link Code#UNINITIALIZED}) fall through to
     * the generic "Unexpected condition" message rather than failing.
     *
     * @param code the error code value
     *
     * @return the description of the error code
     */
    public static String getMessage(int code) {
        switch (code) {
            case Code.OK:
                return "No problem";
            case Code.ReadException:
                return "Error while reading ledger";
            case Code.QuorumException:
                return "Invalid quorum size on ensemble size";
            case Code.NoBookieAvailableException:
                return "No bookie available";
            case Code.DigestNotInitializedException:
                return "Digest engine not initialized";
            case Code.DigestMatchException:
                return "Entry digest does not match";
            case Code.NotEnoughBookiesException:
                return "Not enough non-faulty bookies available";
            case Code.NoSuchLedgerExistsException:
                return "No such ledger exists on Bookies";
            case Code.NoSuchLedgerExistsOnMetadataServerException:
                return "No such ledger exists on Metadata Server";
            case Code.BookieHandleNotAvailableException:
                return "Bookie handle is not available";
            case Code.ZKException:
                return "Error while using ZooKeeper";
            case Code.MetaStoreException:
                return "Error while using MetaStore";
            case Code.LedgerExistException:
                return "Ledger existed";
            case Code.LedgerRecoveryException:
                return "Error while recovering ledger";
            case Code.LedgerClosedException:
                return "Attempt to write to a closed ledger";
            case Code.WriteException:
                return "Write failed on bookie";
            case Code.NoSuchEntryException:
                return "No such entry";
            case Code.IncorrectParameterException:
                return "Incorrect parameter input";
            case Code.InterruptedException:
                return "Interrupted while waiting for permit";
            case Code.ProtocolVersionException:
                return "Bookie protocol version on server is incompatible with client";
            case Code.MetadataVersionException:
                return "Bad ledger metadata version";
            case Code.DuplicateEntryIdException:
                return "Attempted to add Duplicate entryId";
            case Code.LedgerFencedException:
                return "Ledger has been fenced off. Some other client must have opened it to read";
            case Code.UnauthorizedAccessException:
                return "Attempted to access ledger using the wrong password";
            case Code.UnclosedFragmentException:
                return "Attempting to use an unclosed fragment; This is not safe";
            case Code.WriteOnReadOnlyBookieException:
                return "Attempting to write on ReadOnly bookie";
            case Code.TooManyRequestsException:
                return "Too many requests to the same Bookie";
            case Code.LedgerIdOverflowException:
                return "Next ledgerID is too large.";
            case Code.ReplicationException:
                return "Errors in replication pipeline";
            case Code.ClientClosedException:
                return "BookKeeper client is closed";
            case Code.IllegalOpException:
                return "Invalid operation";
            case Code.AddEntryQuorumTimeoutException:
                return "Add entry quorum wait timed out";
            case Code.TimeoutException:
                return "Bookie operation timeout";
            case Code.SecurityException:
                return "Failed to establish a secure connection";
            case Code.MetadataSerializationException:
                return "Failed to serialize metadata";
            case Code.DataUnknownException:
                return "Ledger in limbo";
            default:
                // Catch-all for unmapped/unknown codes.
                return "Unexpected condition";
        }
    }
/**
* Codes which represent the various exception types.
*/
public interface Code {
/** A placer holder (unused). */
int UNINITIALIZED = 1;
/** Everything is OK. */
int OK = 0;
/** Read operations failed (bookie error). */
int ReadException = -1;
/** Unused. */
int QuorumException = -2;
/** Unused. */
int NoBookieAvailableException = -3;
/** Digest Manager is not initialized (client error). */
int DigestNotInitializedException = -4;
/** Digest doesn't match on returned entries. */
int DigestMatchException = -5;
/** Not enough bookies available to form an ensemble. */
int NotEnoughBookiesException = -6;
/** No such ledger exists. */
int NoSuchLedgerExistsException = -7;
/** Bookies are not available. */
int BookieHandleNotAvailableException = -8;
/** ZooKeeper operations failed. */
int ZKException = -9;
/** Ledger recovery operations failed. */
int LedgerRecoveryException = -10;
/** Executing operations on a closed ledger handle. */
int LedgerClosedException = -11;
/** Write operations failed (bookie error). */
int WriteException = -12;
/** No such entry exists. */
int NoSuchEntryException = -13;
/** Incorrect parameters (operations are absolutely not executed). */
int IncorrectParameterException = -14;
/** Synchronous operations are interrupted. */
int InterruptedException = -15;
/** Protocol version is wrong (operations are absolutely not executed). */
int ProtocolVersionException = -16;
/** Bad version on executing metadata operations. */
int MetadataVersionException = -17;
/** Meta store operations failed. */
int MetaStoreException = -18;
/** Executing operations on a closed client. */
int ClientClosedException = -19;
/** Ledger already exists. */
int LedgerExistException = -20;
/**
* Add entry operation timeouts on waiting quorum responses.
*
* @since 4.5
*/
int AddEntryQuorumTimeoutException = -21;
/**
* Duplicated entry id is found when {@link LedgerHandleAdv#addEntry(long, byte[])}.
*
* @since 4.5
*/
int DuplicateEntryIdException = -22;
/**
* Operations timeouts.
*
* @since 4.5
*/
int TimeoutException = -23;
int SecurityException = -24;
/** No such ledger exists one metadata server. */
int NoSuchLedgerExistsOnMetadataServerException = -25;
/**
* Operation is illegal.
*/
int IllegalOpException = -100;
/**
* Operations failed due to ledgers are fenced.
*/
int LedgerFencedException = -101;
/**
* Operations failed due to unauthorized.
*/
int UnauthorizedAccessException = -102;
/**
* Replication failed due to unclosed fragments.
*/
int UnclosedFragmentException = -103;
/**
* Write operations failed due to bookies are readonly.
*/
int WriteOnReadOnlyBookieException = -104;
/**
* Operations failed due to too many requests in the queue.
*/
int TooManyRequestsException = -105;
/**
* Ledger id overflow happens on ledger manager.
*
* @since 4.5
*/
int LedgerIdOverflowException = -106;
/**
* Failure to serialize metadata.
*
* @since 4.9
*/
int MetadataSerializationException = -107;
/**
* Operations failed due to ledger data in an unknown state.
*/
int DataUnknownException = -108;
/**
* Generic exception code used to propagate in replication pipeline.
*/
int ReplicationException = -200;
/**
* Unexpected condition.
*/
int UnexpectedConditionException = -999;
}
/**
* Code log message pool.
*/
private static class LogMessagePool {
private final int minCode;
private final String[] pool;
private LogMessagePool() {
Field[] fields = Code.class.getDeclaredFields();
this.minCode = minCode(fields);
this.pool = new String[-minCode + 2]; // UnexpectedConditionException is an outlier
initPoolMessages(fields);
}
private int minCode(Field[] fields) {
int min = 0;
for (Field field : fields) {
int code = getFieldInt(field);
if (code < min && code > Code.UnexpectedConditionException) {
min = code;
}
}
return min;
}
private void initPoolMessages(Field[] fields) {
for (Field field : fields) {
int code = getFieldInt(field);
int index = poolIndex(code);
if (index >= 0) {
pool[index] = String.format("%s: %s", field.getName(), getMessage(code));
}
}
}
private static int getFieldInt(Field field) {
try {
return field.getInt(null);
} catch (IllegalAccessException e) {
return -1;
}
}
private Object get(int code) {
int index = poolIndex(code);
String logMessage = index >= 0 ? pool[index] : null;
return logMessage != null ? logMessage : new UnrecognizedCodeLogFormatter(code);
}
private int poolIndex(int code) {
switch (code) {
case Code.UnexpectedConditionException:
return -minCode + 1;
default:
return code <= 0 && code >= minCode ? -minCode + code : -1;
}
}
/**
* Unrecognized code lazy log message formatter.
*/
private static class UnrecognizedCodeLogFormatter {
private final int code;
private UnrecognizedCodeLogFormatter(int code) {
this.code = code;
}
@Override
public String toString() {
return String.format("%d: %s", code, getMessage(code));
}
}
}
}
| 384 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/DigestType.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
/**
 * Digest type.
 *
 * <p>Determines how (or whether) entry payloads are checksummed.
 *
 * @since 4.6
 */
@Public
@Unstable
public enum DigestType {
    /**
     * Entries are verified by applying the CRC32 algorithm.
     */
    CRC32,
    /**
     * Entries are verified by applying a MAC algorithm.
     */
    MAC,
    /**
     * Entries are verified by applying the CRC32C algorithm.
     */
    CRC32C,
    /**
     * Entries are not verified.
     */
    DUMMY,
}
| 385 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/OpenBuilder.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
import org.apache.bookkeeper.conf.ClientConfiguration;
/**
 * Builder-style interface to open existing ledgers.
 *
 * @since 4.6
 */
@Public
@Unstable
public interface OpenBuilder extends OpBuilder<ReadHandle> {
    /**
     * Set the id of the ledger to be opened.
     *
     * @param ledgerId the id of the ledger to open
     *
     * @return the builder itself
     */
    OpenBuilder withLedgerId(long ledgerId);
    /**
     * Define to open the ledger in recovery mode or in readonly mode. In recovery mode the ledger will be fenced and
     * the writer of the ledger will be prevented from issuing other writes to the ledger. It defaults to 'false'.
     *
     * @param recovery recovery mode
     *
     * @return the builder itself
     */
    OpenBuilder withRecovery(boolean recovery);
    /**
     * Sets the password to be used to open the ledger. It defaults to an empty password.
     *
     * @param password the password to unlock the ledger
     *
     * @return the builder itself
     */
    OpenBuilder withPassword(byte[] password);
    /**
     * Sets the expected digest type used to check the contents of the ledger. It defaults to {@link DigestType#CRC32}.
     * If {@link ClientConfiguration#setEnableDigestTypeAutodetection(boolean) } is set to true this value is ignored
     * and the digest type is read directly from metadata.
     *
     * @param digestType the type of digest
     *
     * @return the builder itself
     */
    OpenBuilder withDigestType(DigestType digestType);
}
| 386 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/WriteHandle.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.ByteBuffer;
import java.util.concurrent.CompletableFuture;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
/**
 * Provide write access to a ledger.
 *
 * @see WriteAdvHandle
 *
 * @since 4.6
 */
@Public
@Unstable
public interface WriteHandle extends ReadHandle, ForceableHandle {
    /**
     * Add entry asynchronously to an open ledger.
     *
     * @param data a bytebuf to be written. The bytebuf's reference count will be decremented by 1 after the
     *             completable future is returned;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @return a handle to the result, in case of success it will return the id of the newly appended entry
     */
    CompletableFuture<Long> appendAsync(ByteBuf data);
    /**
     * Add entry synchronously to an open ledger.
     *
     * @param data a bytebuf to be written. The bytebuf's reference count will be decremented by 1 after the
     *             call completes;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @return the id of the newly appended entry
     */
    default long append(ByteBuf data) throws BKException, InterruptedException {
        return FutureUtils.<Long, BKException>result(appendAsync(data), BKException.HANDLER);
    }
    /**
     * Add entry asynchronously to an open ledger.
     *
     * @param data a ByteBuffer to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @return a handle to the result, in case of success it will return the id of the newly appended entry
     */
    default CompletableFuture<Long> appendAsync(ByteBuffer data) {
        return appendAsync(Unpooled.wrappedBuffer(data));
    }
    /**
     * Add entry synchronously to an open ledger.
     *
     * @param data a ByteBuffer to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @return the id of the newly appended entry
     */
    default long append(ByteBuffer data) throws BKException, InterruptedException {
        return append(Unpooled.wrappedBuffer(data));
    }
    /**
     * Add an entry asynchronously to an open ledger.
     *
     * @param data array of bytes to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @return a completable future represents the add result, in case of success the future returns the entry id
     *         of this newly appended entry
     */
    default CompletableFuture<Long> appendAsync(byte[] data) {
        return appendAsync(Unpooled.wrappedBuffer(data));
    }
    /**
     * Add an entry synchronously to an open ledger.
     *
     * @param data array of bytes to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @return the entry id of this newly appended entry
     */
    default long append(byte[] data) throws BKException, InterruptedException {
        return append(Unpooled.wrappedBuffer(data));
    }
    /**
     * Add an entry asynchronously to an open ledger.
     *
     * @param data array of bytes to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @param offset the offset in the bytes array
     * @param length the length of the bytes to be appended
     * @return a completable future represents the add result, in case of success the future returns the entry id
     *         of this newly appended entry
     */
    default CompletableFuture<Long> appendAsync(byte[] data, int offset, int length) {
        return appendAsync(Unpooled.wrappedBuffer(data, offset, length));
    }
    /**
     * Add an entry synchronously to an open ledger.
     *
     * @param data array of bytes to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @param offset the offset in the bytes array
     * @param length the length of the bytes to be appended
     * @return the entry id of this newly appended entry
     */
    default long append(byte[] data, int offset, int length) throws BKException, InterruptedException {
        return append(Unpooled.wrappedBuffer(data, offset, length));
    }
    /**
     * Get the entry id of the last entry that has been enqueued for addition (but
     * may not have possibly been persisted to the ledger).
     *
     * @return the entry id of the last entry pushed or -1 if no entry has been pushed
     */
    long getLastAddPushed();
    /**
     * Asynchronously close the write handle; any adds in flight will return errors.
     *
     * <p>Closing a ledger will ensure that all clients agree on what the last
     * entry of the ledger is. Once the ledger has been closed, all reads from the
     * ledger will return the same set of entries.
     *
     * <p>The close operation can error if it finds conflicting metadata when it
     * tries to write to the metadata store. On close, the metadata state is set to
     * closed and lastEntry and length of the ledger are fixed in the metadata. A
     * conflict occurs if the metadata in the metadata store has a different value for
     * the lastEntry or length. If another process has updated the metadata, setting it
     * to closed, but have fixed the lastEntry and length to the same values as this
     * process is trying to write, the operation completes successfully.
     *
     * @return a handle to access the result of the operation
     */
    @Override
    CompletableFuture<Void> closeAsync();
    /**
     * Synchronously close the write handle; any adds in flight will return errors.
     *
     * <p>Closing a ledger will ensure that all clients agree on what the last
     * entry of the ledger is. Once the ledger has been closed, all reads from the
     * ledger will return the same set of entries.
     *
     * <p>The close operation can error if it finds conflicting metadata when it
     * tries to write to the metadata store. On close, the metadata state is set to
     * closed and lastEntry and length of the ledger are fixed in the metadata. A
     * conflict occurs if the metadata in the metadata store has a different value for
     * the lastEntry or length. If another process has updated the metadata, setting it
     * to closed, but have fixed the lastEntry and length to the same values as this
     * process is trying to write, the operation completes successfully.
     */
    @Override
    default void close() throws BKException, InterruptedException {
        FutureUtils.<Void, BKException>result(closeAsync(), BKException.HANDLER);
    }
}
| 387 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/WriteAdvHandle.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.ByteBuffer;
import java.util.concurrent.CompletableFuture;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
/**
 * Provide write access to a ledger. Using WriteAdvHandle the writer MUST explicitly set an entryId. Beware that the
 * write for a given entryId will be acknowledged if and only if all entries up to entryId - 1 have been acknowledged
 * too (expected from entryId 0).
 *
 * @see WriteHandle
 *
 * @since 4.6
 */
@Public
@Unstable
public interface WriteAdvHandle extends ReadHandle, ForceableHandle {
    /**
     * Add entry asynchronously to an open ledger.
     *
     * @param entryId entryId to be added
     * @param data a ByteBuffer to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @return a handle to the result, in case of success it will return the same value of param entryId.
     */
    default CompletableFuture<Long> writeAsync(final long entryId, final ByteBuffer data) {
        return writeAsync(entryId, Unpooled.wrappedBuffer(data));
    }
    /**
     * Add entry synchronously to an open ledger.
     *
     * @param entryId entryId to be added
     * @param data a ByteBuffer to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @return the same value of param entryId.
     */
    default long write(final long entryId, final ByteBuffer data)
            throws BKException, InterruptedException {
        return write(entryId, Unpooled.wrappedBuffer(data));
    }
    /**
     * Add entry asynchronously to an open ledger.
     *
     * @param entryId entryId to be added.
     * @param data array of bytes to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @return a handle to the result, in case of success it will return the same value of param {@code entryId}.
     */
    default CompletableFuture<Long> writeAsync(final long entryId, final byte[] data) {
        return writeAsync(entryId, Unpooled.wrappedBuffer(data));
    }
    /**
     * Add entry synchronously to an open ledger.
     *
     * @param entryId entryId to be added.
     * @param data array of bytes to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @return same value of param {@code entryId}.
     */
    default long write(final long entryId, final byte[] data)
            throws BKException, InterruptedException {
        return write(entryId, Unpooled.wrappedBuffer(data));
    }
    /**
     * Add entry asynchronously to an open ledger.
     *
     * @param entryId entryId to be added.
     * @param data array of bytes to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @param offset the offset of the bytes array
     * @param length the length to data to write
     * @return a handle to the result, in case of success it will return the same value of param {@code entryId}.
     */
    default CompletableFuture<Long> writeAsync(final long entryId, final byte[] data, int offset, int length) {
        return writeAsync(entryId, Unpooled.wrappedBuffer(data, offset, length));
    }
    /**
     * Add entry synchronously to an open ledger.
     *
     * @param entryId entryId to be added.
     * @param data array of bytes to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @param offset the offset of the bytes array
     * @param length the length to data to write
     * @return the same value of param {@code entryId}.
     */
    default long write(final long entryId, final byte[] data, int offset, int length)
            throws BKException, InterruptedException {
        return write(entryId, Unpooled.wrappedBuffer(data, offset, length));
    }
    /**
     * Add entry asynchronously to an open ledger.
     *
     * @param entryId entryId to be added
     * @param data a bytebuf to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @return a handle to the result, in case of success it will return the same value of param entryId
     */
    CompletableFuture<Long> writeAsync(long entryId, ByteBuf data);
    /**
     * Add entry synchronously to an open ledger.
     *
     * @param entryId entryId to be added
     * @param data a bytebuf to be written;
     *             do not reuse the buffer, bk-client will release it appropriately.
     * @return the same value of param entryId
     */
    default long write(long entryId, ByteBuf data) throws BKException, InterruptedException {
        return FutureUtils.<Long, BKException>result(writeAsync(entryId, data), BKException.HANDLER);
    }
}
| 388 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/ListLedgersResult.java | /*
* Copyright 2020 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client.api;
/**
 * Utility container for listing ledgers.
 */
public interface ListLedgersResult extends AutoCloseable {
    /**
     * Creates a <code>LedgersIterator</code>.
     * This method must be called at most once per <code>ListLedgersResult</code> instance.
     *
     * @return a <code>LedgersIterator</code> instance.
     */
    LedgersIterator iterator();
    /**
     * Creates an <code>Iterable</code>, which wraps a <code>LedgersIterator</code>.
     * This method must be called at most once per <code>ListLedgersResult</code> instance.
     * <br>
     * Metadata store access exceptions (<code>IOException</code>) are wrapped within a RuntimeException;
     * if you want to take care of these cases, it is better to use <code>LedgersIterator</code>.
     *
     * @return an <code>Iterable</code> instance, containing ledger ids.
     */
    Iterable<Long> toIterable();
}
| 389 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/BookKeeperBuilder.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.EventLoopGroup;
import io.netty.util.HashedWheelTimer;
import java.io.IOException;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
import org.apache.bookkeeper.feature.FeatureProvider;
import org.apache.bookkeeper.net.DNSToSwitchMapping;
import org.apache.bookkeeper.stats.StatsLogger;
/**
 * BookKeeper Client Builder to build client instances.
 *
 * @since 4.6
 */
@Public
@Unstable
public interface BookKeeperBuilder {
    /**
     * Configure the bookkeeper client with a provided Netty EventLoopGroup.
     *
     * @param eventLoopGroup an external {@link EventLoopGroup} to use by the bookkeeper client.
     *
     * @return client builder.
     */
    BookKeeperBuilder eventLoopGroup(EventLoopGroup eventLoopGroup);
    /**
     * Configure the bookkeeper client with a provided {@link ByteBufAllocator}.
     *
     * @param allocator an external {@link ByteBufAllocator} to use by the bookkeeper client.
     * @return client builder.
     * @since 4.9
     */
    BookKeeperBuilder allocator(ByteBufAllocator allocator);
    /**
     * Configure the bookkeeper client with a provided {@link StatsLogger}.
     *
     * @param statsLogger a {@link StatsLogger} to use by the bookkeeper client to collect stats generated by the
     *                    client.
     *
     * @return client builder.
     */
    BookKeeperBuilder statsLogger(StatsLogger statsLogger);
    /**
     * Configure the bookkeeper client to use the provided dns resolver {@link DNSToSwitchMapping}.
     *
     * @param dnsResolver dns resolver for placement policy to use for resolving network locations.
     *
     * @return client builder
     */
    BookKeeperBuilder dnsResolver(DNSToSwitchMapping dnsResolver);
    /**
     * Configure the bookkeeper client to use a provided Netty HashedWheelTimer.
     *
     * @param requestTimer request timer for client to manage timer related tasks.
     *
     * @return client builder
     */
    BookKeeperBuilder requestTimer(HashedWheelTimer requestTimer);
    /**
     * Configure the bookkeeper client to use a provided {@link FeatureProvider}.
     *
     * @param featureProvider the feature provider
     *
     * @return client builder
     */
    BookKeeperBuilder featureProvider(FeatureProvider featureProvider);
    /**
     * Start and initialize a new BookKeeper client.
     *
     * @return the client
     *
     * @throws BKException if the client cannot be initialized
     * @throws InterruptedException if the current thread is interrupted while initializing
     * @throws IOException on metadata service connection problems
     */
    BookKeeper build() throws BKException, InterruptedException, IOException;
}
| 390 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/LedgerMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.client.api;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.LimitedPrivate;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
import org.apache.bookkeeper.net.BookieId;
/**
 * Represents the client-side metadata of a ledger. It is immutable.
 *
 * @since 4.6
 */
@LimitedPrivate
@Unstable
public interface LedgerMetadata {
    /**
     * Returns the id of this ledger.
     *
     * @return the id of this ledger.
     */
    long getLedgerId();
    /**
     * Returns the ensemble size of this ledger.
     *
     * @return the ensemble size of this ledger.
     */
    int getEnsembleSize();
    /**
     * Returns the write quorum size of this ledger.
     *
     * @return the write quorum size of this ledger.
     */
    int getWriteQuorumSize();
    /**
     * Returns the ack quorum size of this ledger.
     *
     * @return the ack quorum size of this ledger.
     */
    int getAckQuorumSize();
    /**
     * Returns the last entry id of this ledger.
     *
     * <p>If this ledger is not sealed {@link #isClosed()}, it returns {@code -1L}.
     *
     * @return the last entry id of this ledger if it is sealed, otherwise -1.
     */
    long getLastEntryId();
    /**
     * Returns the length of this ledger.
     *
     * <p>If this ledger is not sealed {@link #isClosed()}, it returns {@code 0}.
     *
     * @return the length of this ledger if it is sealed, otherwise 0.
     */
    long getLength();
    /**
     * Whether the metadata contains the password and digest type for the ledger.
     * Ledgers created with version 4.1.0 clients or older do not have this information.
     *
     * @return true if the metadata contains the password and digest type, false otherwise.
     */
    boolean hasPassword();
    /**
     * Get the password for the ledger.
     * For ledgers created with version 4.1.0 or older, an empty byte array is returned.
     *
     * @return the password for the ledger.
     */
    byte[] getPassword();
    /**
     * Returns the digest type used by this ledger.
     * May return null if the ledger was created with version 4.1.0 or below.
     *
     * @return the digest type used by this ledger.
     */
    DigestType getDigestType();
    /**
     * Returns the creation timestamp of this ledger.
     *
     * @return the creation timestamp of this ledger.
     */
    long getCtime();
    /**
     * Returns whether the ledger is sealed or not.
     *
     * @return true if the ledger is sealed, otherwise false.
     */
    boolean isClosed();
    /**
     * Returns the custom metadata stored with the ledgers.
     *
     * @return the custom metadata stored with the ledgers.
     */
    Map<String, byte[]> getCustomMetadata();
    /**
     * Returns the ensemble at the given {@code entryId}.
     *
     * @param entryId the entry id to retrieve its ensemble information
     * @return the ensemble which contains the given {@code entryId}.
     */
    List<BookieId> getEnsembleAt(long entryId);
    /**
     * Returns all the ensembles of this ledger, keyed by the first entry id covered by each ensemble.
     *
     * @return all the ensembles of this ledger.
     */
    NavigableMap<Long, ? extends List<BookieId>> getAllEnsembles();
    /**
     * Returns the state of the metadata.
     *
     * @return the state of the metadata.
     */
    State getState();
    /**
     * Possible metadata states.
     */
    enum State {
        /** The ledger is open. New entries may be added to it. */
        OPEN,
        /** A reader has tried to, or may be trying to, recover the ledger.
            The writer may be able to add new entries if fencing hasn't already occurred,
            but any attempt to change ensemble will fail and the writer will be forced to
            close the ledger.
         */
        IN_RECOVERY,
        /** The ledger is closed. No new entries may be added to it.
            The length and lastEntryId are fixed. Ensembles may change, but only for rereplication.
         */
        CLOSED
    }
    /**
     * Similar to #toString(), but omits the password of the ledger, so that it is safe to log the output.
     *
     * @return a string representation of the metadata, omitting the password.
     */
    String toSafeString();
    /**
     * Get the format version which should be used to serialize the metadata.
     *
     * @return the format version.
     */
    int getMetadataFormatVersion();
    /**
     * Get the unique creator token of the Ledger.
     *
     * @return the creator token
     */
    long getCToken();
}
| 391 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/DeleteBuilder.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
/**
 * Builder-style interface to delete existing ledgers.
*
* @since 4.6
*/
@Public
@Unstable
public interface DeleteBuilder extends OpBuilder<Void> {

    /**
     * Set the id of the ledger to be deleted.
     *
     * @param ledgerId the id of the ledger to delete
     *
     * @return the builder itself
     */
    DeleteBuilder withLedgerId(long ledgerId);
}
| 392 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/BookKeeper.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import java.util.concurrent.CompletableFuture;
import org.apache.bookkeeper.client.impl.BookKeeperBuilderImpl;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
import org.apache.bookkeeper.conf.ClientConfiguration;
/**
* This is the entry point for BookKeeper client side API.
*
* @since 4.6
*/
@Public
@Unstable
public interface BookKeeper extends AutoCloseable {

    /**
     * Create a new builder which can be used to boot a new BookKeeper client.
     *
     * @param clientConfiguration the configuration for the client
     * @return a builder that produces {@link BookKeeper} instances
     */
    static BookKeeperBuilder newBuilder(final ClientConfiguration clientConfiguration) {
        return new BookKeeperBuilderImpl(clientConfiguration);
    }

    /**
     * Start the creation of a new ledger.
     *
     * @return a builder for the new ledger
     */
    CreateBuilder newCreateLedgerOp();

    /**
     * Open an existing ledger.
     *
     * @return a builder useful to create a readable handle for an existing ledger
     */
    OpenBuilder newOpenLedgerOp();

    /**
     * Delete an existing ledger.
     *
     * @return a builder useful to delete an existing ledger
     */
    DeleteBuilder newDeleteLedgerOp();

    /**
     * List ledgers.
     *
     * @return a builder useful to list ledgers.
     */
    ListLedgersResultBuilder newListLedgersOp();

    /**
     * Get ledger metadata of a given ledger id.
     *
     * @param ledgerId id of the ledger.
     * @return a <code>CompletableFuture</code> instance containing ledger metadata.
     */
    CompletableFuture<LedgerMetadata> getLedgerMetadata(long ledgerId);

    /**
     * Close the client and release every resource.
     *
     * @throws BKException if an error occurs while releasing resources
     * @throws InterruptedException if the current thread is interrupted while closing
     */
    @Override
    void close() throws BKException, InterruptedException;
}
| 393 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/CreateBuilder.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.Map;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
/**
* Builder-style interface to create new ledgers.
*
* @since 4.6
* @see BookKeeper#newCreateLedgerOp()
*/
@Public
@Unstable
public interface CreateBuilder extends OpBuilder<WriteHandle> {

    /**
     * Set the number of bookies which will receive data for this ledger. It defaults to 3.
     *
     * @param ensembleSize the number of bookies
     *
     * @return the builder itself
     */
    CreateBuilder withEnsembleSize(int ensembleSize);

    /**
     * Set the number of bookies which receive every single entry.
     * In case of ensembleSize > writeQuorumSize data will
     * be striped across a number of ensembleSize bookies. It defaults to 2.
     *
     * @param writeQuorumSize the replication factor for each entry
     *
     * @return the builder itself
     */
    CreateBuilder withWriteQuorumSize(int writeQuorumSize);

    /**
     * Set the number of acknowledgements to wait before considering a write to be completed with success. This value
     * can be less or equals to writeQuorumSize. It defaults to 2.
     *
     * @param ackQuorumSize the number of acknowledgements to wait for
     *
     * @return the builder itself
     */
    CreateBuilder withAckQuorumSize(int ackQuorumSize);

    /**
     * Set a password for the ledger. It defaults to an empty password.
     *
     * @param password the password
     *
     * @return the builder itself
     */
    CreateBuilder withPassword(byte[] password);

    /**
     * Set write flags. Write flags specify the behaviour of writes.
     *
     * @param writeFlags the flags
     *
     * @return the builder itself
     */
    CreateBuilder withWriteFlags(EnumSet<WriteFlag> writeFlags);

    /**
     * Set write flags. Write flags specify the behaviour of writes.
     *
     * <p>Passing no flags is allowed and is equivalent to passing an empty set.
     *
     * @param writeFlags the flags
     *
     * @return the builder itself
     */
    default CreateBuilder withWriteFlags(WriteFlag... writeFlags) {
        // EnumSet.copyOf(Collection) throws IllegalArgumentException when given an
        // empty non-EnumSet collection (it cannot infer the element type), so build
        // the set explicitly to support the zero-flag call.
        EnumSet<WriteFlag> flags = EnumSet.noneOf(WriteFlag.class);
        flags.addAll(Arrays.asList(writeFlags));
        return withWriteFlags(flags);
    }

    /**
     * Set a map of custom data to be attached to the ledger. The application is responsible for the semantics of
     * these data.
     *
     * @param customMetadata the ledger metadata
     *
     * @return the builder itself
     */
    CreateBuilder withCustomMetadata(Map<String, byte[]> customMetadata);

    /**
     * Set the Digest type used to guard data against corruption. It defaults to {@link DigestType#CRC32}
     *
     * @param digestType the type of digest
     *
     * @return the builder itself
     */
    CreateBuilder withDigestType(DigestType digestType);

    /**
     * Switch the ledger into 'Advanced' mode. A ledger used in Advanced mode will explicitly generate the sequence of
     * entry identifiers. Advanced ledgers can be created with a client side defined ledgerId.
     *
     * @return a new {@link CreateAdvBuilder} builder
     */
    CreateAdvBuilder makeAdv();
}
| 394 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/ReadHandle.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import java.util.concurrent.CompletableFuture;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
/**
* Provide read access to a ledger.
*
* @since 4.6
*/
@Public
@Unstable
public interface ReadHandle extends Handle {

    /**
     * Read a sequence of entries asynchronously.
     *
     * @param firstEntry
     *          id of first entry of sequence
     * @param lastEntry
     *          id of last entry of sequence, inclusive
     * @return a handle to the result of the operation
     */
    CompletableFuture<LedgerEntries> readAsync(long firstEntry, long lastEntry);

    /**
     * Read a sequence of entries synchronously.
     *
     * @param firstEntry
     *          id of first entry of sequence
     * @param lastEntry
     *          id of last entry of sequence, inclusive
     * @return the result of the operation
     */
    default LedgerEntries read(long firstEntry, long lastEntry) throws BKException, InterruptedException {
        return FutureUtils.<LedgerEntries, BKException>result(readAsync(firstEntry, lastEntry),
                                                             BKException.HANDLER);
    }

    /**
     * Read a sequence of entries asynchronously, allowing to read after the LastAddConfirmed range.
     * <br>This is the same as
     * {@link #read(long, long) }
     * but it lets the client read without checking the local value of LastAddConfirmed, so that it is possible to
     * read entries for which the writer has not received the acknowledge yet. <br>
     * For entries which are within the range 0..LastAddConfirmed BookKeeper guarantees that the writer has successfully
     * received the acknowledge.<br>
     * For entries outside that range it is possible that the writer never received the acknowledge
     * and so there is the risk that the reader is seeing entries before the writer and this could
     * result in a consistency issue in some cases.<br>
     * With this method you can even read entries before the LastAddConfirmed and entries after it with one call,
     * the expected consistency will be as described above for each subrange of ids.
     *
     * @param firstEntry
     *          id of first entry of sequence
     * @param lastEntry
     *          id of last entry of sequence, inclusive
     * @return a handle to the result of the operation
     *
     * @see #readAsync(long, long)
     * @see #readLastAddConfirmedAsync()
     */
    CompletableFuture<LedgerEntries> readUnconfirmedAsync(long firstEntry, long lastEntry);

    /**
     * Read a sequence of entries synchronously, allowing to read after the LastAddConfirmed range.
     *
     * @param firstEntry
     *          id of first entry of sequence
     * @param lastEntry
     *          id of last entry of sequence, inclusive
     * @return the result of the operation
     *
     * @see #readUnconfirmedAsync(long, long)
     */
    default LedgerEntries readUnconfirmed(long firstEntry, long lastEntry)
            throws BKException, InterruptedException {
        return FutureUtils.<LedgerEntries, BKException>result(readUnconfirmedAsync(firstEntry, lastEntry),
                                                              BKException.HANDLER);
    }

    /**
     * Obtains asynchronously the last confirmed write from a quorum of bookies. This
     * call obtains the last add confirmed each bookie has received for this ledger
     * and returns the maximum. If the ledger has been closed, the value returned by this
     * call may not correspond to the id of the last entry of the ledger, since it reads
     * the hint of bookies. Consequently, in the case the ledger has been closed, it may
     * return a different value than getLastAddConfirmed, which returns the local value
     * of the ledger handle.
     *
     * @return a handle to the result of the operation
     * @see #getLastAddConfirmed()
     */
    CompletableFuture<Long> readLastAddConfirmedAsync();

    /**
     * Obtains synchronously the last confirmed write from a quorum of bookies.
     *
     * @return the result of the operation
     * @see #readLastAddConfirmedAsync()
     */
    default long readLastAddConfirmed() throws BKException, InterruptedException {
        return FutureUtils.<Long, BKException>result(readLastAddConfirmedAsync(),
                                                     BKException.HANDLER);
    }

    /**
     * Obtains asynchronously the last confirmed write from a quorum of bookies
     * but it doesn't wait for all the responses from the quorum. It would callback
     * immediately if it received a LAC which is larger than the current LAC.
     *
     * @return a handle to the result of the operation
     */
    CompletableFuture<Long> tryReadLastAddConfirmedAsync();

    /**
     * Obtains synchronously the last confirmed write from a quorum of bookies
     * but it doesn't wait for all the responses from the quorum.
     *
     * @return the result of the operation
     * @see #tryReadLastAddConfirmedAsync()
     */
    default long tryReadLastAddConfirmed() throws BKException, InterruptedException {
        return FutureUtils.<Long, BKException>result(tryReadLastAddConfirmedAsync(),
                                                     BKException.HANDLER);
    }

    /**
     * Get the last confirmed entry id on this ledger. It reads the local state of the ledger handle,
     * which is different from the {@link #readLastAddConfirmed()} call.
     *
     * <p>In the case the ledger is not closed and the client is a reader, it is necessary to
     * call {@link #readLastAddConfirmed()} to obtain a fresh value of last add confirmed entry id.
     *
     * @see #readLastAddConfirmed()
     *
     * @return the local value for LastAddConfirmed or -1L if no entry has been confirmed.
     */
    long getLastAddConfirmed();

    /**
     * Returns the length of the data written in this ledger so far, in bytes.
     *
     * @return the length of the data written in this ledger, in bytes.
     */
    long getLength();

    /**
     * Returns whether the ledger is sealed or not.
     *
     * <p>A ledger is sealed when either the client explicitly closes it ({@link WriteHandle#close()} or
     * {@link WriteAdvHandle#close()}) or another client explicitly opens and recovers it
     * {@link OpenBuilder#withRecovery(boolean)}.
     *
     * <p>This method only checks the metadata cached locally. The metadata can be out of date because
     * the metadata notification is delayed.
     *
     * @return true if the ledger is sealed, otherwise false.
     */
    boolean isClosed();

    /**
     * Asynchronously read a specific entry and the latest last add confirmed.
     * If the next entryId is less than the known last add confirmed, the call will read the next entry directly.
     * If the next entryId is ahead of the known last add confirmed, the call will issue a long poll read
     * to wait for the next entry <i>entryId</i>.
     *
     * @param entryId
     *          next entry id to read
     * @param timeOutInMillis
     *          timeout period to wait for the entry id to be available (for long poll only);
     *          if the wait times out, a null entry is returned.
     * @param parallel
     *          whether to issue the long poll reads in parallel
     * @return a handle to the result of the operation
     */
    CompletableFuture<LastConfirmedAndEntry> readLastAddConfirmedAndEntryAsync(long entryId,
                                                                               long timeOutInMillis,
                                                                               boolean parallel);

    /**
     * Synchronously read a specific entry and the latest last add confirmed.
     *
     * @param entryId
     *          next entry id to read
     * @param timeOutInMillis
     *          timeout period to wait for the entry id to be available (for long poll only);
     *          if the wait times out, a null entry is returned.
     * @param parallel
     *          whether to issue the long poll reads in parallel
     * @return the result of the operation
     * @see #readLastAddConfirmedAndEntryAsync(long, long, boolean)
     */
    default LastConfirmedAndEntry readLastAddConfirmedAndEntry(long entryId,
                                                               long timeOutInMillis,
                                                               boolean parallel)
            throws BKException, InterruptedException {
        return FutureUtils.<LastConfirmedAndEntry, BKException>result(
                readLastAddConfirmedAndEntryAsync(entryId, timeOutInMillis, parallel),
                BKException.HANDLER);
    }
}
| 395 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/LedgerEntry.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import io.netty.buffer.ByteBuf;
import java.nio.ByteBuffer;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
/**
* An entry in a ledger.
*
* <p>The entry implementation may hold references to byte buffers under the hood. The users holding the
* references to the instances of this class, are responsible for calling {@link LedgerEntry#close()} to release
* resources held by the entry instances.
*
* @since 4.6
*/
@Public
@Unstable
public interface LedgerEntry extends AutoCloseable {

    /**
     * The id of the ledger which contains the entry.
     *
     * @return the id of the ledger
     */
    long getLedgerId();

    /**
     * The id of the entry.
     *
     * @return the id of the entry
     */
    long getEntryId();

    /**
     * The length of the entry, that is the size of the content expressed in bytes.
     *
     * @return the size of the content
     */
    long getLength();

    /**
     * Returns the content of the entry as a byte array.
     *
     * @return the content of the entry
     */
    byte[] getEntryBytes();

    /**
     * Exposes this entry's data as an NIO {@link ByteBuffer}. The returned buffer
     * shares the content with the underlying bytebuf (which you can get via {@link #getEntryBuffer()}).
     * Changing the position and limit of the returned NIO buffer does not affect the indexes and
     * marks of the underlying buffer. This method is identical
     * to {@code entry.getEntryBuffer().nioBuffer()}. This method does not
     * modify {@code readerIndex} or {@code writerIndex} of the underlying bytebuf.
     *
     * @return an NIO buffer view over the entry payload
     */
    ByteBuffer getEntryNioBuffer();

    /**
     * Return the internal {@link ByteBuf} that contains the entry payload.
     *
     * <p>This call doesn't change the reference count on the returned bytebuf. If you want to use the bytebuf
     * after the entry is released (via {@link #close()}), the caller must retain the references of the bytebuf.
     *
     * @return a ByteBuf which contains the data
     */
    ByteBuf getEntryBuffer();

    /**
     * Returns a duplicate of this entry.
     *
     * <p>This call will retain a slice of the underlying byte buffer.
     *
     * @return a duplicated ledger entry
     */
    LedgerEntry duplicate();

    /**
     * {@inheritDoc}
     */
    @Override
    void close();
}
| 396 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/LedgersIterator.java | /*
* Copyright 2020 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client.api;
import java.io.IOException;
/**
* Iterator for ledgers.
*/
public interface LedgersIterator {

    /**
     * Return true if there is at least one more ledger to visit.
     *
     * @return true if there is at least one more ledger to visit.
     * @throws IOException thrown when there is a problem accessing the ledger metadata store.
     */
    boolean hasNext() throws IOException;

    /**
     * Return the next ledger id.
     *
     * @return the next ledger id.
     * @throws IOException thrown when there is a problem accessing the ledger metadata store.
     */
    long next() throws IOException;
}
| 397 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/ForceableHandle.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import java.util.concurrent.CompletableFuture;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
/**
* Provide the ability to enforce durability guarantees to the writer.
*
* @see WriteAdvHandle
* @see WriteHandle
*
* @since 4.8
*/
@Public
@Unstable
public interface ForceableHandle {

    /**
     * Enforce durability to the entries written by this handle.
     *
     * <p>This API is useful with {@link WriteFlag#DEFERRED_SYNC}, because with
     * that flag writes are acknowledged by the bookie without waiting for a
     * durable write.
     *
     * @return a handle to the result
     */
    CompletableFuture<Void> force();
}
| 398 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/LedgerEntries.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.client.api;
import java.util.Iterator;
/**
* Interface to wrap a sequence of entries.
*
* @since 4.6
*/
public interface LedgerEntries
    extends AutoCloseable, Iterable<LedgerEntry> {

    /**
     * Gets a specific LedgerEntry by entryId.
     *
     * @param entryId the LedgerEntry id
     * @return the LedgerEntry, or null if there is no LedgerEntry with such an entryId
     */
    LedgerEntry getEntry(long entryId);

    /**
     * Get an iterator over all the ledger entries contained in the
     * LedgerEntries object.
     *
     * <p>Calling this method does not modify the reference count of the ByteBuf in the returned LedgerEntry objects.
     * The caller who calls {@link #iterator()} should make sure that they do not call ByteBuf.release() on the
     * LedgerEntry objects to avoid a double free.
     * All reference counts will be decremented when the containing LedgerEntries object is closed via {@link #close()}.
     *
     * @return an iterator of LedgerEntry objects
     */
    @Override
    Iterator<LedgerEntry> iterator();

    /**
     * Close to release the resources held by this instance.
     */
    @Override
    void close();
}
| 399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.