index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BufferedReadChannel.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.io.IOException;
import java.nio.channels.FileChannel;
/**
* A Buffered channel without a write buffer. Only reads are buffered.
*/
public class BufferedReadChannel extends BufferedChannelBase {
    // The capacity of the read buffer.
    protected final int readCapacity;
    // The buffer caching the most recently read region of the file.
    protected final ByteBuf readBuffer;
    // File position of the first byte currently held in readBuffer.
    // Long.MIN_VALUE means the buffer holds no valid cached data.
    protected long readBufferStartPosition = Long.MIN_VALUE;
    // Total number of read(...) calls, for cache statistics.
    long invocationCount = 0;
    // Number of iterations served (at least partially) from the cache.
    long cacheHitCount = 0;

    public BufferedReadChannel(FileChannel fileChannel, int readCapacity) {
        super(fileChannel);
        this.readCapacity = readCapacity;
        this.readBuffer = Unpooled.buffer(readCapacity);
    }

    /**
     * Read as many bytes into dest as dest.capacity() starting at position pos in the
     * FileChannel. This function can read from the buffer or the file channel
     * depending on the implementation.
     * @param dest destination buffer; bytes are appended at its writer index
     * @param pos starting position in the file
     * @return The total number of bytes read.
     *         -1 if the given position is greater than or equal to the file's current size.
     * @throws IOException if I/O error occurs
     */
    public int read(ByteBuf dest, long pos) throws IOException {
        return read(dest, pos, dest.writableBytes());
    }

    /**
     * Read up to {@code length} bytes starting at {@code pos}, serving from the
     * internal read buffer when possible and refilling it from the file otherwise.
     *
     * @param dest destination buffer; bytes are appended at its writer index
     * @param pos starting position in the file
     * @param length maximum number of bytes to read
     * @return number of bytes actually read (may be less than length at EOF),
     *         or -1 if {@code pos} is at or beyond the file size
     * @throws IOException on I/O failure or a short read from the file channel
     */
    public synchronized int read(ByteBuf dest, long pos, int length) throws IOException {
        invocationCount++;
        long currentPosition = pos;
        long eof = validateAndGetFileChannel().size();
        // return -1 if the given position is greater than or equal to the file's current size.
        if (pos >= eof) {
            return -1;
        }
        while (length > 0) {
            // Check if the data is in the buffer, if so, copy it.
            if (readBufferStartPosition <= currentPosition
                    && currentPosition < readBufferStartPosition + readBuffer.readableBytes()) {
                int posInBuffer = (int) (currentPosition - readBufferStartPosition);
                int bytesToCopy = Math.min(length, readBuffer.readableBytes() - posInBuffer);
                dest.writeBytes(readBuffer, posInBuffer, bytesToCopy);
                currentPosition += bytesToCopy;
                length -= bytesToCopy;
                cacheHitCount++;
            } else if (currentPosition >= eof) {
                // here we reached eof.
                break;
            } else {
                // Not in the buffer: refill it from the file. Invalidate the cache
                // first so that a failed read cannot leave the previous buffer
                // contents associated with the new start position.
                readBufferStartPosition = Long.MIN_VALUE;
                readBuffer.clear();
                int readBytes = validateAndGetFileChannel()
                        .read(readBuffer.internalNioBuffer(0, readCapacity), currentPosition);
                if (readBytes <= 0) {
                    throw new IOException("Reading from filechannel returned a non-positive value. Short read.");
                }
                readBuffer.writerIndex(readBytes);
                // Commit the cached position only after a successful read.
                readBufferStartPosition = currentPosition;
            }
        }
        return (int) (currentPosition - pos);
    }

    /**
     * Drops any cached data so the next read goes to the file channel.
     */
    public synchronized void clear() {
        readBuffer.clear();
        // Also invalidate the cached position so the emptied buffer can never be
        // interpreted as valid data at the old offset.
        readBufferStartPosition = Long.MIN_VALUE;
    }
}
| 500 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LegacyCookieValidation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import com.google.common.collect.Lists;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.discover.RegistrationManager;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Versioned;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Legacy implementation of CookieValidation.
*/
public class LegacyCookieValidation implements CookieValidation {
private static final Logger log = LoggerFactory.getLogger(LegacyCookieValidation.class);
private final ServerConfiguration conf;
private final RegistrationManager registrationManager;
/**
 * Creates a validator that checks on-disk cookies against the copy stored in
 * the registration manager for this server configuration.
 *
 * @param conf server configuration used to derive the master cookie
 * @param registrationManager source of truth for this bookie's cookie
 */
public LegacyCookieValidation(ServerConfiguration conf,
                              RegistrationManager registrationManager) {
    this.conf = conf;
    this.registrationManager = registrationManager;
}
/**
 * Validates that the cookie in every supplied directory is consistent with the
 * "master" cookie derived from the current configuration and with the cookie
 * recorded in the registration manager; stamps fresh cookies when this is a new
 * environment or a permitted storage expansion, and fails otherwise.
 *
 * @param directories directories expected to carry a cookie file
 * @throws BookieException if cookies are missing/inconsistent and the situation
 *         is neither a new environment nor an allowed expansion, or on I/O error
 */
@Override
public void checkCookies(List<File> directories) throws BookieException {
    try {
        // 1. retrieve the instance id
        String instanceId = registrationManager.getClusterInstanceId();
        // 2. build the master cookie from the configuration
        Cookie.Builder builder = Cookie.generateCookie(conf);
        if (null != instanceId) {
            builder.setInstanceId(instanceId);
        }
        Cookie masterCookie = builder.build();
        boolean allowExpansion = conf.getAllowStorageExpansion();
        // 3. read the cookie from registration manager. it is the `source-of-truth` of a given bookie.
        // if it doesn't exist in registration manager, this bookie is a new bookie, otherwise it is
        // an old bookie.
        List<BookieId> possibleBookieIds = possibleBookieIds(conf);
        final Versioned<Cookie> rmCookie = readAndVerifyCookieFromRegistrationManager(
                masterCookie, registrationManager, possibleBookieIds, allowExpansion);
        // 4. check if the cookie appear in all the directories.
        List<File> missedCookieDirs = new ArrayList<>();
        List<Cookie> existingCookies = Lists.newArrayList();
        if (null != rmCookie) {
            existingCookies.add(rmCookie.getValue());
        }
        // 4.1 verify the cookies in journal directories
        Pair<List<File>, List<Cookie>> result =
                verifyAndGetMissingDirs(masterCookie,
                        allowExpansion, directories);
        missedCookieDirs.addAll(result.getLeft());
        existingCookies.addAll(result.getRight());
        // 5. if there are directories missing cookies,
        // this is either a:
        // - new environment
        // - a directory is being added
        // - a directory has been corrupted/wiped, which is an error
        if (!missedCookieDirs.isEmpty()) {
            if (rmCookie == null) {
                // 5.1 new environment: all directories should be empty
                verifyDirsForNewEnvironment(missedCookieDirs);
                stampNewCookie(conf, masterCookie, registrationManager,
                        Version.NEW, directories);
            } else if (allowExpansion) {
                // 5.2 storage is expanding
                Set<File> knownDirs = getKnownDirs(existingCookies);
                verifyDirsForStorageExpansion(missedCookieDirs, knownDirs);
                stampNewCookie(conf, masterCookie, registrationManager,
                        rmCookie.getVersion(), directories);
            } else {
                // 5.3 Cookie-less directories and
                // we can't do anything with them
                log.error("There are directories without a cookie,"
                        + " and this is neither a new environment,"
                        + " nor is storage expansion enabled. "
                        + "Empty directories are {}", missedCookieDirs);
                throw new BookieException.InvalidCookieException();
            }
        } else {
            if (rmCookie == null) {
                // No corresponding cookie found in registration manager. The bookie should fail to come up.
                log.error("Cookie for this bookie is not stored in metadata store. Bookie failing to come up");
                throw new BookieException.InvalidCookieException();
            }
        }
    } catch (IOException ioe) {
        log.error("Error accessing cookie on disks", ioe);
        throw new BookieException.InvalidCookieException(ioe);
    }
}
/**
 * Builds the list of every bookie id this server could have registered under,
 * so a configuration change is not mistaken for a brand-new environment.
 *
 * @param conf server configuration to derive candidate identities from
 * @return candidate bookie ids, in precedence order
 * @throws BookieException if a candidate identity cannot be resolved
 */
private static List<BookieId> possibleBookieIds(ServerConfiguration conf)
        throws BookieException {
    // Collect all plausible identities. A loopback-only host is not an error
    // here; listening on loopback will fail later anyway if it is disallowed.
    List<BookieId> candidateIds = Lists.newArrayListWithExpectedSize(3);
    try {
        if (conf.getBookieId() != null) {
            // An explicitly configured BookieID overrides any network-derived id.
            candidateIds.add(BookieImpl.getBookieId(conf));
        } else {
            // Identity derived from the IP address.
            candidateIds.add(BookieImpl.getBookieAddress(
                    new ServerConfiguration(conf)
                            .setUseHostNameAsBookieID(false)
                            .setAdvertisedAddress(null)
                            .setAllowLoopback(true)
            ).toBookieId());
            // Identity derived from the host name.
            candidateIds.add(BookieImpl.getBookieAddress(
                    new ServerConfiguration(conf)
                            .setUseHostNameAsBookieID(true)
                            .setAdvertisedAddress(null)
                            .setAllowLoopback(true)
            ).toBookieId());
            // Identity derived from the advertised address, when one is set.
            if (conf.getAdvertisedAddress() != null) {
                candidateIds.add(BookieImpl.getBookieAddress(conf).toBookieId());
            }
        }
    } catch (UnknownHostException e) {
        throw new BookieException.UnknownBookieIdException(e);
    }
    return candidateIds;
}
/**
 * Reads this bookie's cookie from the registration manager, trying each
 * candidate bookie id in turn, and verifies it against the master cookie.
 *
 * <p>Addresses with no stored cookie are skipped. A cookie that fails
 * verification makes this method throw. If more than one address has a stored
 * cookie, the cookie read for the last such address is the one returned.
 *
 * @param masterCookie cookie built from the current configuration
 * @param rm registration manager holding the authoritative cookies
 * @param addresses candidate bookie ids to look up
 * @param allowExpansion whether superset (expansion) verification is allowed
 * @return the versioned cookie found, or {@code null} if no address has one
 * @throws BookieException if a found cookie fails verification
 */
private static Versioned<Cookie> readAndVerifyCookieFromRegistrationManager(
        Cookie masterCookie, RegistrationManager rm,
        List<BookieId> addresses, boolean allowExpansion)
        throws BookieException {
    Versioned<Cookie> rmCookie = null;
    for (BookieId address : addresses) {
        try {
            rmCookie = Cookie.readFromRegistrationManager(rm, address);
            // If allowStorageExpansion option is set, we should
            // make sure that the new set of ledger/index dirs
            // is a super set of the old; else, we fail the cookie check
            if (allowExpansion) {
                masterCookie.verifyIsSuperSet(rmCookie.getValue());
            } else {
                masterCookie.verify(rmCookie.getValue());
            }
        } catch (BookieException.CookieNotFoundException e) {
            // No cookie registered under this address; try the next candidate.
            continue;
        }
    }
    return rmCookie;
}
/**
 * Reads and verifies the cookie in each directory, partitioning directories
 * into those missing a cookie file and the cookies that were found.
 *
 * @param masterCookie cookie built from the current configuration
 * @param allowExpansion whether superset (expansion) verification is allowed
 * @param dirs directories to inspect
 * @return pair of (directories without a cookie, cookies successfully read)
 * @throws BookieException.InvalidCookieException if a found cookie fails verification
 * @throws IOException on I/O failure while reading a cookie
 */
private static Pair<List<File>, List<Cookie>> verifyAndGetMissingDirs(
        Cookie masterCookie, boolean allowExpansion, List<File> dirs)
        throws BookieException.InvalidCookieException, IOException {
    List<File> dirsWithoutCookie = Lists.newArrayList();
    List<Cookie> cookiesFound = Lists.newArrayList();
    for (File dir : dirs) {
        try {
            Cookie cookie = Cookie.readFromDirectory(dir);
            // With expansion enabled the master cookie may cover a superset of
            // the directories recorded in an on-disk cookie.
            if (allowExpansion) {
                masterCookie.verifyIsSuperSet(cookie);
            } else {
                masterCookie.verify(cookie);
            }
            cookiesFound.add(cookie);
        } catch (FileNotFoundException fnf) {
            // This directory has no cookie file yet.
            dirsWithoutCookie.add(dir);
        }
    }
    return Pair.of(dirsWithoutCookie, cookiesFound);
}
/**
 * In a brand-new environment, every cookie-less directory must be empty;
 * fails if any of them already contains files.
 *
 * @param missedCookieDirs directories found without a cookie file
 * @throws BookieException.InvalidCookieException if any directory is non-empty
 */
private static void verifyDirsForNewEnvironment(List<File> missedCookieDirs)
        throws BookieException.InvalidCookieException {
    List<File> nonEmptyDirs = missedCookieDirs.stream()
            .filter(dir -> {
                String[] entries = dir.list();
                return entries != null && entries.length != 0;
            })
            .collect(Collectors.toList());
    if (!nonEmptyDirs.isEmpty()) {
        log.error("Not all the new directories are empty. New directories that are not empty are: " + nonEmptyDirs);
        throw new BookieException.InvalidCookieException();
    }
}
/**
 * Writes the master cookie into every directory (back-filling directories that
 * miss one, e.g. for storage expansion) and then persists it in the
 * registration manager at the given version.
 *
 * @param conf server configuration
 * @param masterCookie cookie to stamp
 * @param rm registration manager to persist the cookie in
 * @param version expected version for the registration-manager write
 * @param dirs all directories to stamp
 * @throws BookieException if the registration-manager write fails
 * @throws IOException if writing a cookie file fails
 */
private static void stampNewCookie(ServerConfiguration conf,
                                   Cookie masterCookie,
                                   RegistrationManager rm,
                                   Version version,
                                   List<File> dirs)
        throws BookieException, IOException {
    log.info("Stamping new cookies on all dirs {}", dirs);
    // Stamp the directories first; the registration manager copy is written
    // last, once every directory holds the cookie.
    for (int i = 0; i < dirs.size(); i++) {
        masterCookie.writeToDirectory(dirs.get(i));
    }
    masterCookie.writeToRegistrationManager(rm, conf, version);
}
/**
 * Collects the union of all ledger and index directories referenced by the
 * given cookies.
 *
 * @param cookies cookies read from disk and/or the registration manager
 * @return set of every directory path any cookie knows about
 */
private static Set<File> getKnownDirs(List<Cookie> cookies) {
    return cookies.stream()
            .flatMap(c -> {
                // Ledger dirs are always present; index dirs are optional.
                List<String> dirs = new ArrayList<>(Arrays.asList(c.getLedgerDirPathsFromCookie()));
                if (c.getIndexDirPathsFromCookie() != null) {
                    dirs.addAll(Arrays.asList(c.getIndexDirPathsFromCookie()));
                }
                return dirs.stream();
            })
            .map(File::new)
            .collect(Collectors.toSet());
}
/**
 * For a storage-expansion scenario, validates that every cookie-less directory
 * is genuinely new (empty, and not a previously-known ledger directory that
 * lost its cookie).
 *
 * @param missedCookieDirs directories found without a cookie file
 * @param existingLedgerDirs directories already recorded in existing cookies
 * @throws BookieException.InvalidCookieException if a known directory lost its
 *         cookie or a newly added directory is not empty
 */
private static void verifyDirsForStorageExpansion(
        List<File> missedCookieDirs,
        Set<File> existingLedgerDirs) throws BookieException.InvalidCookieException {
    List<File> dirsMissingData = new ArrayList<>();
    List<File> nonEmptyDirs = new ArrayList<>();
    for (File dir : missedCookieDirs) {
        if (existingLedgerDirs.contains(dir.getParentFile())) {
            // A directory covered by an existing cookie has no cookie file:
            // that is data loss, not expansion, so do not proceed.
            dirsMissingData.add(dir);
        } else {
            String[] entries = dir.list();
            if (entries != null && entries.length != 0) {
                nonEmptyDirs.add(dir);
            }
        }
    }
    if (!dirsMissingData.isEmpty() || !nonEmptyDirs.isEmpty()) {
        log.error("Either not all local directories have cookies or directories being added "
                + " newly are not empty. "
                + "Directories missing cookie file are: " + dirsMissingData
                + " New directories that are not empty are: " + nonEmptyDirs);
        throw new BookieException.InvalidCookieException();
    }
}
} | 501 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/HandleFactoryImpl.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
import org.apache.bookkeeper.bookie.LedgerStorage.LedgerDeletionListener;
import org.apache.bookkeeper.util.collections.ConcurrentLongHashMap;
class HandleFactoryImpl implements HandleFactory, LedgerDeletionListener {
    // Cache of writable ledger handles, keyed by ledger id.
    private final ConcurrentLongHashMap<LedgerDescriptor> writeHandles;
    // Cache of read-only ledger handles, keyed by ledger id.
    private final ConcurrentLongHashMap<LedgerDescriptor> readOnlyHandles;
    final LedgerStorage ledgerStorage;

    HandleFactoryImpl(LedgerStorage ledgerStorage) {
        this.ledgerStorage = ledgerStorage;
        this.writeHandles = ConcurrentLongHashMap.<LedgerDescriptor>newBuilder().build();
        this.readOnlyHandles = ConcurrentLongHashMap.<LedgerDescriptor>newBuilder().build();
        // Drop cached handles when their ledger is deleted from storage.
        ledgerStorage.registerLedgerDeletionListener(this);
    }

    @Override
    public LedgerDescriptor getHandle(final long ledgerId, final byte[] masterKey) throws IOException, BookieException {
        LedgerDescriptor descriptor = writeHandles.get(ledgerId);
        if (descriptor == null) {
            // NOTE(review): under concurrency two callers may build distinct
            // descriptors and one caller keeps an instance that never entered
            // the cache; access is still key-checked below, so this appears
            // benign — confirm against LedgerDescriptor semantics.
            descriptor = LedgerDescriptor.create(masterKey, ledgerId, ledgerStorage);
            writeHandles.putIfAbsent(ledgerId, descriptor);
        }
        // Reject callers presenting the wrong master key.
        descriptor.checkAccess(masterKey);
        return descriptor;
    }

    @Override
    public LedgerDescriptor getReadOnlyHandle(final long ledgerId) throws IOException, Bookie.NoLedgerException {
        LedgerDescriptor descriptor = readOnlyHandles.get(ledgerId);
        if (descriptor == null) {
            descriptor = LedgerDescriptor.createReadOnly(ledgerId, ledgerStorage);
            readOnlyHandles.putIfAbsent(ledgerId, descriptor);
        }
        return descriptor;
    }

    @Override
    public void ledgerDeleted(long ledgerId) {
        writeHandles.remove(ledgerId);
        readOnlyHandles.remove(ledgerId);
    }
}
| 502 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/SkipListFlusher.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
/**
* Flush entries from skip list.
*/
public interface SkipListFlusher {
    /**
     * Process a single entry being flushed from the skip list.
     *
     * @param ledgerId id of the ledger the entry belongs to.
     * @param entryId id of the entry within that ledger.
     * @param entry entry payload.
     * @throws IOException if the entry cannot be processed/persisted.
     */
    void process(long ledgerId, long entryId, ByteBuf entry) throws IOException;
}
| 503 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogMetadata.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import io.netty.util.Recycler;
import io.netty.util.Recycler.Handle;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.function.LongPredicate;
import org.apache.bookkeeper.util.collections.ConcurrentLongLongHashMap;
/**
* Records the total size, remaining size and the set of ledgers that comprise a
* entry log.
*/
public class EntryLogMetadata {
    // Id of the entry log described by this metadata.
    protected long entryLogId;
    // Total number of bytes ever written to the entry log.
    protected long totalSize;
    // Number of bytes still owned by live (not yet removed) ledgers.
    protected long remainingSize;
    // Maps ledgerId -> number of bytes that ledger occupies in this log.
    protected final ConcurrentLongLongHashMap ledgersMap;
    // Schema version written by serialize() and validated by deserialize().
    private static final short DEFAULT_SERIALIZATION_VERSION = 0;

    protected EntryLogMetadata() {
        // concurrencyLevel(1): minimal striping for the per-log ledger map.
        ledgersMap = ConcurrentLongLongHashMap.newBuilder()
                .expectedItems(256)
                .concurrencyLevel(1)
                .build();
    }

    public EntryLogMetadata(long logId) {
        this();
        this.entryLogId = logId;
        totalSize = remainingSize = 0;
    }

    /** Accounts {@code size} more bytes of {@code ledgerId} to this entry log. */
    public void addLedgerSize(long ledgerId, long size) {
        totalSize += size;
        remainingSize += size;
        ledgersMap.addAndGet(ledgerId, size);
    }

    /** Returns true if the given ledger has bytes recorded in this entry log. */
    public boolean containsLedger(long ledgerId) {
        return ledgersMap.containsKey(ledgerId);
    }

    /**
     * Returns the fraction (0.0 - 1.0) of the log still occupied by live
     * ledgers; 0.0 for an empty log.
     */
    public double getUsage() {
        if (totalSize == 0L) {
            return 0.0f;
        }
        return (double) remainingSize / totalSize;
    }

    /** Returns true when no ledger has data remaining in this entry log. */
    public boolean isEmpty() {
        return ledgersMap.isEmpty();
    }

    public long getEntryLogId() {
        return entryLogId;
    }

    public long getTotalSize() {
        return totalSize;
    }

    public long getRemainingSize() {
        return remainingSize;
    }

    public ConcurrentLongLongHashMap getLedgersMap() {
        return ledgersMap;
    }

    /**
     * Removes every ledger matching {@code predicate} from the map, deducting
     * its byte count from the remaining size.
     */
    public void removeLedgerIf(LongPredicate predicate) {
        ledgersMap.removeIf((ledgerId, size) -> {
            boolean shouldRemove = predicate.test(ledgerId);
            if (shouldRemove) {
                remainingSize -= size;
            }
            return shouldRemove;
        });
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{totalSize = ").append(totalSize).append(", remainingSize = ").append(remainingSize)
                .append(", ledgersMap = ").append(ledgersMap.toString()).append("}");
        return sb.toString();
    }

    /**
     * Serializes {@link EntryLogMetadata} and writes to
     * {@link DataOutputStream}.
     * <pre>
     * schema:
     * 2-bytes: schema-version
     * 8-bytes: entrylog-entryLogId
     * 8-bytes: entrylog-totalSize
     * 8-bytes: entrylog-remainingSize
     * 8-bytes: total number of ledgers
     * ledgers-map
     * [repeat]: (8-bytes::ledgerId, 8-bytes::size-of-ledger)
     * </pre>
     * @param out
     * @throws IOException
     *             throws if it couldn't serialize metadata-fields
     * @throws IllegalStateException
     *             throws if it couldn't serialize ledger-map
     */
    public void serialize(DataOutputStream out) throws IOException, IllegalStateException {
        out.writeShort(DEFAULT_SERIALIZATION_VERSION);
        out.writeLong(entryLogId);
        out.writeLong(totalSize);
        out.writeLong(remainingSize);
        out.writeLong(ledgersMap.size());
        ledgersMap.forEach((ledgerId, size) -> {
            try {
                out.writeLong(ledgerId);
                out.writeLong(size);
            } catch (IOException e) {
                // forEach's consumer cannot throw checked exceptions, so IO
                // failures surface as IllegalStateException (see javadoc above).
                throw new IllegalStateException("Failed to serialize entryLogMetadata", e);
            }
        });
        out.flush();
    }

    /**
     * Deserializes {@link EntryLogMetadataRecyclable} from given {@link DataInputStream}.
     * Caller has to recycle returned {@link EntryLogMetadataRecyclable}.
     * @param in
     * @return deserialized metadata; caller must recycle it
     * @throws IOException if the stream is truncated, unreadable, or the
     *             serialization version does not match
     */
    public static EntryLogMetadataRecyclable deserialize(DataInputStream in) throws IOException {
        EntryLogMetadataRecyclable metadata = EntryLogMetadataRecyclable.get();
        try {
            short serVersion = in.readShort();
            if ((serVersion != DEFAULT_SERIALIZATION_VERSION)) {
                throw new IOException(String.format("%s. expected =%d, found=%d", "serialization version doesn't match",
                        DEFAULT_SERIALIZATION_VERSION, serVersion));
            }
            metadata.entryLogId = in.readLong();
            metadata.totalSize = in.readLong();
            metadata.remainingSize = in.readLong();
            long ledgersMapSize = in.readLong();
            for (int i = 0; i < ledgersMapSize; i++) {
                long ledgerId = in.readLong();
                // Despite the name, this value is the ledger's byte count in
                // the log (see serialize() schema), not an entry id.
                long entryId = in.readLong();
                metadata.ledgersMap.put(ledgerId, entryId);
            }
            return metadata;
        } catch (IOException e) {
            // Return the pooled instance before propagating the failure.
            metadata.recycle();
            throw e;
        } catch (Exception e) {
            metadata.recycle();
            throw new IOException(e);
        }
    }

    /** Resets all fields so a pooled instance can be safely reused. */
    public void clear() {
        entryLogId = -1L;
        totalSize = -1L;
        remainingSize = -1L;
        ledgersMap.clear();
    }

    /**
     * Recyclable {@link EntryLogMetadata} class.
     *
     */
    public static class EntryLogMetadataRecyclable extends EntryLogMetadata {
        private final Handle<EntryLogMetadataRecyclable> recyclerHandle;

        private EntryLogMetadataRecyclable(Handle<EntryLogMetadataRecyclable> recyclerHandle) {
            this.recyclerHandle = recyclerHandle;
        }

        private static final Recycler<EntryLogMetadataRecyclable> RECYCLER =
                new Recycler<EntryLogMetadataRecyclable>() {
                    protected EntryLogMetadataRecyclable newObject(Recycler.Handle<EntryLogMetadataRecyclable> handle) {
                        return new EntryLogMetadataRecyclable(handle);
                    }
                };

        /** Fetches a pooled (or freshly created) instance from the recycler. */
        public static EntryLogMetadataRecyclable get() {
            EntryLogMetadataRecyclable metadata = RECYCLER.get();
            return metadata;
        }

        /** Clears this instance and returns it to the recycler pool. */
        public void recycle() {
            clear();
            recyclerHandle.recycle(this);
        }
    }
}
| 504 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Provides a <i>Bookie</i> server that stores entries for clients.
*/
package org.apache.bookkeeper.bookie;
| 505 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieFileChannel.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.channels.FileChannel;
/**
* A FileChannel for the JournalChannel read and write, we can use this interface to extend the FileChannel
* which we use in the JournalChannel.
*/
public interface BookieFileChannel {
    /**
     * An interface for get the FileChannel from the provider.
     *
     * @return the underlying file channel used for journal reads and writes
     * @throws FileNotFoundException if the backing file cannot be opened
     * @throws IOException if the channel cannot be created
     */
    FileChannel getFileChannel() throws FileNotFoundException, IOException;

    /**
     * Check the given file if exists.
     *
     * @param file file to probe
     * @return true if the file exists, false otherwise
     */
    boolean fileExists(File file);

    /**
     * Get the file descriptor of the opened file.
     *
     * @return the file descriptor backing this channel
     * @throws IOException if the descriptor cannot be obtained
     */
    FileDescriptor getFD() throws IOException;

    /**
     * Close file channel and release all resources.
     *
     * @throws IOException if closing the underlying channel fails
     */
    void close() throws IOException;
}
| 506 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogManagerForEntryLogPerLedger.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.CATEGORY_SERVER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.ENTRYLOGGER_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.ENTRYLOGS_PER_LEDGER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.NUM_OF_WRITE_ACTIVE_LEDGERS;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalCause;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import io.netty.buffer.ByteBuf;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.DefaultEntryLogger.BufferedLogChannel;
import org.apache.bookkeeper.bookie.LedgerDirsManager.LedgerDirsListener;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
import org.apache.bookkeeper.util.IOUtils;
import org.apache.bookkeeper.util.MathUtils;
import org.apache.bookkeeper.util.collections.ConcurrentLongHashMap;
import org.apache.commons.lang3.mutable.MutableInt;
@Slf4j
class EntryLogManagerForEntryLogPerLedger extends EntryLogManagerBase {
/**
 * Pairs a write channel for an entry log with the fullness state of the
 * ledger directory that hosts it.
 */
static class BufferedLogChannelWithDirInfo {
    // The wrapped write channel for the current entry log file.
    private final BufferedLogChannel logChannel;
    // Whether the hosting ledger directory is full. Volatile because it is
    // written and read by different threads — presumably updated from the
    // ledger-dirs listener; confirm in the enclosing class.
    volatile boolean ledgerDirFull = false;

    private BufferedLogChannelWithDirInfo(BufferedLogChannel logChannel) {
        this.logChannel = logChannel;
    }

    private boolean isLedgerDirFull() {
        return ledgerDirFull;
    }

    private void setLedgerDirFull(boolean ledgerDirFull) {
        this.ledgerDirFull = ledgerDirFull;
    }

    BufferedLogChannel getLogChannel() {
        return logChannel;
    }
}
/**
 * Couples a ledger's active entry log (with directory info) with the lock
 * that guards access to it.
 */
class EntryLogAndLockTuple {
    // Lock guarding this ledger's entry log. Locks come from a fixed-size
    // shared pool (lockArrayPool, declared on the enclosing class), so
    // distinct ledgers may share the same lock instance.
    private final Lock ledgerLock;
    private BufferedLogChannelWithDirInfo entryLogWithDirInfo;

    private EntryLogAndLockTuple(long ledgerId) {
        // Pick a pool slot by hashing the ledger id; create the lock lazily on
        // first use — the CAS keeps concurrent initialization race-safe.
        int lockIndex = MathUtils.signSafeMod(Long.hashCode(ledgerId), lockArrayPool.length());
        if (lockArrayPool.get(lockIndex) == null) {
            lockArrayPool.compareAndSet(lockIndex, null, new ReentrantLock());
        }
        ledgerLock = lockArrayPool.get(lockIndex);
    }

    private Lock getLedgerLock() {
        return ledgerLock;
    }

    BufferedLogChannelWithDirInfo getEntryLogWithDirInfo() {
        return entryLogWithDirInfo;
    }

    private void setEntryLogWithDirInfo(BufferedLogChannelWithDirInfo entryLogWithDirInfo) {
        this.entryLogWithDirInfo = entryLogWithDirInfo;
    }
}
/**
 * Tracks, per ledger, how many entry logs have been created for it, and
 * publishes the related counters and the entry-logs-per-ledger distribution
 * to the stats logger.
 *
 * <p>Counter mutations and cache eviction callbacks synchronize on this
 * instance so an increment and an eviction-time read never interleave.
 */
@StatsDoc(
    name = ENTRYLOGGER_SCOPE,
    category = CATEGORY_SERVER,
    help = "EntryLogger related stats"
)
class EntryLogsPerLedgerCounter {
    @StatsDoc(
        name = NUM_OF_WRITE_ACTIVE_LEDGERS,
        help = "Number of write active ledgers"
    )
    private final Counter numOfWriteActiveLedgers;
    @StatsDoc(
        name = NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY,
        help = "Number of write ledgers removed after cache expiry"
    )
    private final Counter numOfWriteLedgersRemovedCacheExpiry;
    @StatsDoc(
        name = NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE,
        help = "Number of write ledgers removed due to reach max cache size"
    )
    private final Counter numOfWriteLedgersRemovedCacheMaxSize;
    @StatsDoc(
        name = NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS,
        help = "Number of ledgers having multiple entry logs"
    )
    private final Counter numLedgersHavingMultipleEntrylogs;
    @StatsDoc(
        name = ENTRYLOGS_PER_LEDGER,
        help = "The distribution of number of entry logs per ledger"
    )
    private final OpStatsLogger entryLogsPerLedger;
    /*
     * ledgerIdEntryLogCounterCacheMap cache will be used to store count of
     * entrylogs as value for its ledgerid key. This cacheMap limits -
     * 'expiry duration' and 'maximumSize' will be set to
     * entryLogPerLedgerCounterLimitsMultFactor times of
     * 'ledgerIdEntryLogMap' cache limits. This is needed because entries
     * from 'ledgerIdEntryLogMap' can be removed from cache because of
     * access-time expiry or cache size limits, but to know the actual
     * number of entrylogs per ledger, we should maintain this count for a
     * long time.
     */
    private final LoadingCache<Long, MutableInt> ledgerIdEntryLogCounterCacheMap;
    EntryLogsPerLedgerCounter(StatsLogger statsLogger) {
        this.numOfWriteActiveLedgers = statsLogger.getCounter(NUM_OF_WRITE_ACTIVE_LEDGERS);
        this.numOfWriteLedgersRemovedCacheExpiry = statsLogger
                .getCounter(NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY);
        this.numOfWriteLedgersRemovedCacheMaxSize = statsLogger
                .getCounter(NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE);
        this.numLedgersHavingMultipleEntrylogs = statsLogger.getCounter(NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS);
        this.entryLogsPerLedger = statsLogger.getOpStatsLogger(ENTRYLOGS_PER_LEDGER);
        // Limits are a multiple of ledgerIdEntryLogMap's limits so the counter
        // outlives the entry-log cache entry it is counting for.
        ledgerIdEntryLogCounterCacheMap = CacheBuilder.newBuilder()
                .expireAfterAccess(entrylogMapAccessExpiryTimeInSeconds * entryLogPerLedgerCounterLimitsMultFactor,
                        TimeUnit.SECONDS)
                .maximumSize(maximumNumberOfActiveEntryLogs * entryLogPerLedgerCounterLimitsMultFactor)
                .removalListener(new RemovalListener<Long, MutableInt>() {
                    // On eviction, record the ledger's final entry-log count
                    // into the per-ledger distribution stat.
                    @Override
                    public void onRemoval(RemovalNotification<Long, MutableInt> removedEntryFromCounterMap) {
                        if ((removedEntryFromCounterMap != null)
                                && (removedEntryFromCounterMap.getValue() != null)) {
                            synchronized (EntryLogsPerLedgerCounter.this) {
                                entryLogsPerLedger
                                        .registerSuccessfulValue(removedEntryFromCounterMap.getValue().intValue());
                            }
                        }
                    }
                }).build(new CacheLoader<Long, MutableInt>() {
                    // First access for a ledger starts its count at zero.
                    @Override
                    public MutableInt load(Long key) throws Exception {
                        synchronized (EntryLogsPerLedgerCounter.this) {
                            return new MutableInt();
                        }
                    }
                });
    }
    /*
     * Called when a new entry log is opened for the given ledger: bumps the
     * ledger's entry-log count, and the active-writers counter when the
     * ledger is new to the entry-log map cache.
     */
    private synchronized void openNewEntryLogForLedger(Long ledgerId, boolean newLedgerInEntryLogMapCache) {
        int numOfEntrylogsForThisLedger = ledgerIdEntryLogCounterCacheMap.getUnchecked(ledgerId).incrementAndGet();
        if (numOfEntrylogsForThisLedger == 2) {
            numLedgersHavingMultipleEntrylogs.inc();
        }
        if (newLedgerInEntryLogMapCache) {
            numOfWriteActiveLedgers.inc();
        }
    }
    /*
     * Called when a ledger's entry is evicted from ledgerIdEntryLogMap:
     * decrements the active-writers counter and attributes the eviction cause
     * (expiry vs. max-size) to the matching counter.
     */
    private synchronized void removedLedgerFromEntryLogMapCache(Long ledgerId, RemovalCause cause) {
        numOfWriteActiveLedgers.dec();
        if (cause.equals(RemovalCause.EXPIRED)) {
            numOfWriteLedgersRemovedCacheExpiry.inc();
        } else if (cause.equals(RemovalCause.SIZE)) {
            numOfWriteLedgersRemovedCacheMaxSize.inc();
        }
    }
    /*
     * this is for testing purposes only. guava's cache doesn't clean up
     * completely (including calling the expiry removal listener)
     * automatically when the access timeout elapses.
     *
     * https://google.github.io/guava/releases/19.0/api/docs/com/google/
     * common/cache/CacheBuilder.html
     *
     * If expireAfterWrite or expireAfterAccess is requested entries may be
     * evicted on each cache modification, on occasional cache accesses, or
     * on calls to Cache.cleanUp(). Expired entries may be counted by
     * Cache.size(), but will never be visible to read or write operations.
     *
     * Certain cache configurations will result in the accrual of periodic
     * maintenance tasks which will be performed during write operations, or
     * during occasional read operations in the absence of writes. The
     * Cache.cleanUp() method of the returned cache will also perform
     * maintenance, but calling it should not be necessary with a high
     * throughput cache. Only caches built with removalListener,
     * expireAfterWrite, expireAfterAccess, weakKeys, weakValues, or
     * softValues perform periodic maintenance.
     */
    @VisibleForTesting
    void doCounterMapCleanup() {
        ledgerIdEntryLogCounterCacheMap.cleanUp();
    }
    /** Test-only view of the per-ledger entry-log counters. */
    @VisibleForTesting
    ConcurrentMap<Long, MutableInt> getCounterMap() {
        return ledgerIdEntryLogCounterCacheMap.asMap();
    }
}
// Striped lock pool; a ledger maps to a slot by hashing its id (see EntryLogAndLockTuple).
private final AtomicReferenceArray<Lock> lockArrayPool;
// ledgerId -> (current entry log, per-ledger lock); entries are evicted by
// access-time expiry or when maximumNumberOfActiveEntryLogs is reached.
private final LoadingCache<Long, EntryLogAndLockTuple> ledgerIdEntryLogMap;
/*
 * every time active logChannel is accessed from ledgerIdEntryLogMap
 * cache, the access time of that entry is updated. But for certain
 * operations we don't want to impact accessTime of the entries (like
 * periodic flush of current active logChannels), and those operations
 * can use this copy of references.
 */
private final ConcurrentLongHashMap<BufferedLogChannelWithDirInfo> replicaOfCurrentLogChannels;
// Creates the (entry log, lock) tuple on first access of a ledger id.
private final CacheLoader<Long, EntryLogAndLockTuple> entryLogAndLockTupleCacheLoader;
// Used to mark rotated logs as flushed once they are synced to disk.
private final DefaultEntryLogger.RecentEntryLogsStatus recentlyCreatedEntryLogsStatus;
// Cache tuning knobs, sourced from ServerConfiguration.
private final int entrylogMapAccessExpiryTimeInSeconds;
private final int maximumNumberOfActiveEntryLogs;
private final int entryLogPerLedgerCounterLimitsMultFactor;
// Expose Stats
private final StatsLogger statsLogger;
final EntryLogsPerLedgerCounter entryLogsPerLedgerCounter;
/**
 * Builds the per-ledger entry log manager: configures the
 * ledgerId -> (entry log, lock) loading cache with access-time expiry and a
 * max-size bound, registers the ledger-dirs listener, and wires the
 * per-ledger stats counter.
 *
 * @throws IOException if the parent manager fails to initialize
 */
EntryLogManagerForEntryLogPerLedger(ServerConfiguration conf, LedgerDirsManager ledgerDirsManager,
        EntryLoggerAllocator entryLoggerAllocator,
        List<DefaultEntryLogger.EntryLogListener> listeners,
        DefaultEntryLogger.RecentEntryLogsStatus recentlyCreatedEntryLogsStatus,
        StatsLogger statsLogger) throws IOException {
    super(conf, ledgerDirsManager, entryLoggerAllocator, listeners);
    this.recentlyCreatedEntryLogsStatus = recentlyCreatedEntryLogsStatus;
    this.rotatedLogChannels = new CopyOnWriteArrayList<BufferedLogChannel>();
    this.replicaOfCurrentLogChannels =
            ConcurrentLongHashMap.<BufferedLogChannelWithDirInfo>newBuilder().build();
    this.entrylogMapAccessExpiryTimeInSeconds = conf.getEntrylogMapAccessExpiryTimeInSeconds();
    this.maximumNumberOfActiveEntryLogs = conf.getMaximumNumberOfActiveEntryLogs();
    this.entryLogPerLedgerCounterLimitsMultFactor = conf.getEntryLogPerLedgerCounterLimitsMultFactor();
    ledgerDirsManager.addLedgerDirsListener(getLedgerDirsListener());
    // Sized at 2x the cache bound, presumably to reduce lock-stripe collisions
    // across concurrently active ledgers -- TODO confirm the intent.
    this.lockArrayPool = new AtomicReferenceArray<Lock>(maximumNumberOfActiveEntryLogs * 2);
    this.entryLogAndLockTupleCacheLoader = new CacheLoader<Long, EntryLogAndLockTuple>() {
        @Override
        public EntryLogAndLockTuple load(Long key) throws Exception {
            return new EntryLogAndLockTuple(key);
        }
    };
    /*
     * Currently we are relying on access time based eviction policy for
     * removal of EntryLogAndLockTuple, so if the EntryLogAndLockTuple of
     * the ledger is not accessed in
     * entrylogMapAccessExpiryTimeInSeconds period, it will be removed
     * from the cache.
     *
     * We are going to introduce explicit advisory writeClose call, with
     * that explicit call EntryLogAndLockTuple of the ledger will be
     * removed from the cache. But still time-based eviction policy is
     * needed because it is not guaranteed that Bookie/EntryLogger would
     * receive successfully write close call in all the cases.
     */
    ledgerIdEntryLogMap = CacheBuilder.newBuilder()
            .expireAfterAccess(entrylogMapAccessExpiryTimeInSeconds, TimeUnit.SECONDS)
            .maximumSize(maximumNumberOfActiveEntryLogs)
            .removalListener(new RemovalListener<Long, EntryLogAndLockTuple>() {
                @Override
                public void onRemoval(
                        RemovalNotification<Long, EntryLogAndLockTuple> expiredLedgerEntryLogMapEntry) {
                    onCacheEntryRemoval(expiredLedgerEntryLogMapEntry);
                }
            }).build(entryLogAndLockTupleCacheLoader);
    this.statsLogger = statsLogger;
    this.entryLogsPerLedgerCounter = new EntryLogsPerLedgerCounter(this.statsLogger);
}
/*
* This method is called when an entry is removed from the cache. This could
* be because access time of that ledger has elapsed
* entrylogMapAccessExpiryTimeInSeconds period, or number of active
* currentlogs in the cache has reached the size of
* maximumNumberOfActiveEntryLogs, or if an entry is explicitly
* invalidated/removed. In these cases entry for that ledger is removed from
* cache. Since the entrylog of this ledger is not active anymore it has to
* be removed from replicaOfCurrentLogChannels and added to
* rotatedLogChannels.
*
* Because of performance/optimization concerns the cleanup maintenance
* operations won't happen automatically; for more info on eviction cleanup
* maintenance tasks -
* https://google.github.io/guava/releases/19.0/api/docs/com/google/
* common/cache/CacheBuilder.html
*
*/
private void onCacheEntryRemoval(RemovalNotification<Long, EntryLogAndLockTuple> removedLedgerEntryLogMapEntry) {
    Long ledgerId = removedLedgerEntryLogMapEntry.getKey();
    if (log.isDebugEnabled()) {
        log.debug("LedgerId {} is being evicted from the cache map because of {}", ledgerId,
                removedLedgerEntryLogMapEntry.getCause());
    }
    EntryLogAndLockTuple entryLogAndLockTuple = removedLedgerEntryLogMapEntry.getValue();
    if (entryLogAndLockTuple == null) {
        log.error("entryLogAndLockTuple is not supposed to be null in entry removal listener for ledger : {}",
                ledgerId);
        return;
    }
    Lock lock = entryLogAndLockTuple.ledgerLock;
    BufferedLogChannelWithDirInfo logChannelWithDirInfo = entryLogAndLockTuple.getEntryLogWithDirInfo();
    if (logChannelWithDirInfo == null) {
        log.error("logChannel for ledger: {} is not supposed to be null in entry removal listener", ledgerId);
        return;
    }
    // Take the per-ledger lock so no writer appends while this log is retired.
    lock.lock();
    try {
        BufferedLogChannel logChannel = logChannelWithDirInfo.getLogChannel();
        // Append ledgers map at the end of entry log
        try {
            logChannel.appendLedgersMap();
        } catch (Exception e) {
            // Message fixed: the catch is broader than IOException, so do not
            // claim an IOException in the log line.
            log.error("Got exception while trying to appendLedgersMap in cacheEntryRemoval callback", e);
        }
        // Retire the channel: drop it from the active-log replica map and
        // queue it for rotation/flush, then update the per-ledger stats.
        replicaOfCurrentLogChannels.remove(logChannel.getLogId());
        rotatedLogChannels.add(logChannel);
        entryLogsPerLedgerCounter.removedLedgerFromEntryLogMapCache(ledgerId,
                removedLedgerEntryLogMapEntry.getCause());
    } finally {
        lock.unlock();
    }
}
/**
 * Builds the listener that keeps each current log channel's "ledger dir
 * full" flag in sync with disk-full/disk-writable notifications.
 */
private LedgerDirsListener getLedgerDirsListener() {
    return new LedgerDirsListener() {
        @Override
        public void diskFull(File disk) {
            updateLedgerDirFullFlag(disk, true);
        }
        @Override
        public void diskWritable(File disk) {
            updateLedgerDirFullFlag(disk, false);
        }
    };
}

/**
 * Sets/clears the dir-full flag on every current log channel whose entry
 * log file lives under the given disk directory.
 */
private void updateLedgerDirFullFlag(File disk, boolean full) {
    for (BufferedLogChannelWithDirInfo logWithDirInfo : getCopyOfCurrentLogs()) {
        if (disk.equals(logWithDirInfo.getLogChannel().getLogFile().getParentFile())) {
            logWithDirInfo.setLedgerDirFull(full);
        }
    }
}
/**
 * Returns the per-ledger lock for the given ledger, loading the cache
 * entry on first access.
 *
 * @throws IOException if the cache lookup fails unexpectedly
 */
Lock getLock(long ledgerId) throws IOException {
    try {
        EntryLogAndLockTuple tuple = ledgerIdEntryLogMap.get(ledgerId);
        return tuple.getLedgerLock();
    } catch (Exception e) {
        log.error("Received unexpected exception while fetching lock to acquire for ledger: " + ledgerId, e);
        throw new IOException("Received unexpected exception while fetching lock to acquire", e);
    }
}
/*
* sets the logChannel for the given ledgerId. It will add the new
* logchannel to replicaOfCurrentLogChannels, and the previous one will
* be removed from replicaOfCurrentLogChannels. Previous logChannel will
* be added to rotatedLogChannels in both the cases.
*/
/**
 * Installs {@code logChannel} as the current entry log for {@code ledgerId},
 * publishes it to the active-log replica map, and queues the previous
 * channel (if any) for rotation.
 */
@Override
public void setCurrentLogForLedgerAndAddToRotate(long ledgerId, BufferedLogChannel logChannel) throws IOException {
    Lock lock = getLock(ledgerId);
    lock.lock();
    try {
        BufferedLogChannel hasToRotateLogChannel = getCurrentLogForLedger(ledgerId);
        // null means this ledger had no active entry log in the cache yet
        boolean newLedgerInEntryLogMapCache = (hasToRotateLogChannel == null);
        logChannel.setLedgerIdAssigned(ledgerId);
        BufferedLogChannelWithDirInfo logChannelWithDirInfo = new BufferedLogChannelWithDirInfo(logChannel);
        ledgerIdEntryLogMap.get(ledgerId).setEntryLogWithDirInfo(logChannelWithDirInfo);
        entryLogsPerLedgerCounter.openNewEntryLogForLedger(ledgerId, newLedgerInEntryLogMapCache);
        // publish the new channel before retiring the old one
        replicaOfCurrentLogChannels.put(logChannel.getLogId(), logChannelWithDirInfo);
        if (hasToRotateLogChannel != null) {
            replicaOfCurrentLogChannels.remove(hasToRotateLogChannel.getLogId());
            rotatedLogChannels.add(hasToRotateLogChannel);
        }
    } catch (Exception e) {
        log.error("Received unexpected exception while fetching entry from map for ledger: " + ledgerId, e);
        throw new IOException("Received unexpected exception while fetching entry from map", e);
    } finally {
        lock.unlock();
    }
}
/**
 * Returns the current entry log channel for the given ledger, or null if
 * the ledger has no active entry log yet.
 */
@Override
public BufferedLogChannel getCurrentLogForLedger(long ledgerId) throws IOException {
    BufferedLogChannelWithDirInfo logWithDirInfo = getCurrentLogWithDirInfoForLedger(ledgerId);
    return (logWithDirInfo == null) ? null : logWithDirInfo.getLogChannel();
}
/**
 * Returns the current entry log (with dir info) for the given ledger,
 * holding the per-ledger lock for the duration of the cache lookup.
 *
 * @throws IOException if the cache lookup fails unexpectedly
 */
public BufferedLogChannelWithDirInfo getCurrentLogWithDirInfoForLedger(long ledgerId) throws IOException {
    Lock ledgerLock = getLock(ledgerId);
    ledgerLock.lock();
    try {
        return ledgerIdEntryLogMap.get(ledgerId).getEntryLogWithDirInfo();
    } catch (Exception e) {
        log.error("Received unexpected exception while fetching entry from map for ledger: " + ledgerId, e);
        throw new IOException("Received unexpected exception while fetching entry from map", e);
    } finally {
        ledgerLock.unlock();
    }
}
/**
 * Returns a snapshot of all currently active entry log channels without
 * touching the cache's access times.
 */
public Set<BufferedLogChannelWithDirInfo> getCopyOfCurrentLogs() {
    return new HashSet<>(replicaOfCurrentLogChannels.values());
}
/**
 * Looks up an active entry log channel by entry log id, returning null
 * when that log is no longer a current log.
 */
@Override
public BufferedLogChannel getCurrentLogIfPresent(long entryLogId) {
    BufferedLogChannelWithDirInfo logWithDirInfo = replicaOfCurrentLogChannels.get(entryLogId);
    return (logWithDirInfo == null) ? null : logWithDirInfo.getLogChannel();
}
/** Checkpoint flushes both rotated logs and all current per-ledger logs. */
@Override
public void checkpoint() throws IOException {
    /*
     * In the case of entryLogPerLedgerEnabled we need to flush
     * both rotatedlogs and currentlogs. This is needed because
     * syncThread periodically does checkpoint and at this time
     * all the logs should be flushed.
     *
     */
    super.flush();
}
/** Intentionally a no-op in the entry-log-per-ledger mode. */
@Override
public void prepareSortedLedgerStorageCheckpoint(long numBytesFlushed) throws IOException {
    // do nothing
    /*
     * prepareSortedLedgerStorageCheckpoint is required for
     * singleentrylog scenario, but it is not needed for
     * entrylogperledger scenario, since entries of a ledger go
     * to a entrylog (even during compaction) and SyncThread
     * drives periodic checkpoint logic.
     */
}
/** No preparation is needed before an entry memtable flush in this mode. */
@Override
public void prepareEntryMemTableFlush() {
    // do nothing
}
/**
 * After a memtable flush, rolls any current entry log that has reached its
 * size limit. Returns false: with entry-log-per-ledger, checkpointing is
 * driven by the SyncThread, not by the memtable.
 */
@Override
public boolean commitEntryMemTableFlush() throws IOException {
    // lock it only if there is new data
    // so that cache accesstime is not changed
    Set<BufferedLogChannelWithDirInfo> copyOfCurrentLogsWithDirInfo = getCopyOfCurrentLogs();
    for (BufferedLogChannelWithDirInfo currentLogWithDirInfo : copyOfCurrentLogsWithDirInfo) {
        BufferedLogChannel currentLog = currentLogWithDirInfo.getLogChannel();
        if (reachEntryLogLimit(currentLog, 0L)) {
            Long ledgerId = currentLog.getLedgerIdAssigned();
            Lock lock = getLock(ledgerId);
            lock.lock();
            try {
                // re-check under the lock: another writer may have rolled it already
                if (reachEntryLogLimit(currentLog, 0L)) {
                    log.info("Rolling entry logger since it reached size limitation for ledger: {}", ledgerId);
                    createNewLog(ledgerId, "after entry log file is rotated");
                }
            } finally {
                lock.unlock();
            }
        }
    }
    /*
     * in the case of entrylogperledger, SyncThread drives
     * checkpoint logic for every flushInterval. So
     * EntryMemtable doesn't need to call checkpoint in the case
     * of entrylogperledger.
     */
    return false;
}
/*
* this is for testing purposes only. guava's cache doesn't clean up
* completely (including calling the expiry removal listener) automatically
* when the access timeout elapses.
*
* https://google.github.io/guava/releases/19.0/api/docs/com/google/
* common/cache/CacheBuilder.html
*
* If expireAfterWrite or expireAfterAccess is requested entries may be
* evicted on each cache modification, on occasional cache accesses, or
* on calls to Cache.cleanUp(). Expired entries may be counted by
* Cache.size(), but will never be visible to read or write operations.
*
* Certain cache configurations will result in the accrual of periodic
* maintenance tasks which will be performed during write operations, or
* during occasional read operations in the absence of writes. The
* Cache.cleanUp() method of the returned cache will also perform
* maintenance, but calling it should not be necessary with a high
* throughput cache. Only caches built with removalListener,
* expireAfterWrite, expireAfterAccess, weakKeys, weakValues, or
* softValues perform periodic maintenance.
*/
/**
 * Test-only: force guava cache maintenance so pending evictions (and their
 * removal listeners) run now; see the comment above on guava's lazy cleanup.
 */
@VisibleForTesting
void doEntryLogMapCleanup() {
    ledgerIdEntryLogMap.cleanUp();
}
/** Test-only view of the ledgerId -> entry log cache. */
@VisibleForTesting
ConcurrentMap<Long, EntryLogAndLockTuple> getCacheAsMap() {
    return ledgerIdEntryLogMap.asMap();
}
/*
 * Returns the writable ledger dir with the least number of currently
 * active entry logs, so new entry logs spread evenly across dirs.
 */
@Override
public File getDirForNextEntryLog(List<File> writableLedgerDirs) {
    // seed every writable dir with a zero count so dirs with no active logs can win
    Map<File, MutableInt> writableLedgerDirFrequency = new HashMap<File, MutableInt>();
    writableLedgerDirs.stream()
            .forEach((ledgerDir) -> writableLedgerDirFrequency.put(ledgerDir, new MutableInt()));
    for (BufferedLogChannelWithDirInfo logChannelWithDirInfo : replicaOfCurrentLogChannels.values()) {
        File parentDirOfCurrentLogChannel = logChannelWithDirInfo.getLogChannel().getLogFile().getParentFile();
        if (writableLedgerDirFrequency.containsKey(parentDirOfCurrentLogChannel)) {
            writableLedgerDirFrequency.get(parentDirOfCurrentLogChannel).increment();
        }
    }
    // min() with Map.Entry.comparingByValue() is typesafe (MutableInt is
    // Comparable), so the previous @SuppressWarnings("unchecked") was
    // unnecessary and has been removed. Callers must pass a non-empty
    // writableLedgerDirs list; otherwise get() throws NoSuchElementException.
    Optional<Entry<File, MutableInt>> ledgerDirWithLeastNumofCurrentLogs = writableLedgerDirFrequency.entrySet()
            .stream().min(Map.Entry.comparingByValue());
    return ledgerDirWithLeastNumofCurrentLogs.get().getKey();
}
/**
 * Closes every current entry log channel. Unlike the previous version,
 * one failing close no longer aborts the loop (which leaked the remaining
 * channels' file descriptors): all channels are closed and the first
 * failure is rethrown with the rest attached as suppressed exceptions.
 */
@Override
public void close() throws IOException {
    IOException firstException = null;
    for (BufferedLogChannelWithDirInfo currentLogWithDirInfo : getCopyOfCurrentLogs()) {
        BufferedLogChannel logChannel = currentLogWithDirInfo.getLogChannel();
        if (logChannel != null) {
            try {
                logChannel.close();
            } catch (IOException e) {
                if (firstException == null) {
                    firstException = e;
                } else {
                    firstException.addSuppressed(e);
                }
            }
        }
    }
    if (firstException != null) {
        throw firstException;
    }
}
/**
 * Best-effort close of every current entry log channel; close failures are
 * logged by IOUtils rather than propagated.
 */
@Override
public void forceClose() {
    for (BufferedLogChannelWithDirInfo logWithDirInfo : getCopyOfCurrentLogs()) {
        IOUtils.close(log, logWithDirInfo.getLogChannel());
    }
}
/**
 * Flushes every currently active entry log channel. This runs during
 * checkpoint, so file metadata is force-written as well.
 */
@Override
void flushCurrentLogs() throws IOException {
    for (BufferedLogChannelWithDirInfo logWithDirInfo : getCopyOfCurrentLogs()) {
        // forceMetadata = true: a checkpoint must also persist file metadata
        flushLogChannel(logWithDirInfo.getLogChannel(), true);
    }
}
/**
 * Transactional compaction is incompatible with the entry-log-per-ledger
 * layout, so this operation always fails.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public BufferedLogChannel createNewLogForCompaction() throws IOException {
    throw new UnsupportedOperationException(
        "When entryLogPerLedger is enabled, transactional compaction should have been disabled");
}
/**
 * Adds an entry while holding the per-ledger lock, so concurrent writers to
 * the same ledger cannot interleave on its entry log.
 */
@Override
public long addEntry(long ledger, ByteBuf entry, boolean rollLog) throws IOException {
    Lock lock = getLock(ledger);
    lock.lock();
    try {
        return super.addEntry(ledger, entry, rollLog);
    } finally {
        lock.unlock();
    }
}
/** Rolls to a new entry log for the given ledger under its per-ledger lock. */
@Override
void createNewLog(long ledgerId) throws IOException {
    Lock lock = getLock(ledgerId);
    lock.lock();
    try {
        super.createNewLog(ledgerId);
    } finally {
        lock.unlock();
    }
}
/**
 * Returns the entry log channel to append to for the given ledger, rolling
 * to a new log first when the current log reached its limit, its disk is
 * full, or no log exists yet.
 *
 * @param ledgerId  ledger the entry belongs to
 * @param entrySize size of the entry about to be added (for limit checks)
 * @param rollLog   true checks the regular limit, false the hard limit
 */
@Override
BufferedLogChannel getCurrentLogForLedgerForAddEntry(long ledgerId, int entrySize, boolean rollLog)
        throws IOException {
    Lock lock = getLock(ledgerId);
    lock.lock();
    try {
        BufferedLogChannelWithDirInfo logChannelWithDirInfo = getCurrentLogWithDirInfoForLedger(ledgerId);
        BufferedLogChannel logChannel = null;
        if (logChannelWithDirInfo != null) {
            logChannel = logChannelWithDirInfo.getLogChannel();
        }
        boolean reachEntryLogLimit = rollLog ? reachEntryLogLimit(logChannel, entrySize)
                : readEntryLogHardLimit(logChannel, entrySize);
        // Create new log if logSizeLimit reached or current disk is full
        boolean diskFull = (logChannel == null) ? false : logChannelWithDirInfo.isLedgerDirFull();
        boolean allDisksFull = !ledgerDirsManager.hasWritableLedgerDirs();
        /**
         * if disk of the logChannel is full or if the entrylog limit is
         * reached or if the logchannel is not initialized, then
         * createNewLog. If allDisks are full then proceed with the current
         * logChannel, since Bookie must have turned to readonly mode and
         * the addEntry traffic would be from GC and it is ok to proceed in
         * this case.
         */
        if ((diskFull && (!allDisksFull)) || reachEntryLogLimit || (logChannel == null)) {
            if (logChannel != null) {
                logChannel.flushAndForceWriteIfRegularFlush(false);
            }
            createNewLog(ledgerId,
                    ": diskFull = " + diskFull + ", allDisksFull = " + allDisksFull
                            + ", reachEntryLogLimit = " + reachEntryLogLimit + ", logChannel = " + logChannel);
        }
        return getCurrentLogForLedger(ledgerId);
    } finally {
        lock.unlock();
    }
}
/**
 * Flushes, force-writes and closes every rotated entry log, then marks each
 * one flushed in recentlyCreatedEntryLogsStatus.
 */
@Override
public void flushRotatedLogs() throws IOException {
    // rotatedLogChannels is a CopyOnWriteArrayList (see the constructor), so
    // removing elements while iterating is safe: the iterator uses a snapshot.
    for (BufferedLogChannel channel : rotatedLogChannels) {
        channel.flushAndForceWrite(true);
        // since this channel is only used for writing, after flushing the channel,
        // we had to close the underlying file channel. Otherwise, we might end up
        // leaking fds which cause the disk spaces could not be reclaimed.
        channel.close();
        recentlyCreatedEntryLogsStatus.flushRotatedEntryLog(channel.getLogId());
        rotatedLogChannels.remove(channel);
        log.info("Synced entry logger {} to disk.", channel.getLogId());
    }
}
}
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static org.apache.bookkeeper.util.BookKeeperConstants.METADATA_CACHE;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import io.netty.util.concurrent.DefaultThreadFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import lombok.Getter;
import org.apache.bookkeeper.bookie.BookieException.EntryLogMetadataMapException;
import org.apache.bookkeeper.bookie.GarbageCollector.GarbageCleaner;
import org.apache.bookkeeper.bookie.stats.GarbageCollectorStats;
import org.apache.bookkeeper.bookie.storage.EntryLogger;
import org.apache.bookkeeper.bookie.storage.ldb.PersistentEntryLogMetadataMap;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.MathUtils;
import org.apache.commons.lang3.mutable.MutableBoolean;
import org.apache.commons.lang3.mutable.MutableLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This is the garbage collector thread that runs in the background to
* remove any entry log files that no longer contain any active ledger.
*/
public class GarbageCollectorThread implements Runnable {
private static final Logger LOG = LoggerFactory.getLogger(GarbageCollectorThread.class);
private static final int SECOND = 1000;
private static final long MINUTE = TimeUnit.MINUTES.toMillis(1);
// Maps entry log files to the set of ledgers that comprise the file and the size usage per ledger
private EntryLogMetadataMap entryLogMetaMap;
// Executor on which GC passes and compactions are scheduled.
private final ScheduledExecutorService gcExecutor;
// Handle of the periodic GC schedule; cancelled/replaced by start().
Future<?> scheduledFuture = null;
// This is how often we want to run the Garbage Collector Thread (in milliseconds).
final long gcWaitTime;
// Compaction parameters
boolean isForceMinorCompactionAllow = false;
boolean enableMinorCompaction = false;
final double minorCompactionThreshold;
final long minorCompactionInterval;
final long minorCompactionMaxTimeMillis;
long lastMinorCompactionTime;
boolean isForceMajorCompactionAllow = false;
boolean enableMajorCompaction = false;
final double majorCompactionThreshold;
final long majorCompactionInterval;
long majorCompactionMaxTimeMillis;
long lastMajorCompactionTime;
@Getter
final boolean isForceGCAllowWhenNoSpace;
// Entry Logger Handle
final EntryLogger entryLogger;
// Compactor implementation: transactional or plain, per configuration.
final AbstractLogCompactor compactor;
// Stats loggers for garbage collection operations
private final GarbageCollectorStats gcStats;
private volatile long totalEntryLogSize;
private volatile int numActiveEntryLogs;
final CompactableLedgerStorage ledgerStorage;
// flag to ensure gc thread will not be interrupted during compaction
// to reduce the risk getting entry log corrupted
final AtomicBoolean compacting = new AtomicBoolean(false);
// use to get the compacting status
final AtomicBoolean minorCompacting = new AtomicBoolean(false);
final AtomicBoolean majorCompacting = new AtomicBoolean(false);
volatile boolean running = true;
// Boolean to trigger a forced GC.
final AtomicBoolean forceGarbageCollection = new AtomicBoolean(false);
// Boolean to disable major compaction, when disk is almost full
final AtomicBoolean suspendMajorCompaction = new AtomicBoolean(false);
// Boolean to disable minor compaction, when disk is full
final AtomicBoolean suspendMinorCompaction = new AtomicBoolean(false);
// Finds ledgers deleted in metadata and feeds them to garbageCleaner.
final ScanAndCompareGarbageCollector garbageCollector;
final GarbageCleaner garbageCleaner;
final ServerConfiguration conf;
final LedgerDirsManager ledgerDirsManager;
private static final AtomicLong threadNum = new AtomicLong(0);
// Rate limiter used by compaction, configured from ServerConfiguration.
final AbstractLogCompactor.Throttler throttler;
/**
 * Create a garbage collector thread that runs on its own dedicated
 * single-threaded scheduled executor.
 *
 * @param conf
 *          Server Configuration Object.
 * @throws IOException
 */
public GarbageCollectorThread(ServerConfiguration conf, LedgerManager ledgerManager,
        final LedgerDirsManager ledgerDirsManager,
        final CompactableLedgerStorage ledgerStorage,
        EntryLogger entryLogger,
        StatsLogger statsLogger) throws IOException {
    this(conf, ledgerManager, ledgerDirsManager, ledgerStorage, entryLogger, statsLogger,
        Executors.newSingleThreadScheduledExecutor(new DefaultThreadFactory("GarbageCollectorThread")));
}
/**
 * Create a garbage collector thread.
 *
 * @param conf
 *          Server Configuration Object.
 * @param gcExecutor
 *          executor on which GC passes are scheduled.
 * @throws IOException if compaction settings are invalid or the entry log
 *          metadata map cannot be created.
 */
public GarbageCollectorThread(ServerConfiguration conf,
                              LedgerManager ledgerManager,
                              final LedgerDirsManager ledgerDirsManager,
                              final CompactableLedgerStorage ledgerStorage,
                              EntryLogger entryLogger,
                              StatsLogger statsLogger,
                              ScheduledExecutorService gcExecutor)
        throws IOException {
    this.gcExecutor = gcExecutor;
    this.conf = conf;
    this.ledgerDirsManager = ledgerDirsManager;
    this.entryLogger = entryLogger;
    this.entryLogMetaMap = createEntryLogMetadataMap();
    this.ledgerStorage = ledgerStorage;
    this.gcWaitTime = conf.getGcWaitTime();
    this.numActiveEntryLogs = 0;
    this.totalEntryLogSize = 0L;
    this.garbageCollector = new ScanAndCompareGarbageCollector(ledgerManager, ledgerStorage, conf, statsLogger);
    this.gcStats = new GarbageCollectorStats(
        statsLogger,
        () -> numActiveEntryLogs,
        () -> totalEntryLogSize,
        () -> garbageCollector.getNumActiveLedgers()
    );
    // Cleaner callback: removes the ledger's data from ledger storage and
    // bumps the deleted-ledger counter; IO failures are logged, not thrown.
    this.garbageCleaner = ledgerId -> {
        try {
            if (LOG.isDebugEnabled()) {
                LOG.debug("delete ledger : " + ledgerId);
            }
            gcStats.getDeletedLedgerCounter().inc();
            ledgerStorage.deleteLedger(ledgerId);
        } catch (IOException e) {
            LOG.error("Exception when deleting the ledger index file on the Bookie: ", e);
        }
    };
    // compaction parameters
    minorCompactionThreshold = conf.getMinorCompactionThreshold();
    minorCompactionInterval = conf.getMinorCompactionInterval() * SECOND;
    majorCompactionThreshold = conf.getMajorCompactionThreshold();
    majorCompactionInterval = conf.getMajorCompactionInterval() * SECOND;
    isForceGCAllowWhenNoSpace = conf.getIsForceGCAllowWhenNoSpace();
    majorCompactionMaxTimeMillis = conf.getMajorCompactionMaxTimeMillis();
    minorCompactionMaxTimeMillis = conf.getMinorCompactionMaxTimeMillis();
    boolean isForceAllowCompaction = conf.isForceAllowCompaction();
    // Drops an entry log's metadata once compaction no longer needs the log;
    // metadata-map failures are deferred to the next scheduled pass.
    AbstractLogCompactor.LogRemovalListener remover = new AbstractLogCompactor.LogRemovalListener() {
        @Override
        public void removeEntryLog(long logToRemove) {
            try {
                GarbageCollectorThread.this.removeEntryLog(logToRemove);
            } catch (EntryLogMetadataMapException e) {
                // Ignore and continue because ledger will not be cleaned up
                // from entry-logger in this pass and will be taken care in
                // next schedule task
                LOG.warn("Failed to remove entry-log metadata {}", logToRemove, e);
            }
        }
    };
    if (conf.getUseTransactionalCompaction()) {
        this.compactor = new TransactionalEntryLogCompactor(conf, entryLogger, ledgerStorage, remover);
    } else {
        this.compactor = new EntryLogCompactor(conf, entryLogger, ledgerStorage, remover);
    }
    this.throttler = new AbstractLogCompactor.Throttler(conf);
    // Minor compaction is enabled only when both knobs are positive; the
    // threshold is a ratio in (0, 1] and the interval must not be shorter
    // than the GC period.
    if (minorCompactionInterval > 0 && minorCompactionThreshold > 0) {
        if (minorCompactionThreshold > 1.0d) {
            throw new IOException("Invalid minor compaction threshold "
                + minorCompactionThreshold);
        }
        if (minorCompactionInterval < gcWaitTime) {
            throw new IOException("Too short minor compaction interval : "
                + minorCompactionInterval);
        }
        enableMinorCompaction = true;
    }
    if (isForceAllowCompaction) {
        if (minorCompactionThreshold > 0 && minorCompactionThreshold < 1.0d) {
            isForceMinorCompactionAllow = true;
        }
        if (majorCompactionThreshold > 0 && majorCompactionThreshold < 1.0d) {
            isForceMajorCompactionAllow = true;
        }
    }
    // Same validation for major compaction.
    if (majorCompactionInterval > 0 && majorCompactionThreshold > 0) {
        if (majorCompactionThreshold > 1.0d) {
            throw new IOException("Invalid major compaction threshold "
                + majorCompactionThreshold);
        }
        if (majorCompactionInterval < gcWaitTime) {
            throw new IOException("Too short major compaction interval : "
                + majorCompactionInterval);
        }
        enableMajorCompaction = true;
    }
    // Minor compaction must run more often and at a lower threshold than major.
    if (enableMinorCompaction && enableMajorCompaction) {
        if (minorCompactionInterval >= majorCompactionInterval
            || minorCompactionThreshold >= majorCompactionThreshold) {
            throw new IOException("Invalid minor/major compaction settings : minor ("
                + minorCompactionThreshold + ", " + minorCompactionInterval
                + "), major (" + majorCompactionThreshold + ", "
                + majorCompactionInterval + ")");
        }
    }
    LOG.info("Minor Compaction : enabled=" + enableMinorCompaction + ", threshold="
        + minorCompactionThreshold + ", interval=" + minorCompactionInterval);
    LOG.info("Major Compaction : enabled=" + enableMajorCompaction + ", threshold="
        + majorCompactionThreshold + ", interval=" + majorCompactionInterval);
    lastMinorCompactionTime = lastMajorCompactionTime = System.currentTimeMillis();
}
/**
 * Builds the entry-log metadata map: a persistent on-disk map when the GC
 * metadata cache is enabled (under the configured path, defaulting to the
 * first ledger dir), otherwise an in-memory map.
 *
 * @throws IOException if the persistent map cannot be initialized
 */
private EntryLogMetadataMap createEntryLogMetadataMap() throws IOException {
    if (!conf.isGcEntryLogMetadataCacheEnabled()) {
        return new InMemoryEntryLogMetadataMap();
    }
    String configuredPath = conf.getGcEntryLogMetadataCachePath();
    String baseDir = Strings.isNullOrEmpty(configuredPath)
            ? this.ledgerDirsManager.getAllLedgerDirs().get(0).getPath()
            : configuredPath;
    try {
        return new PersistentEntryLogMetadataMap(baseDir, conf);
    } catch (IOException e) {
        LOG.error("Failed to initialize persistent-metadata-map , clean up {}",
                baseDir + "/" + METADATA_CACHE, e);
        throw e;
    }
}
/**
 * Requests a forced garbage collection pass honoring the current
 * major/minor compaction suspension flags; no-op when a forced GC is
 * already pending.
 */
public void enableForceGC() {
    if (!forceGarbageCollection.compareAndSet(false, true)) {
        return;
    }
    LOG.info("Forced garbage collection triggered by thread: {}", Thread.currentThread().getName());
    triggerGC(true, suspendMajorCompaction.get(), suspendMinorCompaction.get());
}
/**
 * Requests a forced garbage collection pass, explicitly choosing whether
 * major and/or minor compaction take part; no-op when a forced GC is
 * already pending.
 */
public void enableForceGC(boolean forceMajor, boolean forceMinor) {
    if (!forceGarbageCollection.compareAndSet(false, true)) {
        return;
    }
    LOG.info("Forced garbage collection triggered by thread: {}, forceMajor: {}, forceMinor: {}",
            Thread.currentThread().getName(), forceMajor, forceMinor);
    triggerGC(true, !forceMajor, !forceMinor);
}
/**
 * Clear a pending force-GC request (e.g. once disk space has recovered).
 * No-op if force GC was not enabled.
 */
public void disableForceGC() {
    if (forceGarbageCollection.compareAndSet(true, false)) {
        LOG.info("{} disabled force garbage collection since bookie has enough space now.", Thread
                .currentThread().getName());
    }
}
/**
 * Schedule one GC pass on the GC executor using the caller-supplied flags.
 *
 * @param force whether this pass is treated as a forced collection
 * @param suspendMajor whether major compaction is skipped during this pass
 * @param suspendMinor whether minor compaction is skipped during this pass
 * @return future completing when the scheduled pass finishes
 */
Future<?> triggerGC(final boolean force,
                    final boolean suspendMajor,
                    final boolean suspendMinor) {
    return gcExecutor.submit(() -> {
        runWithFlags(force, suspendMajor, suspendMinor);
    });
}
/**
 * Schedule one GC pass using a snapshot of the current force/suspend flags
 * (snapshot is taken at schedule time, not at execution time).
 *
 * @return future completing when the scheduled pass finishes
 */
Future<?> triggerGC() {
    final boolean force = forceGarbageCollection.get();
    final boolean suspendMajor = suspendMajorCompaction.get();
    final boolean suspendMinor = suspendMinorCompaction.get();
    return gcExecutor.submit(() -> {
        runWithFlags(force, suspendMajor, suspendMinor);
    });
}
/** @return true while a forced garbage collection is pending or in progress. */
public boolean isInForceGC() {
    return forceGarbageCollection.get();
}
/** @return true if major compaction is currently suspended. */
public boolean isMajorGcSuspend() {
    return suspendMajorCompaction.get();
}
/** @return true if minor compaction is currently suspended. */
public boolean isMinorGcSuspend() {
    return suspendMinorCompaction.get();
}
/** Suspend major compaction (e.g. when the disk is nearly full). Idempotent. */
public void suspendMajorGC() {
    if (suspendMajorCompaction.compareAndSet(false, true)) {
        LOG.info("Suspend Major Compaction triggered by thread: {}", Thread.currentThread().getName());
    }
}
/** Resume major compaction after it had been suspended. Idempotent. */
public void resumeMajorGC() {
    if (suspendMajorCompaction.compareAndSet(true, false)) {
        LOG.info("{} Major Compaction back to normal since bookie has enough space now.",
                Thread.currentThread().getName());
    }
}
/** Suspend minor compaction (e.g. when the disk is full). Idempotent. */
public void suspendMinorGC() {
    if (suspendMinorCompaction.compareAndSet(false, true)) {
        LOG.info("Suspend Minor Compaction triggered by thread: {}", Thread.currentThread().getName());
    }
}
/** Resume minor compaction after it had been suspended. Idempotent. */
public void resumeMinorGC() {
    if (suspendMinorCompaction.compareAndSet(true, false)) {
        LOG.info("{} Minor Compaction back to normal since bookie has enough space now.",
                Thread.currentThread().getName());
    }
}
/**
 * Start (or restart) the periodic GC task. A previously scheduled task is cancelled
 * first — without interrupting a run in progress (cancel(false)) — then the task is
 * rescheduled at the configured gcWaitTime period with a staggered initial delay.
 */
public void start() {
    if (scheduledFuture != null) {
        scheduledFuture.cancel(false);
    }
    long initialDelay = getModInitialDelay();
    scheduledFuture = gcExecutor.scheduleAtFixedRate(this, initialDelay, gcWaitTime, TimeUnit.MILLISECONDS);
}
/**
 * Compute a staggered initial delay for this GC thread so that, when a bookie runs
 * one GarbageCollectorThread per ledger directory, the threads do not all start
 * working at the same moment (deleting ledgers triggers timed rocksDB compaction in
 * SyncThread, and entry compaction is CPU-heavy). The gcWaitTime window is split
 * evenly across the directories and each thread is offset by its slot.
 *
 * @return gcWaitTime plus this thread's per-directory offset, in milliseconds
 */
public long getModInitialDelay() {
    // Guard against a zero-length ledger-dirs array to avoid ArithmeticException;
    // with a single directory the behavior is unchanged.
    int ledgerDirsNum = Math.max(1, conf.getLedgerDirs().length);
    long splitTime = gcWaitTime / ledgerDirsNum;
    long currentThreadNum = threadNum.incrementAndGet();
    return gcWaitTime + currentThreadNum * splitTime;
}
/**
 * One scheduled GC pass: snapshot the force/suspend flags, run with them, then
 * clear the force flag only if it was set when this pass started (so a force
 * request arriving mid-pass is not lost).
 */
@Override
public void run() {
    boolean force = forceGarbageCollection.get();
    boolean suspendMajor = suspendMajorCompaction.get();
    boolean suspendMinor = suspendMinorCompaction.get();
    runWithFlags(force, suspendMajor, suspendMinor);
    if (force) {
        // only set force to false if it had been true when the garbage
        // collection cycle started
        forceGarbageCollection.set(false);
    }
}
/**
 * Execute one full garbage-collection pass: recover transactional-compaction state,
 * reclaim deleted ledgers, extract entry-log metadata, delete empty entry logs, and
 * — when due (or forced) and not suspended — run major or minor compaction. Major
 * compaction takes precedence; at most one of the two runs in a single pass.
 *
 * @param force run compaction regardless of the configured intervals
 * @param suspendMajor skip major compaction in this pass
 * @param suspendMinor skip minor compaction in this pass
 */
public void runWithFlags(boolean force, boolean suspendMajor, boolean suspendMinor) {
    long threadStart = MathUtils.nowInNano();
    if (force) {
        LOG.info("Garbage collector thread forced to perform GC before expiry of wait time.");
    }
    // Recover and clean up previous state if using transactional compaction
    compactor.cleanUpAndRecover();
    try {
        // gc inactive/deleted ledgers
        // this is used in extractMetaFromEntryLogs to calculate the usage of entry log
        doGcLedgers();
        // Extract all of the ledger ID's that comprise all of the entry logs
        // (except for the current new one which is still being written to).
        extractMetaFromEntryLogs();
        // gc entry logs
        doGcEntryLogs();
        if (suspendMajor) {
            LOG.info("Disk almost full, suspend major compaction to slow down filling disk.");
        }
        if (suspendMinor) {
            LOG.info("Disk full, suspend minor compaction to slow down filling disk.");
        }
        long curTime = System.currentTimeMillis();
        // Major compaction: forced (when allowed) or interval elapsed, and not suspended.
        if (((isForceMajorCompactionAllow && force) || (enableMajorCompaction
                && (force || curTime - lastMajorCompactionTime > majorCompactionInterval)))
                && (!suspendMajor)) {
            // enter major compaction
            LOG.info("Enter major compaction, suspendMajor {}", suspendMajor);
            majorCompacting.set(true);
            try {
                doCompactEntryLogs(majorCompactionThreshold, majorCompactionMaxTimeMillis);
            } finally {
                lastMajorCompactionTime = System.currentTimeMillis();
                // and also move minor compaction time
                lastMinorCompactionTime = lastMajorCompactionTime;
                gcStats.getMajorCompactionCounter().inc();
                majorCompacting.set(false);
            }
        } else if (((isForceMinorCompactionAllow && force) || (enableMinorCompaction
                && (force || curTime - lastMinorCompactionTime > minorCompactionInterval)))
                && (!suspendMinor)) {
            // enter minor compaction
            LOG.info("Enter minor compaction, suspendMinor {}", suspendMinor);
            minorCompacting.set(true);
            try {
                doCompactEntryLogs(minorCompactionThreshold, minorCompactionMaxTimeMillis);
            } finally {
                lastMinorCompactionTime = System.currentTimeMillis();
                gcStats.getMinorCompactionCounter().inc();
                minorCompacting.set(false);
            }
        }
        gcStats.getGcThreadRuntime().registerSuccessfulEvent(
                MathUtils.nowInNano() - threadStart, TimeUnit.NANOSECONDS);
    } catch (EntryLogMetadataMapException e) {
        LOG.error("Error in entryLog-metadatamap, Failed to complete GC/Compaction due to entry-log {}",
                e.getMessage(), e);
        gcStats.getGcThreadRuntime().registerFailedEvent(
                MathUtils.nowInNano() - threadStart, TimeUnit.NANOSECONDS);
    } finally {
        // Re-arm force GC even on failure so callers can trigger it again.
        if (force && forceGarbageCollection.compareAndSet(true, false)) {
            LOG.info("{} Set forceGarbageCollection to false after force GC to make it forceGC-able again.",
                    Thread.currentThread().getName());
        }
    }
}
/**
 * Do garbage collection ledger index files: delegate to the configured
 * {@code garbageCollector}, which invokes {@code garbageCleaner} for each
 * ledger that is no longer active.
 */
private void doGcLedgers() {
    garbageCollector.gc(garbageCleaner);
}
/**
 * Garbage collect those entry loggers which are not associated with any active ledgers.
 * For each known entry log: drop ledgers that no longer exist, delete the log outright
 * if nothing remains, otherwise persist the trimmed metadata when it changed. Finally
 * refreshes the {@code totalEntryLogSize} and {@code numActiveEntryLogs} gauges.
 */
private void doGcEntryLogs() throws EntryLogMetadataMapException {
    // Get a cumulative count, don't update until complete
    AtomicLong totalEntryLogSizeAcc = new AtomicLong(0L);
    // Loop through all of the entry logs and remove the non-active ledgers.
    entryLogMetaMap.forEach((entryLogId, meta) -> {
        try {
            boolean modified = removeIfLedgerNotExists(meta);
            if (meta.isEmpty()) {
                // This means the entry log is not associated with any active
                // ledgers anymore.
                // We can remove this entry log file now.
                LOG.info("Deleting entryLogId {} as it has no active ledgers!", entryLogId);
                if (removeEntryLog(entryLogId)) {
                    gcStats.getReclaimedSpaceViaDeletes().addCount(meta.getTotalSize());
                } else {
                    gcStats.getReclaimFailedToDelete().inc();
                }
            } else if (modified) {
                // update entryLogMetaMap only when the meta modified.
                entryLogMetaMap.put(meta.getEntryLogId(), meta);
            }
        } catch (EntryLogMetadataMapException e) {
            // Ignore and continue because ledger will not be cleaned up
            // from entry-logger in this pass and will be taken care in next
            // schedule task
            LOG.warn("Failed to remove ledger from entry-log metadata {}", entryLogId, e);
        }
        // Accumulated even on failure above, so the gauge reflects what is on disk.
        totalEntryLogSizeAcc.getAndAdd(meta.getRemainingSize());
    });
    this.totalEntryLogSize = totalEntryLogSizeAcc.get();
    this.numActiveEntryLogs = entryLogMetaMap.size();
}
/**
 * Remove from {@code meta} every ledger that no longer exists in the ledger storage.
 * Ledgers whose existence cannot be determined (storage read error) are kept.
 *
 * @param meta entry-log metadata to trim in place
 * @return true if at least one ledger was removed
 */
private boolean removeIfLedgerNotExists(EntryLogMetadata meta) throws EntryLogMetadataMapException {
    MutableBoolean changed = new MutableBoolean(false);
    meta.removeLedgerIf(ledgerId -> {
        try {
            if (ledgerStorage.ledgerExists(ledgerId)) {
                return false;
            }
            changed.setTrue();
            return true;
        } catch (IOException e) {
            // On a storage error, err on the side of keeping the ledger.
            LOG.error("Error reading from ledger storage", e);
            return false;
        }
    });
    return changed.getValue();
}
/**
 * Compact entry logs if necessary.
 *
 * <p>
 * Compaction will be executed from low unused space to high unused space.
 * Those entry log files whose remaining size percentage is higher than threshold
 * would not be compacted.
 * </p>
 *
 * @param threshold usage ratio (0.0-1.0); logs at or above it are not compacted
 * @param maxTimeMillis time budget for this pass; a value &lt;= 0 means no limit
 */
@VisibleForTesting
void doCompactEntryLogs(double threshold, long maxTimeMillis) throws EntryLogMetadataMapException {
    LOG.info("Do compaction to compact those files lower than {}", threshold);
    final int numBuckets = 10;
    int[] entryLogUsageBuckets = new int[numBuckets];
    int[] compactedBuckets = new int[numBuckets];
    // One candidate list per usage decile; lower-usage buckets are compacted first.
    ArrayList<LinkedList<Long>> compactableBuckets = new ArrayList<>(numBuckets);
    for (int i = 0; i < numBuckets; i++) {
        compactableBuckets.add(new LinkedList<>());
    }
    long start = System.currentTimeMillis();
    MutableLong end = new MutableLong(start);
    MutableLong timeDiff = new MutableLong(0);
    entryLogMetaMap.forEach((entryLogId, meta) -> {
        double usage = meta.getUsage();
        if (conf.isUseTargetEntryLogSizeForGc() && usage < 1.0d) {
            usage = (double) meta.getRemainingSize() / Math.max(meta.getTotalSize(), conf.getEntryLogSizeLimit());
        }
        int bucketIndex = calculateUsageIndex(numBuckets, usage);
        entryLogUsageBuckets[bucketIndex]++;
        // Stop refreshing the clock once the time budget is exhausted.
        if (timeDiff.getValue() < maxTimeMillis) {
            end.setValue(System.currentTimeMillis());
            timeDiff.setValue(end.getValue() - start);
        }
        if ((usage >= threshold
                || (maxTimeMillis > 0 && timeDiff.getValue() >= maxTimeMillis)
                || !running)) {
            // We allow the usage limit calculation to continue so that we get an accurate
            // report of where the usage was prior to running compaction.
            return;
        }
        compactableBuckets.get(bucketIndex).add(meta.getEntryLogId());
    });
    LOG.info(
            "Compaction: entry log usage buckets before compaction [10% 20% 30% 40% 50% 60% 70% 80% 90% 100%] = {}",
            entryLogUsageBuckets);
    final int maxBucket = calculateUsageIndex(numBuckets, threshold);
    int totalEntryLogIds = 0;
    for (int currBucket = 0; currBucket <= maxBucket; currBucket++) {
        totalEntryLogIds += compactableBuckets.get(currBucket).size();
    }
    long lastPrintTimestamp = 0;
    AtomicInteger processedEntryLogCnt = new AtomicInteger(0);
    stopCompaction:
    for (int currBucket = 0; currBucket <= maxBucket; currBucket++) {
        LinkedList<Long> entryLogIds = compactableBuckets.get(currBucket);
        while (!entryLogIds.isEmpty()) {
            if (timeDiff.getValue() < maxTimeMillis) {
                end.setValue(System.currentTimeMillis());
                timeDiff.setValue(end.getValue() - start);
            }
            if ((maxTimeMillis > 0 && timeDiff.getValue() >= maxTimeMillis) || !running) {
                // We allow the usage limit calculation to continue so that we get an accurate
                // report of where the usage was prior to running compaction.
                break stopCompaction;
            }
            final int bucketIndex = currBucket;
            final long logId = entryLogIds.remove();
            // Progress is logged at most once per MINUTE to keep logs quiet.
            if (System.currentTimeMillis() - lastPrintTimestamp >= MINUTE) {
                lastPrintTimestamp = System.currentTimeMillis();
                LOG.info("Compaction progress {} / {}, current compaction entryLogId: {}",
                        processedEntryLogCnt.get(), totalEntryLogIds, logId);
            }
            // Re-fetch metadata under forKey: the log may have been deleted meanwhile.
            entryLogMetaMap.forKey(logId, (entryLogId, meta) -> {
                if (meta == null) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Metadata for entry log {} already deleted", logId);
                    }
                    return;
                }
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Compacting entry log {} with usage {} below threshold {}",
                            meta.getEntryLogId(), meta.getUsage(), threshold);
                }
                long priorRemainingSize = meta.getRemainingSize();
                compactEntryLog(meta);
                gcStats.getReclaimedSpaceViaCompaction().addCount(meta.getTotalSize() - priorRemainingSize);
                compactedBuckets[bucketIndex]++;
                processedEntryLogCnt.getAndIncrement();
            });
        }
    }
    if (LOG.isDebugEnabled()) {
        if (!running) {
            LOG.debug("Compaction exited due to gc not running");
        }
        if (maxTimeMillis > 0 && timeDiff.getValue() > maxTimeMillis) {
            LOG.debug("Compaction ran for {}ms but was limited by {}ms", timeDiff, maxTimeMillis);
        }
    }
    LOG.info(
            "Compaction: entry log usage buckets[10% 20% 30% 40% 50% 60% 70% 80% 90% 100%] = {}, compacted {}",
            entryLogUsageBuckets, compactedBuckets);
}
/**
 * Map a usage ratio onto a reporting-bucket index.
 *
 * @param numBuckets Number of reporting buckets.
 * @param usage 0.0 - 1.0 value representing the usage of the entry log.
 * @return bucket index; a usage of exactly 1.0 lands in the last bucket.
 */
int calculateUsageIndex(int numBuckets, double usage) {
    int rawIndex = (int) Math.floor(usage * numBuckets);
    int lastBucket = numBuckets - 1;
    // Clamp to the last bucket (equivalent to Math.min(lastBucket, rawIndex)).
    return rawIndex < lastBucket ? rawIndex : lastBucket;
}
/**
 * Shutdown the garbage collector thread.
 * Cancels any blocked throttler acquisition, waits for an in-flight compaction to
 * finish by claiming the compacting flag, stops the executor, then closes the
 * entry-log metadata map.
 *
 * @throws InterruptedException if there is an exception stopping gc thread.
 */
@SuppressFBWarnings("SWL_SLEEP_WITH_LOCK_HELD")
public synchronized void shutdown() throws InterruptedException {
    if (!this.running) {
        return;
    }
    LOG.info("Shutting down GarbageCollectorThread");
    // Unblock any thread stuck waiting for compaction rate-limiter permits.
    throttler.cancelledAcquire();
    compactor.throttler.cancelledAcquire();
    while (!compacting.compareAndSet(false, true)) {
        // Wait till the thread stops compacting
        Thread.sleep(100);
    }
    this.running = false;
    // Interrupt GC executor thread
    gcExecutor.shutdownNow();
    try {
        entryLogMetaMap.close();
    } catch (Exception e) {
        LOG.warn("Failed to close entryLog metadata-map", e);
    }
}
/**
 * Remove an entry log file and, on success, its cached metadata.
 *
 * @param entryLogId Entry Log File Id
 * @return true if the log file was deleted (metadata removed too), false otherwise
 * @throws EntryLogMetadataMapException on metadata-map access failure
 */
protected boolean removeEntryLog(long entryLogId) throws EntryLogMetadataMapException {
    if (!entryLogger.removeEntryLog(entryLogId)) {
        return false;
    }
    // remove entry log file successfully
    LOG.info("Removing entry log metadata for {}", entryLogId);
    entryLogMetaMap.remove(entryLogId);
    return true;
}
/**
 * Compact an entry log, copying its live entries elsewhere via the compactor.
 * Guarded by the {@code compacting} flag so shutdown cannot interrupt a compaction
 * mid-flight; if another compaction is already running this call is a no-op.
 *
 * @param entryLogMeta metadata of the entry log to compact
 */
protected void compactEntryLog(EntryLogMetadata entryLogMeta) {
    // Similar with Sync Thread
    // try to mark compacting flag to make sure it would not be interrupted
    // by shutdown during compaction. otherwise it will receive
    // ClosedByInterruptException which may cause index file & entry logger
    // closed and corrupted.
    if (!compacting.compareAndSet(false, true)) {
        // set compacting flag failed, means compacting is true now
        // indicates that compaction is in progress for this EntryLogId.
        return;
    }
    try {
        // Do the actual compaction
        compactor.compact(entryLogMeta);
    } catch (Exception e) {
        LOG.error("Failed to compact entry log {} due to unexpected error", entryLogMeta.getEntryLogId(), e);
    } finally {
        // Mark compaction done
        compacting.set(false);
    }
}
/**
 * Method to read in all of the entry logs (those that we haven't done so yet),
 * and find the set of ledger ID's that make up each entry log file.
 * Logs found to contain no live ledgers are deleted immediately instead of cached.
 *
 * @throws EntryLogMetadataMapException on metadata-map access failure
 */
protected void extractMetaFromEntryLogs() throws EntryLogMetadataMapException {
    for (long entryLogId : entryLogger.getFlushedLogIds()) {
        // Comb the current entry log file if it has not already been extracted.
        if (entryLogMetaMap.containsKey(entryLogId)) {
            continue;
        }
        // check whether log file exists or not
        // if it doesn't exist, this log file might have been garbage collected.
        if (!entryLogger.logExists(entryLogId)) {
            continue;
        }
        LOG.info("Extracting entry log meta from entryLogId: {}", entryLogId);
        try {
            // Read through the entry log file and extract the entry log meta
            EntryLogMetadata entryLogMeta = entryLogger.getEntryLogMetadata(entryLogId, throttler);
            removeIfLedgerNotExists(entryLogMeta);
            if (entryLogMeta.isEmpty()) {
                // This means the entry log is not associated with any active
                // ledgers anymore.
                // We can remove this entry log file now.
                LOG.info("Deleting entryLogId {} as it has no active ledgers!", entryLogId);
                if (removeEntryLog(entryLogId)) {
                    gcStats.getReclaimedSpaceViaDeletes().addCount(entryLogMeta.getTotalSize());
                } else {
                    gcStats.getReclaimFailedToDelete().inc();
                }
            } else {
                entryLogMetaMap.put(entryLogId, entryLogMeta);
            }
        } catch (IOException | RuntimeException e) {
            // SLF4J parameterized logging instead of string concatenation: avoids eager
            // message construction and keeps the throwable as the last argument so the
            // stack trace is still emitted. Message text is unchanged.
            LOG.warn("Premature exception when processing {} recovery will take care of the problem",
                    entryLogId, e);
        }
    }
}
/** @return the ledger storage this GC thread operates on (package-private, for tests). */
CompactableLedgerStorage getLedgerStorage() {
    return ledgerStorage;
}
/** @return the entry-log metadata map (exposed for tests only). */
@VisibleForTesting
EntryLogMetadataMap getEntryLogMetaMap() {
    return entryLogMetaMap;
}
/**
 * Snapshot the current GC state (force/major/minor flags, last compaction times and
 * counters) into an immutable status object, e.g. for the admin/metrics endpoints.
 *
 * @return a point-in-time view of garbage-collection activity
 */
public GarbageCollectionStatus getGarbageCollectionStatus() {
    return GarbageCollectionStatus.builder()
        .forceCompacting(forceGarbageCollection.get())
        .majorCompacting(majorCompacting.get())
        .minorCompacting(minorCompacting.get())
        .lastMajorCompactionTime(lastMajorCompactionTime)
        .lastMinorCompactionTime(lastMinorCompactionTime)
        .majorCompactionCounter(gcStats.getMajorCompactionCounter().get())
        .minorCompactionCounter(gcStats.getMinorCompactionCounter().get())
        .build();
}
}
| 508 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/AbstractLogCompactor.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import com.google.common.util.concurrent.RateLimiter;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.bookkeeper.conf.ServerConfiguration;
/**
* Abstract entry log compactor used for compaction.
*/
/**
 * Abstract entry log compactor used for compaction.
 */
public abstract class AbstractLogCompactor {

    protected final ServerConfiguration conf;
    protected final Throttler throttler;

    /**
     * LogRemovalListener: callback through which a compactor requests removal of a
     * fully-compacted entry log.
     */
    public interface LogRemovalListener {
        void removeEntryLog(long logToRemove);
    }

    protected final LogRemovalListener logRemovalListener;

    public AbstractLogCompactor(ServerConfiguration conf, LogRemovalListener logRemovalListener) {
        this.conf = conf;
        this.throttler = new Throttler(conf);
        this.logRemovalListener = logRemovalListener;
    }

    /**
     * Compact entry log file.
     * @param entryLogMeta log metadata for the entry log to be compacted
     * @return true for succeed
     */
    public abstract boolean compact(EntryLogMetadata entryLogMeta);

    /**
     * Do nothing by default. Intended for subclass to override this method.
     */
    public void cleanUpAndRecover() {}

    /**
     * Rate limiter for compaction work, counting either bytes or entries depending
     * on configuration. {@link #cancelledAcquire()} makes pending acquisitions fail.
     */
    public static class Throttler {
        private final RateLimiter rateLimiter;
        private final boolean isThrottleByBytes;
        private final AtomicBoolean cancelled = new AtomicBoolean(false);

        Throttler(ServerConfiguration conf) {
            this.isThrottleByBytes = conf.getIsThrottleByBytes();
            this.rateLimiter = RateLimiter.create(this.isThrottleByBytes
                    ? conf.getCompactionRateByBytes() : conf.getCompactionRateByEntries());
        }

        // acquire. if bybytes: bytes of this entry; if byentries: 1.
        boolean tryAcquire(int permits, long timeout, TimeUnit unit) {
            return rateLimiter.tryAcquire(this.isThrottleByBytes ? permits : 1, timeout, unit);
        }

        /**
         * Block until permits are granted, polling so that cancellation by another
         * thread (e.g. GC shutdown) is observed.
         *
         * @param permits number of permits to acquire (bytes or entries)
         * @throws IOException if the throttler was cancelled while waiting
         */
        public void acquire(int permits) throws IOException {
            long timeout = 100;
            long start = System.currentTimeMillis();
            boolean interrupted = false;
            try {
                while (!tryAcquire(permits, timeout, TimeUnit.MILLISECONDS)) {
                    if (cancelled.get()) {
                        throw new IOException("Failed to get permits takes "
                                + (System.currentTimeMillis() - start)
                                + " ms may be compactor has been shutting down");
                    }
                    try {
                        TimeUnit.MILLISECONDS.sleep(timeout);
                    } catch (InterruptedException e) {
                        // Remember the interrupt instead of swallowing it; restored on
                        // exit so callers can observe it, without turning this poll
                        // loop into a busy spin in the meantime.
                        interrupted = true;
                    }
                }
            } finally {
                if (interrupted) {
                    Thread.currentThread().interrupt();
                }
            }
        }

        public void cancelledAcquire() {
            cancelled.set(true);
        }
    }
}
| 509 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/HandleFactory.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
/**
 * Creates {@link LedgerDescriptor} handles through which a bookie accesses
 * individual ledgers.
 */
interface HandleFactory {
    /**
     * Get a handle for the given ledger, presenting its master key.
     *
     * @throws BookieException if the handle cannot be obtained (exact conditions
     *         depend on the implementation — e.g. presumably a master-key mismatch).
     */
    LedgerDescriptor getHandle(long ledgerId, byte[] masterKey)
        throws IOException, BookieException;

    /**
     * Get a read-only handle for the given ledger.
     *
     * @throws Bookie.NoLedgerException if the bookie holds no state for this ledger.
     */
    LedgerDescriptor getReadOnlyHandle(long ledgerId)
        throws IOException, Bookie.NoLedgerException;
}
| 510 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/EntryLogScanner.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
/**
 * Scan entries in an entry log file.
 */
public interface EntryLogScanner {
    /**
     * Tests whether the entries belonging to the specified ledger
     * should be processed.
     *
     * @param ledgerId
     *          Ledger ID.
     * @return true if and only if the entries of the ledger should be scanned.
     */
    boolean accept(long ledgerId);

    /**
     * Process an entry.
     *
     * @param ledgerId
     *          Ledger ID.
     * @param offset
     *          File offset of this entry.
     * @param entry
     *          Entry ByteBuf
     * @throws IOException
     */
    void process(long ledgerId, long offset, ByteBuf entry) throws IOException;
}
| 511 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/EntryLogIds.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage;
import java.io.IOException;
/**
 * Generate unique entry log ids.
 */
public interface EntryLogIds {
    /**
     * Get the next available entry log ID.
     *
     * @return a log ID not yet in use
     * @throws IOException if an ID cannot be allocated
     */
    int nextId() throws IOException;
}
| 512 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/CompactionEntryLog.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
/**
 * An entrylog to receive compacted entries.
 * <p/>
 * The expected lifecycle for a compaction entry log is:
 * 1. Creation
 * 2. Mark compacted
 * 3. Make available
 * 4. Cleanup
 * <p/>
 * Abort can happen during any step.
 */
public interface CompactionEntryLog {
    /**
     * Add an entry to the log.
     * @param ledgerId the ledger the entry belongs to
     * @param entry the payload of the entry
     * @return the position to which the entry was written
     */
    long addEntry(long ledgerId, ByteBuf entry) throws IOException;

    /**
     * Scan the entry log, reading out all contained entries.
     */
    void scan(EntryLogScanner scanner) throws IOException;

    /**
     * Flush any unwritten entries to physical storage.
     */
    void flush() throws IOException;

    /**
     * Abort the compaction log. This should delete any resources held
     * by this log.
     */
    void abort();

    /**
     * Mark the compaction log as compacted.
     * By this point, the heavy work of copying entries from one log
     * to another has been done. We don't want to repeat that work,
     * so this method should take steps to ensure that if the bookie crashes
     * we can resume the compaction from this point.
     */
    void markCompacted() throws IOException;

    /**
     * Make the log written by the compaction process available for reads.
     */
    void makeAvailable() throws IOException;

    /**
     * Clean up any temporary resources that were used by the compaction process.
     */
    void finalizeAndCleanup();

    /**
     * Get the log ID of the entrylog to which compacted entries are being written.
     */
    long getDstLogId();

    /**
     * Get the log ID of the entrylog which is being compacted.
     */
    long getSrcLogId();
}
| 513 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/EntryLogger.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
import java.util.Collection;
import org.apache.bookkeeper.bookie.AbstractLogCompactor;
import org.apache.bookkeeper.bookie.Bookie.NoEntryException;
import org.apache.bookkeeper.bookie.EntryLogMetadata;
/**
 * Entry logger. Sequentially writes entries for a large number of ledgers to
 * a small number of log files, to avoid many random writes.
 * When an entry is added, a location is returned, which consists of the ID of the
 * log into which the entry was added, and the offset of that entry within the log.
 * The location is a long, with 32 bits each for the log ID and the offset. This
 * naturally limits the offset and thus the size of the log to Integer.MAX_VALUE.
 */
public interface EntryLogger extends AutoCloseable {
    // Sentinel ledger ID for data not attributed to any particular ledger.
    long UNASSIGNED_LEDGERID = -1L;
    // log file suffix
    String LOG_FILE_SUFFIX = ".log";

    /**
     * Add an entry for ledger ```ledgerId``` to the entrylog.
     * @param ledgerId the ledger for which the entry is being added
     * @param buf the contents of the entry (this method does not take ownership of the refcount)
     * @return the location in the entry log of the added entry
     */
    long addEntry(long ledgerId, ByteBuf buf) throws IOException;

    /**
     * Read an entry from an entrylog location.
     * @param entryLocation the location from which to read the entry
     * @return the entry
     * @throws NoEntryException if no entry exists at the given location
     */
    ByteBuf readEntry(long entryLocation)
            throws IOException, NoEntryException;

    /**
     * Read an entry from an entrylog location, and verify that it matches the
     * expected ledger and entry ID.
     * @param ledgerId the ledgerID to match
     * @param entryId the entryID to match
     * @param entryLocation the location from which to read the entry
     * @return the entry
     * @throws NoEntryException if no matching entry exists at the given location
     */
    ByteBuf readEntry(long ledgerId, long entryId, long entryLocation)
            throws IOException, NoEntryException;

    /**
     * Flush any outstanding writes to disk.
     */
    void flush() throws IOException;

    @Override
    void close() throws IOException;

    /**
     * Create a new entrylog into which compacted entries can be added.
     * There is a 1-1 mapping between logs that are being compacted
     * and the log the compacted entries are written to.
     */
    CompactionEntryLog newCompactionLog(long logToCompact) throws IOException;

    /**
     * Return a collection of all the compaction entry logs which have been
     * compacted, but have not been cleaned up.
     */
    Collection<CompactionEntryLog> incompleteCompactionLogs();

    /**
     * Get the log ids for the set of logs which have been completely flushed to
     * disk.
     * Only log ids in this set are considered for either compaction or garbage
     * collection.
     */
    Collection<Long> getFlushedLogIds();

    /**
     * Scan the given entrylog, returning all entries contained therein.
     */
    void scanEntryLog(long entryLogId, EntryLogScanner scanner) throws IOException;

    /**
     * Retrieve metadata for the given entrylog ID, without throttling.
     * The metadata contains the size of the log, the size of the data in the log which is still
     * active, and a list of all the ledgers contained in the log and the size of the data stored
     * for each ledger.
     */
    default EntryLogMetadata getEntryLogMetadata(long entryLogId) throws IOException {
        return getEntryLogMetadata(entryLogId, null);
    }

    /**
     * Retrieve metadata for the given entrylog ID, rate-limited by the supplied throttler.
     * The metadata contains the size of the log, the size of the data in the log which is still
     * active, and a list of all the ledgers contained in the log and the size of the data stored
     * for each ledger.
     */
    EntryLogMetadata getEntryLogMetadata(long entryLogId, AbstractLogCompactor.Throttler throttler) throws IOException;

    /**
     * Check whether an entrylog with the given ID exists.
     */
    boolean logExists(long logId);

    /**
     * Delete the entrylog with the given ID.
     * @return false if the entrylog doesn't exist.
     */
    boolean removeEntryLog(long entryLogId);
}
| 514 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Storage-layer abstractions (entry loggers, compaction logs, scanners) used by the
 * <i>Bookie</i> server to store entries for clients.
 */
package org.apache.bookkeeper.bookie.storage;
| 515 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/LogReaderScan.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.util.ReferenceCountUtil;
import java.io.IOException;
import org.apache.bookkeeper.bookie.storage.EntryLogScanner;
/**
 * Helper that walks every entry in an entrylog, handing each accepted entry
 * to an {@link EntryLogScanner}.
 */
class LogReaderScan {
    /**
     * Scan all entries in the log, skipping padding and stopping at
     * preallocated (zeroed) space.
     *
     * @param allocator allocator used for the scratch entry buffer
     * @param reader reader positioned over the log to scan
     * @param scanner callback receiving each entry whose ledger it accepts
     * @throws IOException if reading the log fails
     */
    static void scan(ByteBufAllocator allocator, LogReader reader, EntryLogScanner scanner) throws IOException {
        // Scratch buffer reused for every entry body.
        ByteBuf entryBody = allocator.directBuffer(16 * 1024 * 1024);
        try {
            int pos = Header.LOGFILE_LEGACY_HEADER_SIZE;
            // maxOffset() is consulted each iteration: the reader may learn of
            // newly flushed data while we scan.
            while (pos < reader.maxOffset()) {
                final int entryStart = pos;
                final int entrySize = reader.readIntAt(pos);
                if (entrySize == 0) {
                    // zeroed, preallocated space: nothing more to read
                    break;
                }
                if (entrySize < 0) {
                    // negative size marks padding; jump to the next alignment boundary
                    pos = Buffer.nextAlignment(pos);
                    continue;
                }
                // The 4 bytes for the entrySize need to be added only after we
                // have realigned on the block boundary.
                pos += Integer.BYTES;
                entryBody.clear();
                reader.readIntoBufferAt(entryBody, pos, entrySize);
                final long ledgerId = entryBody.getLong(0);
                if (ledgerId >= 0 && scanner.accept(ledgerId)) {
                    scanner.process(ledgerId, entryStart, entryBody);
                }
                pos += entrySize;
            }
        } finally {
            ReferenceCountUtil.release(entryBody);
        }
    }
}
| 516 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/DirectCompactionEntryLog.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import static org.apache.bookkeeper.bookie.TransactionalEntryLogCompactor.COMPACTED_SUFFIX;
import static org.apache.bookkeeper.bookie.TransactionalEntryLogCompactor.COMPACTING_SUFFIX;
import static org.apache.bookkeeper.common.util.ExceptionMessageHelper.exMsg;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.concurrent.ExecutorService;
import org.apache.bookkeeper.bookie.EntryLogMetadata;
import org.apache.bookkeeper.bookie.storage.CompactionEntryLog;
import org.apache.bookkeeper.bookie.storage.EntryLogScanner;
import org.apache.bookkeeper.common.util.nativeio.NativeIO;
import org.apache.bookkeeper.slogger.Slogger;
import org.apache.bookkeeper.stats.OpStatsLogger;
/**
* DirectCompactionEntryLog.
*/
public abstract class DirectCompactionEntryLog implements CompactionEntryLog {
protected final int srcLogId;
protected final int dstLogId;
protected final Slogger slog;
protected final File compactingFile;
protected final File compactedFile;
protected final File completeFile;
static CompactionEntryLog newLog(int srcLogId,
int dstLogId,
File ledgerDir,
long maxFileSize,
ExecutorService writeExecutor,
BufferPool writeBuffers,
NativeIO nativeIO,
ByteBufAllocator allocator,
Slogger slog) throws IOException {
return new WritingDirectCompactionEntryLog(
srcLogId, dstLogId, ledgerDir, maxFileSize,
writeExecutor, writeBuffers, nativeIO, allocator, slog);
}
static CompactionEntryLog recoverLog(int srcLogId,
int dstLogId,
File ledgerDir,
int readBufferSize,
int maxSaneEntrySize,
NativeIO nativeIO,
ByteBufAllocator allocator,
OpStatsLogger readBlockStats,
Slogger slog) {
return new RecoveredDirectCompactionEntryLog(srcLogId, dstLogId, ledgerDir, readBufferSize,
maxSaneEntrySize, nativeIO, allocator, readBlockStats, slog);
}
private DirectCompactionEntryLog(int srcLogId,
int dstLogId,
File ledgerDir,
Slogger slog) {
compactingFile = compactingFile(ledgerDir, dstLogId);
compactedFile = compactedFile(ledgerDir, dstLogId, srcLogId);
completeFile = DirectEntryLogger.logFile(ledgerDir, dstLogId);
this.srcLogId = srcLogId;
this.dstLogId = dstLogId;
this.slog = slog.kv("dstLogId", dstLogId).kv("srcLogId", srcLogId).ctx(DirectCompactionEntryLog.class);
}
@Override
public void abort() {
try {
Files.deleteIfExists(compactingFile.toPath());
} catch (IOException ioe) {
slog.kv("compactingFile", compactingFile).warn(Events.COMPACTION_ABORT_EXCEPTION, ioe);
}
try {
Files.deleteIfExists(compactedFile.toPath());
} catch (IOException ioe) {
slog.kv("compactedFile", compactedFile).warn(Events.COMPACTION_ABORT_EXCEPTION, ioe);
}
}
@Override
public void makeAvailable() throws IOException {
idempotentLink(compactedFile, completeFile);
slog.kv("compactedFile", compactedFile).kv("completeFile", completeFile)
.info(Events.COMPACTION_MAKE_AVAILABLE);
}
private static void idempotentLink(File src, File dst) throws IOException {
if (!src.exists()) {
throw new IOException(exMsg("src doesn't exist, aborting link")
.kv("src", src).kv("dst", dst).toString());
}
if (!dst.exists()) {
Files.createLink(dst.toPath(), src.toPath());
} else if (!Files.isSameFile(src.toPath(), dst.toPath())) {
throw new IOException(exMsg("dst exists, but doesn't match src")
.kv("src", src)
.kv("dst", dst).toString());
} // else src and dst point to the same inode so we have nothing to do
}
@Override
public void finalizeAndCleanup() {
try {
Files.deleteIfExists(compactingFile.toPath());
} catch (IOException ioe) {
slog.kv("compactingFile", compactingFile).warn(Events.COMPACTION_DELETE_FAILURE, ioe);
}
try {
Files.deleteIfExists(compactedFile.toPath());
} catch (IOException ioe) {
slog.kv("compactedFile", compactedFile).warn(Events.COMPACTION_DELETE_FAILURE, ioe);
}
slog.info(Events.COMPACTION_COMPLETE);
}
@Override
public long getDstLogId() {
return dstLogId;
}
@Override
public long getSrcLogId() {
return srcLogId;
}
private static class RecoveredDirectCompactionEntryLog extends DirectCompactionEntryLog {
private final ByteBufAllocator allocator;
private final NativeIO nativeIO;
private final int readBufferSize;
private final int maxSaneEntrySize;
private final OpStatsLogger readBlockStats;
RecoveredDirectCompactionEntryLog(int srcLogId,
int dstLogId,
File ledgerDir,
int readBufferSize,
int maxSaneEntrySize,
NativeIO nativeIO,
ByteBufAllocator allocator,
OpStatsLogger readBlockStats,
Slogger slog) {
super(srcLogId, dstLogId, ledgerDir, slog);
this.allocator = allocator;
this.nativeIO = nativeIO;
this.readBufferSize = readBufferSize;
this.maxSaneEntrySize = maxSaneEntrySize;
this.readBlockStats = readBlockStats;
this.slog.info(Events.COMPACTION_LOG_RECOVERED);
}
private IllegalStateException illegalOpException() {
return new IllegalStateException(exMsg("Invalid operation for recovered log")
.kv("srcLogId", srcLogId)
.kv("dstLogId", dstLogId)
.kv("compactingFile", compactingFile)
.kv("compactedFile", compactedFile)
.kv("completeFile", completeFile).toString());
}
@Override
public long addEntry(long ledgerId, ByteBuf entry) throws IOException {
throw illegalOpException();
}
@Override
public void flush() throws IOException {
throw illegalOpException();
}
@Override
public void markCompacted() throws IOException {
throw illegalOpException();
}
@Override
public void scan(EntryLogScanner scanner) throws IOException {
try (LogReader reader = new DirectReader(dstLogId, compactedFile.toString(), allocator, nativeIO,
readBufferSize, maxSaneEntrySize, readBlockStats)) {
LogReaderScan.scan(allocator, reader, scanner);
}
}
}
private static class WritingDirectCompactionEntryLog extends DirectCompactionEntryLog {
private final WriterWithMetadata writer;
WritingDirectCompactionEntryLog(int srcLogId,
int dstLogId,
File ledgerDir,
long maxFileSize,
ExecutorService writeExecutor,
BufferPool writeBuffers,
NativeIO nativeIO,
ByteBufAllocator allocator,
Slogger slog) throws IOException {
super(srcLogId, dstLogId, ledgerDir, slog);
this.writer = new WriterWithMetadata(
new DirectWriter(dstLogId, compactingFile.toString(), maxFileSize,
writeExecutor, writeBuffers, nativeIO, slog),
new EntryLogMetadata(dstLogId),
allocator);
this.slog.info(Events.COMPACTION_LOG_CREATED);
}
@Override
public long addEntry(long ledgerId, ByteBuf entry) throws IOException {
return writer.addEntry(ledgerId, entry);
}
@Override
public void flush() throws IOException {
writer.flush();
}
@Override
public void markCompacted() throws IOException {
writer.finalizeAndClose();
idempotentLink(compactingFile, compactedFile);
if (!compactingFile.delete()) {
slog.kv("compactingFile", compactingFile)
.kv("compactedFile", compactedFile)
.info(Events.COMPACTION_DELETE_FAILURE);
} else {
slog.kv("compactingFile", compactingFile)
.kv("compactedFile", compactedFile)
.info(Events.COMPACTION_MARK_COMPACTED);
}
}
@Override
public void scan(EntryLogScanner scanner) throws IOException {
throw new IllegalStateException(exMsg("Scan only valid for recovered log")
.kv("srcLogId", srcLogId)
.kv("dstLogId", dstLogId)
.kv("compactingFile", compactingFile)
.kv("compactedFile", compactedFile)
.kv("completeFile", completeFile).toString());
}
}
public static File compactingFile(File directory, int logId) {
return new File(directory, String.format("%x%s", logId, COMPACTING_SUFFIX));
}
public static File compactedFile(File directory, int newLogId, int compactedLogId) {
return new File(directory, String.format("%x.log.%x%s", newLogId,
compactedLogId, COMPACTED_SUFFIX));
}
}
| 517 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/LogMetadata.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import static org.apache.bookkeeper.common.util.ExceptionMessageHelper.exMsg;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.util.ReferenceCountUtil;
import java.io.IOException;
import org.apache.bookkeeper.bookie.EntryLogMetadata;
import org.apache.bookkeeper.util.collections.ConcurrentLongLongHashMap;
import org.apache.bookkeeper.util.collections.ConcurrentLongLongHashMap.BiConsumerLong;
/**
 * Serialization/deserialization of the per-log ledgers map (ledgerId -> bytes
 * stored) that is appended to an entrylog and pointed to by the log header.
 */
class LogMetadata {
    /**
     * Ledgers map is composed of multiple parts that can be split into separated entries. Each of them is composed of:
     *
     * <pre>
     * length: (4 bytes) [0-3]
     * ledger id (-1): (8 bytes) [4 - 11]
     * entry id: (8 bytes) [12-19]
     * num ledgers stored in current metadata entry: (4 bytes) [20 - 23]
     * ledger entries: sequence of (ledgerid, size) (8 + 8 bytes each) [24..]
     * </pre>
     */
    static final int LEDGERS_MAP_HEADER_SIZE = 4 + 8 + 8 + 4;
    static final int LEDGERS_MAP_ENTRY_SIZE = 8 + 8;

    // Break the ledgers map into multiple batches, each of which can contain up to 10K ledgers
    static final int LEDGERS_MAP_MAX_BATCH_SIZE = 10000;
    // Maximum serialized size of one batch, in bytes (length prefix included).
    static final int LEDGERS_MAP_MAX_MAP_SIZE =
        LEDGERS_MAP_HEADER_SIZE + LEDGERS_MAP_ENTRY_SIZE * LEDGERS_MAP_MAX_BATCH_SIZE;

    // Sentinel ledger ID marking serialized ledgers-map entries (real ledger IDs are >= 0).
    static final long INVALID_LID = -1L;

    // EntryId used to mark an entry (belonging to INVALID_ID)
    // as a component of the serialized ledgers map
    static final long LEDGERS_MAP_ENTRY_ID = -2L;

    /**
     * Serialize the entrylog's ledgers map at the writer's current position,
     * then rewrite the log header so readers can locate it.
     *
     * @param writer writer positioned where the map should be appended
     * @param metadata metadata whose ledgers map is serialized
     * @param allocator allocator for the scratch serialization buffers
     * @throws IOException if any write fails
     */
    static void write(LogWriter writer,
                      EntryLogMetadata metadata,
                      ByteBufAllocator allocator)
            throws IOException {
        long ledgerMapOffset = writer.position();

        ConcurrentLongLongHashMap ledgersMap = metadata.getLedgersMap();
        int numberOfLedgers = (int) ledgersMap.size();

        // Write the ledgers map into several batches. Size the scratch buffer in
        // *bytes* for a full batch: the previous LEDGERS_MAP_MAX_BATCH_SIZE (a
        // ledger count, 10000) forced netty to repeatedly grow the buffer, since
        // a full batch serializes to ~160KB.
        final ByteBuf serializedMap = allocator.buffer(LEDGERS_MAP_MAX_MAP_SIZE);
        BiConsumerLong writingConsumer = new BiConsumerLong() {
            int remainingLedgers = numberOfLedgers;
            boolean startNewBatch = true;
            int remainingInBatch = 0;

            @Override
            public void accept(long ledgerId, long size) {
                if (startNewBatch) {
                    int batchSize = Math.min(remainingLedgers, LEDGERS_MAP_MAX_BATCH_SIZE);
                    serializedMap.clear();
                    serializedMap.writeLong(INVALID_LID);
                    serializedMap.writeLong(LEDGERS_MAP_ENTRY_ID);
                    serializedMap.writeInt(batchSize);

                    startNewBatch = false;
                    remainingInBatch = batchSize;
                }
                // Dump the ledger in the current batch
                serializedMap.writeLong(ledgerId);
                serializedMap.writeLong(size);
                --remainingLedgers;

                if (--remainingInBatch == 0) {
                    // Close current batch
                    try {
                        writer.writeDelimited(serializedMap);
                    } catch (IOException e) {
                        // forEach can't throw checked exceptions; unwrapped below
                        throw new RuntimeException(e);
                    }
                    startNewBatch = true;
                }
            }
        };
        try {
            ledgersMap.forEach(writingConsumer);
        } catch (RuntimeException e) {
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause();
            } else {
                throw e;
            }
        } finally {
            ReferenceCountUtil.release(serializedMap);
        }

        // Rewrite the header (at offset 0) with the map's location and count.
        ByteBuf buf = allocator.buffer(Buffer.ALIGNMENT);
        try {
            Header.writeHeader(buf, ledgerMapOffset, numberOfLedgers);
            writer.writeAt(0, buf);
        } finally {
            ReferenceCountUtil.release(buf);
        }
        writer.flush();
    }

    /**
     * Read the ledgers map back out of an entrylog, following the pointer in
     * the log header.
     *
     * @param reader reader over the entrylog to read
     * @return the reconstructed metadata
     * @throws IOException if the header or map is missing, corrupt or truncated
     */
    static EntryLogMetadata read(LogReader reader) throws IOException {
        ByteBuf header = reader.readBufferAt(0, Header.LOGFILE_LEGACY_HEADER_SIZE);
        try {
            int headerVersion = Header.extractVersion(header);
            if (headerVersion < Header.HEADER_V1) {
                throw new IOException(exMsg("Old log file header").kv("headerVersion", headerVersion).toString());
            }
            long ledgerMapOffset = Header.extractLedgerMapOffset(header);
            if (ledgerMapOffset > Integer.MAX_VALUE) {
                throw new IOException(exMsg("ledgerMapOffset too high").kv("ledgerMapOffset", ledgerMapOffset)
                                      .kv("maxOffset", Integer.MAX_VALUE).toString());
            }
            if (ledgerMapOffset <= 0) {
                throw new IOException(exMsg("ledgerMap never written").kv("ledgerMapOffset", ledgerMapOffset)
                                      .toString());
            }
            long offset = ledgerMapOffset;
            EntryLogMetadata meta = new EntryLogMetadata(reader.logId());
            while (offset < reader.maxOffset()) {
                // Each batch is written length-delimited: 4-byte size then payload.
                int mapSize = reader.readIntAt((int) offset);
                if (mapSize >= LogMetadata.LEDGERS_MAP_MAX_MAP_SIZE) {
                    throw new IOException(exMsg("ledgerMap too large")
                                          .kv("maxSize", LogMetadata.LEDGERS_MAP_MAX_MAP_SIZE)
                                          .kv("mapSize", mapSize).toString());
                } else if (mapSize <= 0) {
                    // zeroed/preallocated space: no more batches
                    break;
                }
                offset += Integer.BYTES;

                ByteBuf ledgerMapBuffer = reader.readBufferAt(offset, mapSize);
                try {
                    offset += mapSize;

                    long ledgerId = ledgerMapBuffer.readLong();
                    if (ledgerId != LogMetadata.INVALID_LID) {
                        throw new IOException(exMsg("Bad ledgerID").kv("ledgerId", ledgerId).toString());
                    }
                    long entryId = ledgerMapBuffer.readLong();
                    if (entryId != LogMetadata.LEDGERS_MAP_ENTRY_ID) {
                        throw new IOException(exMsg("Unexpected entry ID. Expected special value")
                                              .kv("entryIdRead", entryId)
                                              .kv("entryIdExpected", LogMetadata.LEDGERS_MAP_ENTRY_ID).toString());
                    }

                    int countInBatch = ledgerMapBuffer.readInt();
                    for (int i = 0; i < countInBatch; i++) {
                        ledgerId = ledgerMapBuffer.readLong();
                        long size = ledgerMapBuffer.readLong();
                        meta.addLedgerSize(ledgerId, size);
                    }
                    if (ledgerMapBuffer.isReadable()) {
                        throw new IOException(exMsg("ledgerMapSize didn't match content")
                                              .kv("expectedCount", countInBatch)
                                              .kv("bufferSize", mapSize)
                                              .kv("bytesRemaining", ledgerMapBuffer.readableBytes())
                                              .toString());
                    }
                } finally {
                    ReferenceCountUtil.release(ledgerMapBuffer);
                }
            }
            return meta;
        } catch (IOException ioe) {
            throw new IOException(exMsg("Error reading index").kv("logId", reader.logId())
                                  .kv("reason", ioe.getMessage()).toString(), ioe);
        } finally {
            ReferenceCountUtil.release(header);
        }
    }
}
| 518 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/DirectReader.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import static com.google.common.base.Preconditions.checkState;
import static org.apache.bookkeeper.common.util.ExceptionMessageHelper.exMsg;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.util.ReferenceCountUtil;
import java.io.EOFException;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.common.util.nativeio.NativeIO;
import org.apache.bookkeeper.common.util.nativeio.NativeIOException;
import org.apache.bookkeeper.stats.OpStatsLogger;
class DirectReader implements LogReader {
private final ByteBufAllocator allocator;
private final NativeIO nativeIO;
private final Buffer nativeBuffer;
private final String filename;
private final int logId;
private final int fd;
private final int maxSaneEntrySize;
private final OpStatsLogger readBlockStats;
private long currentBlock = -1;
private long currentBlockEnd = -1;
private long maxOffset;
private boolean closed;
DirectReader(int logId, String filename, ByteBufAllocator allocator,
NativeIO nativeIO, int bufferSize,
int maxSaneEntrySize, OpStatsLogger readBlockStats) throws IOException {
this.nativeIO = nativeIO;
this.allocator = allocator;
this.logId = logId;
this.filename = filename;
this.maxSaneEntrySize = maxSaneEntrySize;
this.readBlockStats = readBlockStats;
closed = false;
try {
fd = nativeIO.open(filename,
NativeIO.O_RDONLY | NativeIO.O_DIRECT,
00755);
checkState(fd >= 0, "Open should throw exception on negative return (%d)", fd);
} catch (NativeIOException ne) {
throw new IOException(exMsg(ne.getMessage())
.kv("file", filename)
.kv("errno", ne.getErrno()).toString());
}
refreshMaxOffset();
nativeBuffer = new Buffer(nativeIO, allocator, bufferSize);
}
@Override
public int logId() {
return logId;
}
private void clearCache() {
synchronized (nativeBuffer) {
currentBlock = -1;
currentBlockEnd = -1;
}
}
@Override
public ByteBuf readBufferAt(long offset, int size) throws IOException, EOFException {
ByteBuf buf = allocator.buffer(size);
try {
readIntoBufferAt(buf, offset, size);
} catch (IOException e) {
ReferenceCountUtil.release(buf);
throw e;
}
return buf;
}
@Override
public void readIntoBufferAt(ByteBuf buffer, long offset, int size) throws IOException, EOFException {
assertValidOffset(offset);
synchronized (nativeBuffer) {
while (size > 0) {
int bytesRead = readBytesIntoBuf(buffer, offset, size);
size -= bytesRead;
offset += bytesRead;
}
}
}
@Override
public int readIntAt(long offset) throws IOException, EOFException {
assertValidOffset(offset);
synchronized (nativeBuffer) {
if (offset >= currentBlock && offset + Integer.BYTES <= currentBlockEnd) { // fast path
return nativeBuffer.readInt(offsetInBlock(offset));
} else { // slow path
ByteBuf intBuf = readBufferAt(offset, Integer.BYTES);
try {
return intBuf.getInt(0);
} finally {
ReferenceCountUtil.release(intBuf);
}
}
}
}
@Override
public long readLongAt(long offset) throws IOException, EOFException {
assertValidOffset(offset);
synchronized (nativeBuffer) {
if (offset >= currentBlock && offset + Long.BYTES <= currentBlockEnd) { // fast path
return nativeBuffer.readLong(offsetInBlock(offset));
} else { // slow path
ByteBuf longBuf = readBufferAt(offset, Long.BYTES);
try {
return longBuf.getLong(0);
} finally {
ReferenceCountUtil.release(longBuf);
}
}
}
}
private int readBytesIntoBuf(ByteBuf buf, long offset, int size) throws IOException, EOFException {
synchronized (nativeBuffer) {
if (offset < currentBlock || offset >= currentBlockEnd) {
readBlock(offset);
}
int offsetInBuffer = offsetInBlock(offset);
int sizeInBuffer = sizeInBlock(offset, size);
if (sizeInBuffer <= 0) {
throw new EOFException(exMsg("Not enough bytes available")
.kv("file", filename)
.kv("fileSize", maxOffset)
.kv("offset", offset)
.kv("size", size).toString());
}
return nativeBuffer.readByteBuf(buf, offsetInBuffer, size);
}
}
@Override
public ByteBuf readEntryAt(int offset) throws IOException, EOFException {
assertValidEntryOffset(offset);
int sizeOffset = offset - Integer.BYTES;
if (sizeOffset < 0) {
throw new IOException(exMsg("Invalid offset, buffer size missing")
.kv("file", filename)
.kv("offset", offset).toString());
}
int entrySize = readIntAt(sizeOffset);
if (entrySize == 0) {
// reading an entry with size 0 may mean reading from preallocated
// space. if we receive an offset in preallocated space, it may
// mean that a write has occurred and been flushed, but our view
// of that block is out of date. So clear the cache and let it be
// loaded again.
clearCache();
entrySize = readIntAt(sizeOffset);
}
if (entrySize > maxSaneEntrySize || entrySize <= 0) {
throw new IOException(exMsg("Invalid entry size")
.kv("file", filename)
.kv("offset", offset)
.kv("maxSaneEntrySize", maxSaneEntrySize)
.kv("readEntrySize", entrySize).toString());
}
return readBufferAt(offset, entrySize);
}
void readBlock(long offset) throws IOException {
final int blockSize = nativeBuffer.size();
assertValidBlockSize(blockSize);
final long blockStart = offset & ~(blockSize - 1);
if (blockStart + blockSize > maxOffset) {
// Check if there's new data in the file
refreshMaxOffset();
}
final long bytesAvailable = maxOffset > blockStart ? maxOffset - blockStart : 0;
final long startNs = System.nanoTime();
long bufferOffset = 0;
long bytesToRead = Math.min(blockSize, bytesAvailable);
long bytesOutstanding = bytesToRead;
long bytesRead = -1;
try {
while (true) {
long readSize = blockSize - bufferOffset;
long pointerWithOffset = nativeBuffer.pointer(bufferOffset, readSize);
bytesRead = nativeIO.pread(fd, pointerWithOffset,
readSize,
blockStart + bufferOffset);
// offsets and counts must be aligned, so ensure that if we
// get a short read, we don't throw off the alignment. For example
// if we're trying to read 12K and we only managed 100 bytes,
// we don't progress the offset or outstanding at all. However, if we
// read 4196 bytes, we can progress the offset by 4KB and the outstanding
// bytes will then be 100.
// the only non-short read that isn't aligned is the bytes at the end of
// of the file, which is why we don't align before we check if we should
// exit the loop
if ((bytesOutstanding - bytesRead) <= 0) {
break;
}
bytesOutstanding -= bytesRead & Buffer.ALIGNMENT;
bufferOffset += bytesRead & Buffer.ALIGNMENT;
}
} catch (NativeIOException ne) {
readBlockStats.registerFailedEvent(System.nanoTime() - startNs, TimeUnit.NANOSECONDS);
throw new IOException(exMsg(ne.getMessage())
.kv("requestedBytes", blockSize)
.kv("offset", blockStart)
.kv("expectedBytes", Math.min(blockSize, bytesAvailable))
.kv("bytesOutstanding", bytesOutstanding)
.kv("bufferOffset", bufferOffset)
.kv("file", filename)
.kv("fd", fd)
.kv("errno", ne.getErrno()).toString());
}
readBlockStats.registerSuccessfulEvent(System.nanoTime() - startNs, TimeUnit.NANOSECONDS);
currentBlock = blockStart;
currentBlockEnd = blockStart + Math.min(blockSize, bytesAvailable);
}
@Override
public void close() throws IOException {
synchronized (nativeBuffer) {
nativeBuffer.free();
}
try {
int ret = nativeIO.close(fd);
checkState(ret == 0, "Close should throw exception on non-zero return (%d)", ret);
closed = true;
} catch (NativeIOException ne) {
throw new IOException(exMsg(ne.getMessage())
.kv("file", filename)
.kv("errno", ne.getErrno()).toString());
}
}
@Override
public boolean isClosed() {
return closed;
}
@Override
public long maxOffset() {
return maxOffset;
}
private void refreshMaxOffset() throws IOException {
try {
long ret = nativeIO.lseek(fd, 0, NativeIO.SEEK_END);
checkState(ret >= 0,
"Lseek should throw exception on negative return (%d)", ret);
synchronized (this) {
maxOffset = ret;
}
} catch (NativeIOException ne) {
throw new IOException(exMsg(ne.getMessage())
.kv("file", filename)
.kv("fd", fd)
.kv("errno", ne.getErrno()).toString());
}
}
private int offsetInBlock(long offset) {
long blockOffset = offset - currentBlock;
if (blockOffset < 0 || blockOffset > Integer.MAX_VALUE) {
throw new IllegalArgumentException(exMsg("Invalid offset passed")
.kv("offset", offset).kv("blockOffset", blockOffset)
.kv("currentBlock", currentBlock).toString());
}
return (int) blockOffset;
}
private int sizeInBlock(long offset, int size) {
if (offset > currentBlockEnd || offset < currentBlock) {
throw new IllegalArgumentException(exMsg("Invalid offset passed")
.kv("offset", offset)
.kv("currentBlock", currentBlock)
.kv("currentBlockEnd", currentBlockEnd).toString());
}
long available = currentBlockEnd - offset;
checkState(available <= Integer.MAX_VALUE, "Available(%d) must be less than max int", available);
return Math.min(size, (int) available);
}
private static void assertValidOffset(long offset) {
if (offset < 0) {
throw new IllegalArgumentException(
exMsg("Offset can't be negative").kv("offset", offset).toString());
}
}
private static void assertValidEntryOffset(long offset) {
assertValidOffset(offset);
if (offset > Integer.MAX_VALUE) {
throw new IllegalArgumentException(
exMsg("Entry offset must be less than max int").kv("offset", offset).toString());
}
}
private static void assertValidBlockSize(int blockSize) {
boolean valid = blockSize > 0 && Buffer.isAligned(blockSize);
if (!valid) {
throw new IllegalArgumentException(
exMsg("Invalid block size, must be power of 2")
.kv("blockSize", blockSize)
.kv("minBlockSize", Buffer.ALIGNMENT).toString());
}
}
}
| 519 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/DirectWriter.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static org.apache.bookkeeper.common.util.ExceptionMessageHelper.exMsg;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import org.apache.bookkeeper.common.util.nativeio.NativeIO;
import org.apache.bookkeeper.common.util.nativeio.NativeIOException;
import org.apache.bookkeeper.slogger.Slogger;
import org.apache.commons.lang3.SystemUtils;
class DirectWriter implements LogWriter {
final NativeIO nativeIO;
final int fd;
final int id;
final String filename;
final BufferPool bufferPool;
final ExecutorService writeExecutor;
final Object bufferLock = new Object();
final List<Future<?>> outstandingWrites = new ArrayList<Future<?>>();
final Slogger slog;
Buffer nativeBuffer;
long offset;
private static volatile boolean useFallocate = true;
DirectWriter(int id,
String filename,
long maxFileSize,
ExecutorService writeExecutor,
BufferPool bufferPool,
NativeIO nativeIO, Slogger slog) throws IOException {
checkArgument(maxFileSize > 0, "Max file size (%d) must be positive");
this.id = id;
this.filename = filename;
this.writeExecutor = writeExecutor;
this.nativeIO = nativeIO;
this.slog = slog.ctx(DirectWriter.class);
offset = 0;
try {
fd = nativeIO.open(filename,
NativeIO.O_CREAT | NativeIO.O_WRONLY | NativeIO.O_DIRECT,
00644);
checkState(fd >= 0, "Open should have thrown exception, fd is invalid : %d", fd);
} catch (NativeIOException ne) {
throw new IOException(exMsg(ne.getMessage()).kv("file", filename)
.kv("errno", ne.getErrno()).toString(), ne);
}
if (useFallocate) {
if (!SystemUtils.IS_OS_LINUX) {
disableUseFallocate();
this.slog.warn(Events.FALLOCATE_NOT_AVAILABLE);
} else {
try {
int ret = nativeIO.fallocate(fd, NativeIO.FALLOC_FL_ZERO_RANGE, 0, maxFileSize);
checkState(ret == 0, "Exception should have been thrown on non-zero ret: %d", ret);
} catch (NativeIOException ex) {
// fallocate(2) is not supported on all filesystems. Since this is an optimization, disable
// subsequent usage instead of failing the operation.
disableUseFallocate();
this.slog.kv("message", ex.getMessage())
.kv("file", filename)
.kv("errno", ex.getErrno())
.warn(Events.FALLOCATE_NOT_AVAILABLE);
}
}
}
this.bufferPool = bufferPool;
this.nativeBuffer = bufferPool.acquire();
}
    private static void disableUseFallocate() {
        // Process-wide kill switch: once fallocate(2) is found unsupported (or
        // we're not on Linux), skip it for all writers created afterwards.
        DirectWriter.useFallocate = false;
    }
    @Override
    public int logId() {
        // ID of the entrylog this writer appends to.
        return id;
    }
@Override
public void writeAt(long offset, ByteBuf buf) throws IOException {
checkArgument(Buffer.isAligned(offset),
"Offset to writeAt must be aligned to %d: %d is not", Buffer.ALIGNMENT, offset);
checkArgument(Buffer.isAligned(buf.readableBytes()),
"Buffer must write multiple of alignment bytes (%d), %d is not",
Buffer.ALIGNMENT, buf.readableBytes());
int bytesToWrite = buf.readableBytes();
if (bytesToWrite <= 0) {
return;
}
Buffer tmpBuffer = bufferPool.acquire();
tmpBuffer.reset();
tmpBuffer.writeByteBuf(buf);
Future<?> f = writeExecutor.submit(() -> {
writeByteBuf(tmpBuffer, bytesToWrite, offset);
return null;
});
addOutstandingWrite(f);
}
private void writeByteBuf(Buffer buffer, int bytesToWrite, long offsetToWrite) throws IOException{
try {
if (bytesToWrite <= 0) {
return;
}
int ret = nativeIO.pwrite(fd, buffer.pointer(), bytesToWrite, offsetToWrite);
if (ret != bytesToWrite) {
throw new IOException(exMsg("Incomplete write")
.kv("filename", filename)
.kv("pointer", buffer.pointer())
.kv("offset", offsetToWrite)
.kv("writeSize", bytesToWrite)
.kv("bytesWritten", ret)
.toString());
}
} catch (NativeIOException ne) {
throw new IOException(exMsg("Write error")
.kv("filename", filename)
.kv("offset", offsetToWrite)
.kv("writeSize", bytesToWrite)
.kv("pointer", buffer.pointer())
.kv("errno", ne.getErrno())
.toString());
} finally {
bufferPool.release(buffer);
}
}
@Override
public int writeDelimited(ByteBuf buf) throws IOException {
synchronized (bufferLock) {
if (!nativeBuffer.hasSpace(serializedSize(buf))) {
flushBuffer();
}
int readable = buf.readableBytes();
long bufferPosition = position() + Integer.BYTES;
if (bufferPosition > Integer.MAX_VALUE) {
throw new IOException(exMsg("Cannot write past max int")
.kv("filename", filename)
.kv("writeSize", readable)
.kv("position", bufferPosition)
.toString());
}
nativeBuffer.writeInt(readable);
nativeBuffer.writeByteBuf(buf);
return (int) bufferPosition;
}
}
@Override
public void position(long offset) throws IOException {
synchronized (bufferLock) {
if (nativeBuffer != null && nativeBuffer.position() > 0) {
flushBuffer();
}
if ((offset % Buffer.ALIGNMENT) != 0) {
throw new IOException(exMsg("offset must be multiple of alignment")
.kv("offset", offset)
.kv("alignment", Buffer.ALIGNMENT)
.toString());
}
this.offset = offset;
}
}
@Override
public long position() {
synchronized (bufferLock) {
return this.offset + (nativeBuffer != null ? nativeBuffer.position() : 0);
}
}
@Override
public void flush() throws IOException {
flushBuffer();
waitForOutstandingWrites();
try {
int ret = nativeIO.fsync(fd);
checkState(ret == 0, "Fsync should throw exception on non-zero return (%d)", ret);
} catch (NativeIOException ne) {
throw new IOException(exMsg(ne.getMessage())
.kv("file", filename)
.kv("errno", ne.getErrno()).toString());
}
}
@Override
public void close() throws IOException {
synchronized (bufferLock) {
if (nativeBuffer != null && nativeBuffer.position() > 0) {
flush();
}
}
try {
int ret = nativeIO.close(fd);
checkState(ret == 0, "Close should throw exception on non-zero return (%d)", ret);
} catch (NativeIOException ne) {
throw new IOException(exMsg(ne.getMessage())
.kv("file", filename)
.kv("errno", ne.getErrno()).toString());
} finally {
synchronized (bufferLock) {
bufferPool.release(nativeBuffer);
nativeBuffer = null;
}
}
}
private void addOutstandingWrite(Future<?> toAdd) throws IOException {
synchronized (outstandingWrites) {
outstandingWrites.add(toAdd);
Iterator<Future<?>> iter = outstandingWrites.iterator();
while (iter.hasNext()) { // clear out completed futures
Future<?> f = iter.next();
if (f.isDone()) {
waitForFuture(f);
iter.remove();
} else {
break;
}
}
}
}
private void waitForOutstandingWrites() throws IOException {
synchronized (outstandingWrites) {
Iterator<Future<?>> iter = outstandingWrites.iterator();
while (iter.hasNext()) { // clear out completed futures
Future<?> f = iter.next();
waitForFuture(f);
iter.remove();
}
}
}
    /**
     * Wait for a single write future, converting any failure into an
     * IOException for the caller.
     */
    private void waitForFuture(Future<?> f) throws IOException {
        try {
            f.get();
        } catch (InterruptedException ie) {
            // restore the interrupt flag before surfacing as an IOException
            Thread.currentThread().interrupt();
            throw new IOException(ie);
        } catch (Throwable t) {
            // typically an ExecutionException; unwrap an IOException cause so
            // callers see the original write error rather than the wrapper
            if (t.getCause() instanceof IOException) {
                throw (IOException) t.getCause();
            } else {
                throw new IOException(t);
            }
        }
    }
    /**
     * Pad the current native buffer to an alignment boundary and hand it to
     * the write executor, then acquire a fresh buffer for subsequent writes.
     * The submitted write releases the old buffer back to the pool when done.
     */
    private void flushBuffer() throws IOException {
        synchronized (bufferLock) {
            if (this.nativeBuffer != null) {
                int bytesToWrite = this.nativeBuffer.padToAlignment();
                if (bytesToWrite == 0) {
                    // nothing buffered; keep the current buffer as-is
                    return;
                }
                // detach the buffer before submitting; ownership passes to writeByteBuf
                Buffer bufferToFlush = this.nativeBuffer;
                this.nativeBuffer = null;
                long offsetToWrite = offset;
                offset += bytesToWrite;
                Future<?> f = writeExecutor.submit(() -> {
                    writeByteBuf(bufferToFlush, bytesToWrite, offsetToWrite);
                    return null;
                });
                addOutstandingWrite(f);
                // must acquire after triggering the write
                // otherwise it could try to acquire a buffer without kicking off
                // a subroutine that will free another
                this.nativeBuffer = bufferPool.acquire();
            }
        }
    }
@Override
public int serializedSize(ByteBuf buf) {
return buf.readableBytes() + Integer.BYTES;
}
}
| 520 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/DirectEntryLogger.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import static com.google.common.base.Preconditions.checkArgument;
import static org.apache.bookkeeper.bookie.TransactionalEntryLogCompactor.COMPACTING_SUFFIX;
import static org.apache.bookkeeper.common.util.ExceptionMessageHelper.exMsg;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.util.ReferenceCountUtil;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
import org.apache.bookkeeper.bookie.AbstractLogCompactor;
import org.apache.bookkeeper.bookie.Bookie.NoEntryException;
import org.apache.bookkeeper.bookie.EntryLogMetadata;
import org.apache.bookkeeper.bookie.storage.CompactionEntryLog;
import org.apache.bookkeeper.bookie.storage.EntryLogIds;
import org.apache.bookkeeper.bookie.storage.EntryLogScanner;
import org.apache.bookkeeper.bookie.storage.EntryLogger;
import org.apache.bookkeeper.common.util.nativeio.NativeIO;
import org.apache.bookkeeper.slogger.Slogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.LedgerDirUtil;
/**
 * An {@link EntryLogger} implementation that performs all file I/O via
 * direct I/O (O_DIRECT), bypassing the OS page cache. Appends are staged in
 * aligned native buffers and flushed asynchronously on dedicated executors;
 * reads go through a per-thread cache of {@link LogReader} instances sized
 * so the total read-buffer memory budget is respected.
 */
public class DirectEntryLogger implements EntryLogger {
    private final Slogger slog;
    private final File ledgerDir;
    private final EntryLogIds ids;
    private final ExecutorService writeExecutor;
    private final ExecutorService flushExecutor;
    private final long maxFileSize;
    private final DirectEntryLoggerStats stats;
    private final ByteBufAllocator allocator;
    private final BufferPool writeBuffers;
    private final int readBufferSize;
    private final int maxSaneEntrySize;
    // ids of logs created but not yet finalized+flushed to disk
    private final Set<Integer> unflushedLogs;

    // log currently being appended to; guarded by synchronized (this)
    private WriterWithMetadata curWriter;
    // flushes of rolled logs not yet waited on; guarded by synchronized (this)
    private List<Future<?>> pendingFlushes;
    private final NativeIO nativeIO;
    // all per-thread reader caches, retained so close() can invalidate them
    private final List<Cache<?, ?>> allCaches = new CopyOnWriteArrayList<>();
    // per-thread reader cache, keyed by log id
    private final ThreadLocal<Cache<Integer, LogReader>> caches;

    private static final int NUMBER_OF_WRITE_BUFFERS = 8;

    public DirectEntryLogger(File ledgerDir,
                             EntryLogIds ids,
                             NativeIO nativeIO,
                             ByteBufAllocator allocator,
                             ExecutorService writeExecutor,
                             ExecutorService flushExecutor,
                             long maxFileSize,
                             int maxSaneEntrySize,
                             long totalWriteBufferSize,
                             long totalReadBufferSize,
                             int readBufferSize,
                             int numReadThreads,
                             int maxFdCacheTimeSeconds,
                             Slogger slogParent,
                             StatsLogger stats) throws IOException {
        this.ledgerDir = ledgerDir;
        this.flushExecutor = flushExecutor;
        this.writeExecutor = writeExecutor;
        this.pendingFlushes = new ArrayList<>();
        this.nativeIO = nativeIO;
        this.unflushedLogs = ConcurrentHashMap.newKeySet();
        this.maxFileSize = maxFileSize;
        this.maxSaneEntrySize = maxSaneEntrySize;
        this.readBufferSize = Buffer.nextAlignment(readBufferSize);
        this.ids = ids;
        this.slog = slogParent.kv("directory", ledgerDir).ctx(DirectEntryLogger.class);
        this.stats = new DirectEntryLoggerStats(stats);
        this.allocator = allocator;
        int singleWriteBufferSize = Buffer.nextAlignment((int) (totalWriteBufferSize / NUMBER_OF_WRITE_BUFFERS));
        this.writeBuffers = new BufferPool(nativeIO, allocator, singleWriteBufferSize, NUMBER_OF_WRITE_BUFFERS);

        // The total read buffer memory needs to get split across all the read threads, since the caches
        // are thread-specific and we want to ensure we don't pass the total memory limit.
        long perThreadBufferSize = totalReadBufferSize / numReadThreads;

        // if the amount of total read buffer size is too low, and/or the number of read threads is too high
        // then the perThreadBufferSize can be lower than the readBufferSize causing immediate eviction of readers
        // from the cache
        if (perThreadBufferSize < readBufferSize) {
            slog.kv("reason", "perThreadBufferSize lower than readBufferSize (causes immediate reader cache eviction)")
                .kv("totalReadBufferSize", totalReadBufferSize)
                .kv("totalNumReadThreads", numReadThreads)
                .kv("readBufferSize", readBufferSize)
                .kv("perThreadBufferSize", perThreadBufferSize)
                .error(Events.ENTRYLOGGER_MISCONFIGURED);
        }

        long maxCachedReadersPerThread = perThreadBufferSize / readBufferSize;
        long maxCachedReaders = maxCachedReadersPerThread * numReadThreads;

        this.slog
            .kv("maxFileSize", maxFileSize)
            .kv("maxSaneEntrySize", maxSaneEntrySize)
            .kv("totalWriteBufferSize", totalWriteBufferSize)
            .kv("singleWriteBufferSize", singleWriteBufferSize)
            .kv("totalReadBufferSize", totalReadBufferSize)
            .kv("readBufferSize", readBufferSize)
            .kv("perThreadBufferSize", perThreadBufferSize)
            .kv("maxCachedReadersPerThread", maxCachedReadersPerThread)
            .kv("maxCachedReaders", maxCachedReaders)
            .info(Events.ENTRYLOGGER_CREATED);

        this.caches = ThreadLocal.withInitial(() -> {
            // close the reader when it is evicted from the cache
            RemovalListener<Integer, LogReader> rl = (notification) -> {
                try {
                    notification.getValue().close();
                    this.stats.getCloseReaderCounter().inc();
                } catch (IOException ioe) {
                    slog.kv("logID", notification.getKey()).error(Events.READER_CLOSE_ERROR);
                }
            };
            Cache<Integer, LogReader> cache = CacheBuilder.newBuilder()
                .maximumWeight(perThreadBufferSize)
                .weigher((key, value) -> readBufferSize)
                .removalListener(rl)
                .expireAfterAccess(maxFdCacheTimeSeconds, TimeUnit.SECONDS)
                .concurrencyLevel(1) // important to avoid too aggressive eviction
                .build();
            allCaches.add(cache);
            return cache;
        });
    }

    /**
     * Append an entry for {@code ledgerId}, rolling to a new log file when the
     * current one would exceed {@code maxFileSize}.
     *
     * @return the location of the entry (log id in the high 32 bits, offset in
     *         the low 32 bits)
     */
    @Override
    public long addEntry(long ledgerId, ByteBuf buf) throws IOException {
        long start = System.nanoTime();
        long offset;
        synchronized (this) {
            if (curWriter != null
                && curWriter.shouldRoll(buf, maxFileSize)) {
                // roll the log. asynchronously flush and close current log
                flushAndCloseCurrent();
                curWriter = null;
            }
            if (curWriter == null) {
                int newId = ids.nextId();
                curWriter = new WriterWithMetadata(newDirectWriter(newId),
                                                   new EntryLogMetadata(newId),
                                                   allocator);
                slog.kv("newLogId", newId).info(Events.LOG_ROLL);
            }
            offset = curWriter.addEntry(ledgerId, buf);
        }
        stats.getAddEntryStats().registerSuccessfulEvent(System.nanoTime() - start, TimeUnit.NANOSECONDS);
        return offset;
    }

    @Override
    public ByteBuf readEntry(long entryLocation)
            throws IOException, NoEntryException {
        // -1/-1 sentinels skip the ledger/entry id validation
        return internalReadEntry(-1L, -1L, entryLocation, false);
    }

    @Override
    public ByteBuf readEntry(long ledgerId, long entryId, long entryLocation)
            throws IOException, NoEntryException {
        return internalReadEntry(ledgerId, entryId, entryLocation, true);
    }

    /**
     * Get a reader for {@code logId} from the calling thread's cache, opening
     * one if necessary.
     */
    private LogReader getReader(int logId) throws IOException {
        Cache<Integer, LogReader> cache = caches.get();
        try {
            LogReader reader = cache.get(logId, () -> {
                this.stats.getOpenReaderCounter().inc();
                return newDirectReader(logId);
            });

            // it is possible though unlikely, that the cache has already cleaned up this cache entry
            // during the get operation. This is more likely to happen when there is great demand
            // for many separate readers in a low memory environment.
            if (reader.isClosed()) {
                this.stats.getCachedReadersServedClosedCounter().inc();
                throw new IOException(exMsg("Cached reader already closed").kv("logId", logId).toString());
            }
            return reader;
        } catch (ExecutionException ee) {
            if (ee.getCause() instanceof IOException) {
                throw (IOException) ee.getCause();
            } else {
                throw new IOException(exMsg("Error loading reader in cache").kv("logId", logId).toString(), ee);
            }
        }
    }

    /**
     * Read an entry at {@code location}; when {@code validateEntry} is set,
     * verify the stored ledger/entry ids match the expected ones.
     */
    private ByteBuf internalReadEntry(long ledgerId, long entryId, long location, boolean validateEntry)
            throws IOException, NoEntryException {
        // location packs the log id in the high 32 bits, the offset in the low 32
        int logId = (int) (location >> 32);
        int pos = (int) (location & 0xFFFFFFFF);
        long start = System.nanoTime();
        LogReader reader = getReader(logId);
        try {
            ByteBuf buf = reader.readEntryAt(pos);
            if (validateEntry) {
                long thisLedgerId = buf.getLong(0);
                long thisEntryId = buf.getLong(8);
                if (thisLedgerId != ledgerId
                    || thisEntryId != entryId) {
                    throw new IOException(
                            exMsg("Bad location").kv("location", location)
                            .kv("expectedLedger", ledgerId).kv("expectedEntry", entryId)
                            .kv("foundLedger", thisLedgerId).kv("foundEntry", thisEntryId)
                            .toString());
                }
            }
            stats.getReadEntryStats().registerSuccessfulEvent(System.nanoTime() - start, TimeUnit.NANOSECONDS);
            return buf;
        } catch (EOFException eof) {
            stats.getReadEntryStats().registerFailedEvent(System.nanoTime() - start, TimeUnit.NANOSECONDS);
            throw new NoEntryException(
                    exMsg("Entry location doesn't exist").kv("location", location).toString(),
                    ledgerId, entryId);
        }
    }

    /**
     * Flush the current writer and wait for all previously triggered
     * (log-roll) flushes to complete.
     */
    @Override
    public void flush() throws IOException {
        long start = System.nanoTime();
        Future<?> currentFuture = flushCurrent();

        List<Future<?>> outstandingFlushes;
        synchronized (this) {
            outstandingFlushes = this.pendingFlushes;
            this.pendingFlushes = new ArrayList<>();
        }
        outstandingFlushes.add(currentFuture);

        for (Future<?> f: outstandingFlushes) {
            try {
                f.get();
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                // fixed typo in the message ("Interruped")
                throw new IOException("Interrupted while flushing", ie);
            } catch (ExecutionException ee) {
                if (ee.getCause() instanceof IOException) {
                    throw (IOException) ee.getCause();
                } else {
                    throw new IOException("Exception flushing writer", ee);
                }
            }
        }
        stats.getFlushStats().registerSuccessfulEvent(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }

    // Submit an async flush of the current writer; completed future if none.
    private Future<?> flushCurrent() throws IOException {
        WriterWithMetadata flushWriter;
        synchronized (this) {
            flushWriter = this.curWriter;
        }
        if (flushWriter != null) {
            return flushExecutor.submit(() -> {
                long start = System.nanoTime();
                try {
                    flushWriter.flush();
                    stats.getWriterFlushStats().registerSuccessfulEvent(
                            System.nanoTime() - start, TimeUnit.NANOSECONDS);
                } catch (Throwable t) {
                    stats.getWriterFlushStats().registerFailedEvent(
                            System.nanoTime() - start, TimeUnit.NANOSECONDS);
                    throw t;
                }
                return null;
            });
        } else {
            return CompletableFuture.completedFuture(null);
        }
    }

    // Detach the current writer and asynchronously finalize (append metadata)
    // and close it; the completion is recorded in pendingFlushes so flush()
    // can wait on it.
    private void flushAndCloseCurrent() throws IOException {
        WriterWithMetadata flushWriter;

        CompletableFuture<Void> flushPromise = new CompletableFuture<>();
        synchronized (this) {
            flushWriter = this.curWriter;
            this.curWriter = null;

            pendingFlushes.add(flushPromise);
        }
        if (flushWriter != null) {
            flushExecutor.execute(() -> {
                long start = System.nanoTime();
                try {
                    flushWriter.finalizeAndClose();
                    stats.getWriterFlushStats()
                        .registerSuccessfulEvent(System.nanoTime() - start, TimeUnit.NANOSECONDS);
                    unflushedLogs.remove(flushWriter.logId());
                    flushPromise.complete(null);
                } catch (Throwable t) {
                    stats.getWriterFlushStats()
                        .registerFailedEvent(System.nanoTime() - start, TimeUnit.NANOSECONDS);
                    flushPromise.completeExceptionally(t);
                }
            });
        } else {
            flushPromise.complete(null);
        }
    }

    @Override
    public void close() throws IOException {
        flushAndCloseCurrent(); // appends metadata to current log
        flush(); // wait for all outstanding flushes
        for (Cache<?, ?> c : allCaches) {
            // triggers the removal listener, closing the cached readers
            c.invalidateAll();
        }
        writeBuffers.close();
    }

    /**
     * Ids of all logs in the directory that have been fully flushed (i.e.
     * excluding logs still being written).
     */
    @Override
    public Collection<Long> getFlushedLogIds() {
        return LedgerDirUtil.logIdsInDirectory(ledgerDir).stream()
            .filter(logId -> !unflushedLogs.contains(logId))
            .map(Long::valueOf)
            .collect(Collectors.toList());
    }

    @Override
    public boolean removeEntryLog(long entryLogId) {
        // Guava checkArgument only substitutes %s placeholders ("%d" printed literally before)
        checkArgument(entryLogId < Integer.MAX_VALUE, "Entry log id must be an int [%s]", entryLogId);
        File file = logFile(ledgerDir, (int) entryLogId);
        boolean result = file.delete();
        slog.kv("file", file).kv("logId", entryLogId).kv("result", result).info(Events.LOG_DELETED);
        return result;
    }

    @Override
    public void scanEntryLog(long entryLogId, EntryLogScanner scanner) throws IOException {
        checkArgument(entryLogId < Integer.MAX_VALUE, "Entry log id must be an int [%s]", entryLogId);
        try (LogReader reader = newDirectReader((int) entryLogId)) {
            LogReaderScan.scan(allocator, reader, scanner);
        }
    }

    @Override
    public boolean logExists(long logId) {
        checkArgument(logId < Integer.MAX_VALUE, "Entry log id must be an int [%s]", logId);
        return logFile(ledgerDir, (int) logId).exists();
    }

    /**
     * Load metadata for a log, preferring the index written at finalization
     * and falling back to a full scan if the index cannot be read.
     */
    @Override
    public EntryLogMetadata getEntryLogMetadata(long entryLogId, AbstractLogCompactor.Throttler throttler)
            throws IOException {
        try {
            return readEntryLogIndex(entryLogId);
        } catch (IOException e) {
            slog.kv("entryLogId", entryLogId).kv("reason", e.getMessage())
                .info(Events.READ_METADATA_FALLBACK);
            return scanEntryLogMetadata(entryLogId, throttler);
        }
    }

    @VisibleForTesting
    EntryLogMetadata readEntryLogIndex(long logId) throws IOException {
        checkArgument(logId < Integer.MAX_VALUE, "Entry log id must be an int [%s]", logId);
        try (LogReader reader = newDirectReader((int) logId)) {
            return LogMetadata.read(reader);
        }
    }

    @VisibleForTesting
    EntryLogMetadata scanEntryLogMetadata(long logId, AbstractLogCompactor.Throttler throttler) throws IOException {
        final EntryLogMetadata meta = new EntryLogMetadata(logId);

        // Read through the entry log file and extract the entry log meta
        scanEntryLog(logId, new EntryLogScanner() {
            @Override
            public void process(long ledgerId, long offset, ByteBuf entry) throws IOException {
                // add new entry size of a ledger to entry log meta
                if (throttler != null) {
                    throttler.acquire(entry.readableBytes());
                }
                meta.addLedgerSize(ledgerId, entry.readableBytes() + Integer.BYTES);
            }

            @Override
            public boolean accept(long ledgerId) {
                return ledgerId >= 0;
            }
        });
        return meta;
    }

    @VisibleForTesting
    LogReader newDirectReader(int logId) throws IOException {
        return new DirectReader(logId, logFilename(ledgerDir, logId),
                                allocator, nativeIO, readBufferSize,
                                maxSaneEntrySize, stats.getReadBlockStats());
    }

    // Create a new writer for a fresh log, reserving the first alignment block
    // for the header.
    private LogWriter newDirectWriter(int newId) throws IOException {
        unflushedLogs.add(newId);
        LogWriter writer = new DirectWriter(newId, logFilename(ledgerDir, newId), maxFileSize,
                                            writeExecutor, writeBuffers, nativeIO, slog);
        ByteBuf buf = allocator.buffer(Buffer.ALIGNMENT);
        try {
            Header.writeEmptyHeader(buf);
            writer.writeAt(0, buf);
            writer.position(buf.capacity());
        } finally {
            ReferenceCountUtil.release(buf);
        }
        return writer;
    }

    public static File logFile(File directory, int logId) {
        return new File(directory, Long.toHexString(logId) + LOG_FILE_SUFFIX);
    }

    public static String logFilename(File directory, int logId) {
        return logFile(directory, logId).toString();
    }

    @Override
    public CompactionEntryLog newCompactionLog(long srcLogId) throws IOException {
        int dstLogId = ids.nextId();
        return DirectCompactionEntryLog.newLog((int) srcLogId, dstLogId, ledgerDir,
                                               maxFileSize, writeExecutor, writeBuffers,
                                               nativeIO, allocator, slog);
    }

    /**
     * Find partially written compaction logs left over from a crash: deletes
     * any ".compacting" temp files and recovers "compacted" logs for replay.
     */
    @Override
    public Collection<CompactionEntryLog> incompleteCompactionLogs() {
        List<CompactionEntryLog> logs = new ArrayList<>();

        if (ledgerDir.exists() && ledgerDir.isDirectory()) {
            File[] files = ledgerDir.listFiles();
            if (files != null && files.length > 0) {
                for (File f : files) {
                    if (f.getName().endsWith(COMPACTING_SUFFIX)) {
                        try {
                            Files.deleteIfExists(f.toPath());
                        } catch (IOException ioe) {
                            slog.kv("file", f).warn(Events.COMPACTION_DELETE_FAILURE);
                        }
                    }
                    Matcher m = LedgerDirUtil.COMPACTED_FILE_PATTERN.matcher(f.getName());
                    if (m.matches()) {
                        int dstLogId = Integer.parseUnsignedInt(m.group(1), 16);
                        int srcLogId = Integer.parseUnsignedInt(m.group(2), 16);
                        logs.add(DirectCompactionEntryLog.recoverLog(srcLogId, dstLogId, ledgerDir,
                                                                     readBufferSize, maxSaneEntrySize,
                                                                     nativeIO, allocator,
                                                                     stats.getReadBlockStats(),
                                                                     slog));
                    }
                }
            }
        }
        return logs;
    }
}
| 521 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/DirectEntryLoggerStats.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_ADD_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_READ_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.CATEGORY_SERVER;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
@StatsDoc(
    name = BOOKIE_SCOPE,
    category = CATEGORY_SERVER,
    help = "Direct entry logger stats"
)
// Stat holders for DirectEntryLogger. Read-path stats are kept per-thread
// (via ThreadLocal, labelled with the thread id) since readers are
// thread-specific; write/flush stats are shared instance fields.
// NOTE(review): the per-thread stat fields are STATIC and are re-assigned by
// setStats() every time a DirectEntryLoggerStats is constructed, so with
// multiple DirectEntryLogger instances the last-constructed instance's
// StatsLogger wins for all read-path stats — confirm this is intended.
class DirectEntryLoggerStats {
    private static final String ADD_ENTRY = "entrylog-add-entry";
    private static final String READ_ENTRY = "entrylog-read-entry";
    private static final String FLUSH = "entrylog-flush";
    private static final String WRITER_FLUSH = "entrylog-writer-flush";
    private static final String READ_BLOCK = "entrylog-read-block";
    private static final String READER_OPEN = "entrylog-open-reader";
    private static final String READER_CLOSE = "entrylog-close-reader";
    private static final String CACHED_READER_SERVED_CLOSED = "entrylog-cached-reader-closed";
    @StatsDoc(
        name = ADD_ENTRY,
        help = "Operation stats of adding entries to the entry log",
        parent = BOOKIE_ADD_ENTRY
    )
    private final OpStatsLogger addEntryStats;
    @StatsDoc(
        name = READ_ENTRY,
        help = "Operation stats of reading entries from the entry log",
        parent = BOOKIE_READ_ENTRY
    )
    private static ThreadLocal<OpStatsLogger> readEntryStats;
    @StatsDoc(
        name = FLUSH,
        help = "Stats for persisting outstanding entrylog writes to disk"
    )
    private final OpStatsLogger flushStats;
    @StatsDoc(
        name = WRITER_FLUSH,
        help = "Stats for persisting outstanding entrylog writes for a single writer"
    )
    private final OpStatsLogger writerFlushStats;
    @StatsDoc(
        name = READ_BLOCK,
        help = "Stats for reading blocks from disk"
    )
    private static ThreadLocal<OpStatsLogger> readBlockStats;
    @StatsDoc(
        name = READER_OPEN,
        help = "Stats for reader open operations"
    )
    private static ThreadLocal<Counter> openReaderStats;
    @StatsDoc(
        name = READER_CLOSE,
        help = "Stats for reader close operations"
    )
    private static ThreadLocal<Counter> closeReaderStats;
    @StatsDoc(
        name = CACHED_READER_SERVED_CLOSED,
        help = "Stats for cached readers being served closed"
    )
    private static ThreadLocal<Counter> cachedReadersServedClosed;
    DirectEntryLoggerStats(StatsLogger stats) {
        addEntryStats = stats.getOpStatsLogger(ADD_ENTRY);
        flushStats = stats.getOpStatsLogger(FLUSH);
        writerFlushStats = stats.getOpStatsLogger(WRITER_FLUSH);
        setStats(stats);
    }
    // (Re)initialize the static per-thread stat suppliers; each thread's
    // loggers are scoped with a "thread" label carrying the thread id.
    private static synchronized void setStats(StatsLogger stats) {
        readEntryStats = new ThreadLocal<OpStatsLogger>() {
            @Override
            public OpStatsLogger initialValue() {
                return stats.scopeLabel("thread", String.valueOf(Thread.currentThread().getId()))
                    .getOpStatsLogger(READ_ENTRY);
            }
        };
        readBlockStats = new ThreadLocal<OpStatsLogger>() {
            @Override
            public OpStatsLogger initialValue() {
                return stats.scopeLabel("thread", String.valueOf(Thread.currentThread().getId()))
                    .getOpStatsLogger(READ_BLOCK);
            }
        };
        DirectEntryLoggerStats.openReaderStats = new ThreadLocal<Counter>() {
            @Override
            public Counter initialValue() {
                return stats.scopeLabel("thread", String.valueOf(Thread.currentThread().getId()))
                    .getCounter(READER_OPEN);
            }
        };
        DirectEntryLoggerStats.closeReaderStats = new ThreadLocal<Counter>() {
            @Override
            public Counter initialValue() {
                return stats.scopeLabel("thread", String.valueOf(Thread.currentThread().getId()))
                    .getCounter(READER_CLOSE);
            }
        };
        DirectEntryLoggerStats.cachedReadersServedClosed = new ThreadLocal<Counter>() {
            @Override
            public Counter initialValue() {
                return stats.scopeLabel("thread", String.valueOf(Thread.currentThread().getId()))
                    .getCounter(CACHED_READER_SERVED_CLOSED);
            }
        };
    }
    OpStatsLogger getAddEntryStats() {
        return addEntryStats;
    }
    OpStatsLogger getFlushStats() {
        return flushStats;
    }
    OpStatsLogger getWriterFlushStats() {
        return writerFlushStats;
    }
    OpStatsLogger getReadEntryStats() {
        return readEntryStats.get();
    }
    OpStatsLogger getReadBlockStats() {
        return readBlockStats.get();
    }
    Counter getOpenReaderCounter() {
        return openReaderStats.get();
    }
    Counter getCloseReaderCounter() {
        return closeReaderStats.get();
    }
    Counter getCachedReadersServedClosedCounter() {
        return cachedReadersServedClosed.get();
    }
}
| 522 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/WriterWithMetadata.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import static com.google.common.base.Preconditions.checkState;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import java.io.IOException;
import org.apache.bookkeeper.bookie.EntryLogMetadata;
/**
 * Pairs a {@link LogWriter} with the {@link EntryLogMetadata} accumulated for
 * that log, so the metadata can be appended when the log is finalized.
 */
class WriterWithMetadata {
    private final LogWriter writer;
    private final EntryLogMetadata metadata;
    private final ByteBufAllocator allocator;

    WriterWithMetadata(LogWriter writer, EntryLogMetadata metadata,
                       ByteBufAllocator allocator) throws IOException {
        this.writer = writer;
        this.metadata = metadata;
        this.allocator = allocator;

        // reserve the first alignment block of the file with an empty header
        // and position the writer just past it
        ByteBuf buf = allocator.buffer(Buffer.ALIGNMENT);
        try {
            Header.writeEmptyHeader(buf);
            writer.writeAt(0, buf);
            writer.position(buf.capacity());
        } finally {
            buf.release();
        }
    }

    int logId() {
        return writer.logId();
    }

    /** Whether appending {@code entry} would push the log past {@code rollThreshold}. */
    boolean shouldRoll(ByteBuf entry, long rollThreshold) throws IOException {
        return (writer.position() + writer.serializedSize(entry)) > rollThreshold;
    }

    /**
     * Append an entry, record its size against the ledger in the metadata,
     * and return the packed location (log id in high 32 bits, offset in low).
     */
    long addEntry(long ledgerId, ByteBuf entry) throws IOException {
        int size = entry.readableBytes();
        metadata.addLedgerSize(ledgerId, size + Integer.BYTES);
        long offset = writer.writeDelimited(entry);
        // Guava checkState only substitutes %s placeholders; the previous %d
        // placeholder was printed literally.
        checkState(offset < Integer.MAX_VALUE, "Offsets can't be higher than max int (%s)", offset);
        return ((long) writer.logId()) << 32 | offset;
    }

    void flush() throws IOException {
        writer.flush();
    }

    /** Flush, append the accumulated metadata to the log, and close the writer. */
    void finalizeAndClose() throws IOException {
        writer.flush();
        LogMetadata.write(writer, metadata, allocator);
        writer.close();
    }
}
| 523 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/BufferPool.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import io.netty.buffer.ByteBufAllocator;
import java.io.IOException;
import java.util.concurrent.ArrayBlockingQueue;
import org.apache.bookkeeper.common.util.nativeio.NativeIO;
/**
 * A fixed-size pool of aligned native {@link Buffer}s. {@link #acquire()}
 * blocks until a buffer is available; {@link #release(Buffer)} returns a
 * buffer to the pool (or frees it if the pool is already full).
 */
public class BufferPool implements AutoCloseable {
    private final ArrayBlockingQueue<Buffer> pool;

    BufferPool(NativeIO nativeIO, ByteBufAllocator allocator, int bufferSize, int maxPoolSize) throws IOException {
        pool = new ArrayBlockingQueue<>(maxPoolSize);
        for (int i = 0; i < maxPoolSize; i++) {
            pool.add(new Buffer(nativeIO, allocator, bufferSize));
        }
    }

    /**
     * Take a buffer from the pool, blocking until one is available.
     *
     * @throws IOException if the thread is interrupted while waiting
     */
    Buffer acquire() throws IOException {
        try {
            return pool.take();
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            throw new IOException(ie);
        }
    }

    /** Return a buffer to the pool, freeing it if the pool is already full. */
    void release(Buffer buffer) {
        buffer.reset();
        // Must use offer(), which returns false when the queue is full.
        // add() would instead throw IllegalStateException in that case,
        // making the free() fallback unreachable and leaking the exception.
        if (!pool.offer(buffer)) {
            buffer.free();
        }
    }

    @Override
    public void close() {
        // drain and free everything currently in the pool; buffers checked
        // out at this moment are freed by release() once returned
        while (true) {
            Buffer b = pool.poll();
            if (b == null) {
                break;
            }
            b.free();
        }
    }
}
| 524 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/EntryLogIdsImpl.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.bookie.LedgerDirsManager;
import org.apache.bookkeeper.bookie.storage.EntryLogIds;
import org.apache.bookkeeper.slogger.Slogger;
import org.apache.bookkeeper.util.LedgerDirUtil;
import org.apache.commons.lang3.tuple.Pair;
/**
* EntryLogIdsImpl.
*/
public class EntryLogIdsImpl implements EntryLogIds {
    private final LedgerDirsManager ledgerDirsManager;
    private final Slogger slog;
    // Next candidate id to hand out; guarded by synchronized(this) in nextId().
    private int nextId;
    // Upper bound of the current candidate range; ids in [nextId, maxId)
    // are believed unused on disk at the time of the last scan.
    private int maxId;

    public EntryLogIdsImpl(LedgerDirsManager ledgerDirsManager,
                           Slogger slog) throws IOException {
        this.ledgerDirsManager = ledgerDirsManager;
        this.slog = slog.ctx(EntryLogIdsImpl.class);
        // Seed the [nextId, maxId) range by scanning the ledger directories.
        findLargestGap();
    }

    /**
     * Return the next free entry log id. When the current candidate range is
     * exhausted, the ledger directories are rescanned for a fresh range and
     * the loop retries.
     */
    @Override
    public int nextId() throws IOException {
        while (true) {
            synchronized (this) {
                int current = nextId;
                nextId++;
                if (nextId == maxId) {
                    // Range exhausted: select a new range, then loop to retry.
                    // NOTE(review): 'current' (the last id of the old range) is
                    // discarded here rather than returned — presumably harmless
                    // since ids only need to be unused, not contiguous, but
                    // worth confirming against LedgerDirUtil.findLargestGap
                    // semantics (whether maxId is inclusive or exclusive).
                    findLargestGap();
                } else {
                    return current;
                }
            }
        }
    }

    /**
     * Scan every ledger directory for ids already in use (both live log files
     * and compacted log files), then pick the largest contiguous run of free
     * ids as the new [nextId, maxId) candidate range. Logs the selection and
     * its duration.
     */
    private void findLargestGap() throws IOException {
        long start = System.nanoTime();
        List<Integer> currentIds = new ArrayList<Integer>();
        for (File ledgerDir : ledgerDirsManager.getAllLedgerDirs()) {
            currentIds.addAll(LedgerDirUtil.logIdsInDirectory(ledgerDir));
            currentIds.addAll(LedgerDirUtil.compactedLogIdsInDirectory(ledgerDir));
        }
        Pair<Integer, Integer> gap = LedgerDirUtil.findLargestGap(currentIds);
        nextId = gap.getLeft();
        maxId = gap.getRight();
        slog.kv("dirs", ledgerDirsManager.getAllLedgerDirs())
            .kv("nextId", nextId)
            .kv("maxId", maxId)
            .kv("durationMs", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start))
            .info(Events.ENTRYLOG_IDS_CANDIDATES_SELECTED);
    }
}
| 525 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/Events.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
/**
* Events.
*/
public enum Events {
    /**
     * Fallocate is not available on this host. This generally indicates that the process is running on a
     * non-Linux operating system. The lack of fallocate means that the filesystem will have to do more
     * bookkeeping as data is written to the file, which will slow down writes.
     */
    FALLOCATE_NOT_AVAILABLE,

    /**
     * EntryLog ID candidates selected. These are the set of entry log IDs that subsequent entry log files
     * will use. To find the candidates, the bookie lists all the log ids which have already been used,
     * and finds the longest contiguous block of free ids. Over the lifetime of a bookie, a log id can
     * be reused. This is not a problem, as the ids are only referenced from the index, and an
     * entry log file will not be deleted if there are still references to it in the index.
     * Generally candidates are selected at bookie boot, but they may also be selected at a later time
     * if the current set of candidates is depleted.
     */
    ENTRYLOG_IDS_CANDIDATES_SELECTED,

    /**
     * The entrylogger({@link org.apache.bookkeeper.bookie.storage.EntryLogger}) has been created.
     * This occurs during bookie bootup, and the same entry logger will be used for the duration of
     * the bookie process's lifetime.
     */
    ENTRYLOGGER_CREATED,

    /**
     * The entrylogger has been configured in a way that will likely result in errors during operation.
     */
    ENTRYLOGGER_MISCONFIGURED,

    /**
     * The entrylogger has started writing a new log file. The previous log file may not
     * be entirely flushed when this is called, though they will be after an explicit flush call.
     */
    LOG_ROLL,

    /**
     * A log file has been deleted. This happens as a result of GC, when all entries in the file
     * belong to deleted ledgers, or compaction, where the live entries have been copied to a new
     * log.
     */
    LOG_DELETED,

    /**
     * An error occurred closing an entrylog reader. This is non-fatal but it may leak the file handle
     * and the memory buffer of the reader in question.
     */
    READER_CLOSE_ERROR,

    /**
     * An attempt to read entrylog metadata failed. Falling back to scanning the log to get the metadata.
     * This can occur if a bookie crashes before closing the entrylog cleanly.
     */
    READ_METADATA_FALLBACK,

    /**
     * A new entrylog has been created. The filename has the format [dstLogId].compacting, where dstLogId is
     * a new unique log ID. Entrylog compaction will copy live entries from an existing src log to this new
     * compacting destination log. There is a 1-1 relationship between source logs and destination logs.
     * Once the copy completes, the compacting log will be marked as compacted by renaming the file to
     * [dstLogId].log.[srcLogId].compacted, where srcLogId is the ID of the entrylog from which the live entries
     * were copied. A new hardlink, [dstLogId].log, is created to point to the same inode, making the entry
     * log available to be read. The compaction algorithm then updates the index with the offsets of the entries
     * in the compacted destination log. Once complete, the index is flushed and all intermediate files (links)
     * are deleted along with the original source log file.
     * The entry copying phase of compaction is expensive. The renaming and linking in the algorithm exists so
     * if a failure occurs after copying has completed, the work will not need to be redone.
     */
    COMPACTION_LOG_CREATED,

    /**
     * A partially compacted log has been recovered. The log file is of the format [dstLogId].log.[srcLogId].compacted.
     * The log will be scanned and the index updated with the offsets of the entries in the log. Once complete, the
     * log with ID srcLogId is deleted.
     * <p/>
     * See {@link #COMPACTION_LOG_CREATED} for more information on compaction.
     */
    COMPACTION_LOG_RECOVERED,

    /**
     * A compaction log has been marked as compacted. A log is marked as compacted by renaming from [dstLogId].log to
     * [dstLogId].log.[srcLogId].compacted. All live entries from the src log have been successfully copied to the
     * destination log, at this point.
     * <p/>
     * See {@link #COMPACTION_LOG_CREATED} for more information on compaction.
     */
    COMPACTION_MARK_COMPACTED,

    /**
     * A compacted log has been made available for reads. A log is made available by creating a hardlink
     * pointing from [dstLogId].log, to [dstLogId].log.[srcLogId].compacted. These files, pointing to the
     * same inode, will continue to exist until the compaction operation is complete.
     * <p/>
     * A reader with a valid offset will now be able to read from this log, so the index can be updated.
     * <p/>
     * See {@link #COMPACTION_LOG_CREATED} for more information on compaction.
     */
    COMPACTION_MAKE_AVAILABLE,

    /**
     * Compaction has been completed for a log.
     * All intermediary files are deleted, along with the src entrylog file.
     * <p/>
     * See {@link #COMPACTION_LOG_CREATED} for more information on compaction.
     */
    COMPACTION_COMPLETE,

    /**
     * Failed to delete files while aborting a compaction operation. While this is not fatal, it
     * can mean that there are issues writing to the filesystem that need to be investigated.
     */
    COMPACTION_ABORT_EXCEPTION,

    /**
     * Failed to delete files while completing a compaction operation. While this is not fatal, it
     * can mean that there are issues writing to the filesystem that need to be investigated.
     */
    COMPACTION_DELETE_FAILURE,
}
| 526 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/Buffer.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import static com.google.common.base.Preconditions.checkArgument;
import static org.apache.bookkeeper.common.util.ExceptionMessageHelper.exMsg;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.util.ReferenceCountUtil;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;
import org.apache.bookkeeper.common.util.nativeio.NativeIO;
/**
* A utility buffer class to be used with native calls.
* <p/>
* Buffers are page aligned (4k pages).
* <p/>
* The wrapper mostly handles writes between ByteBuffers and
* ByteBufs. It also provides a method for padding the buffer to the next
* alignment, so writes can have an aligned size also (as required by
* direct I/O). The padding is done with 0xF0, so that if it is read as
* an integer, or long, the value will be negative (assuming the read is
* a java read, and thus a signed int).
*/
class Buffer {
    /* Padding byte must have MSB set, so if read at the start
     * of an integer or long, the returned value is negative. */
    public static final byte PADDING_BYTE = (byte) 0xF0;

    /* Some machines can live with 512 alignment, but others
     * appear to require 4096, so go with 4096, which is page
     * alignment */
    public static final int ALIGNMENT = 4096;

    // Largest non-negative int that is a multiple of ALIGNMENT.
    private static final int MAX_ALIGNMENT = Integer.MAX_VALUE & ~(ALIGNMENT - 1);
    static final byte[] PADDING = generatePadding();

    final NativeIO nativeIO;
    final int bufferSize;
    ByteBuf buffer;
    ByteBuffer byteBuffer;
    ByteBufAllocator allocator;
    // Native address of the start of the aligned buffer region, for JNI calls.
    long pointer = 0;

    /**
     * Allocate a page-aligned buffer of bufferSize bytes.
     *
     * @throws IllegalArgumentException if bufferSize is not a multiple of {@link #ALIGNMENT}.
     */
    Buffer(NativeIO nativeIO, ByteBufAllocator allocator, int bufferSize) throws IOException {
        // BUGFIX: Guava's checkArgument only substitutes %s placeholders; the
        // previous "%d" was emitted literally, so the message never contained
        // the offending size.
        checkArgument(isAligned(bufferSize),
                      "Buffer size not aligned %s", bufferSize);
        this.allocator = allocator;
        this.buffer = allocateAligned(ALIGNMENT, bufferSize);
        this.nativeIO = nativeIO;
        this.bufferSize = bufferSize;
        byteBuffer = buffer.nioBuffer(0, bufferSize);
        byteBuffer.order(ByteOrder.BIG_ENDIAN);
    }

    // Allocate a direct buffer with 'alignment' bytes of headroom, then slice
    // it so the returned slice starts on an alignment boundary. The aligned
    // native address is recorded in 'pointer' for use by native calls.
    private ByteBuf allocateAligned(int alignment, int bufferSize) {
        ByteBuf buf = allocator.directBuffer(bufferSize + alignment);
        long addr = buf.memoryAddress();
        if ((addr & (alignment - 1)) == 0) {
            // The address is already aligned
            pointer = addr;
            return buf.slice(0, bufferSize);
        } else {
            int alignOffset = (int) (alignment - (addr & (alignment - 1)));
            pointer = addr + alignOffset;
            return buf.slice(alignOffset, bufferSize);
        }
    }

    /**
     * @return whether there is space in the buffer for size bytes.
     * @throws IOException if size exceeds the total buffer capacity.
     */
    boolean hasSpace(int size) throws IOException {
        if (size > bufferSize) {
            throw new IOException(exMsg("Write too large").kv("writeSize", size)
                                  .kv("maxSize", bufferSize).toString());
        }
        return byteBuffer.remaining() >= size;
    }

    /**
     * @return whether the buffer can honour a read of size at offset.
     */
    boolean hasData(int offset, int size) {
        // NOTE(review): negative offsets are not rejected here; callers are
        // assumed to pass non-negative offsets — confirm.
        return offset + size <= bufferSize;
    }

    /**
     * Write an integer to buffer. Progresses the position of the buffer by 4 bytes.
     */
    void writeInt(int value) throws IOException {
        byteBuffer.putInt(value);
    }

    /**
     * Write a bytebuf to this buffer. Progresses the position of the buffer by the
     * number of readable bytes of the bytebuf. Progresses the readerIndex of the passed
     * bytebuf by the number of bytes read (i.e. to the end).
     */
    void writeByteBuf(ByteBuf bytebuf) throws IOException {
        int bytesWritten = bytebuf.readableBytes();
        ByteBuffer bytesToPut = bytebuf.nioBuffer();
        byteBuffer.put(bytesToPut);
        bytebuf.skipBytes(bytesWritten);
    }

    /**
     * Read an integer from the buffer at the given offset. The offset is in bytes.
     */
    int readInt(int offset) throws IOException {
        if (!hasData(offset, Integer.BYTES)) {
            throw new IOException(exMsg("Buffer cannot satify int read")
                                  .kv("offset", offset)
                                  .kv("bufferSize", bufferSize).toString());
        }
        try {
            return byteBuffer.getInt(offset);
        } catch (Exception e) {
            throw new IOException(exMsg("Error reading int")
                                  .kv("byteBuffer", byteBuffer.toString())
                                  .kv("offset", offset)
                                  .kv("bufferSize", bufferSize).toString(), e);
        }
    }

    /**
     * Read a long from the buffer at the given offset. The offset is in bytes.
     */
    long readLong(int offset) throws IOException {
        if (!hasData(offset, Long.BYTES)) {
            throw new IOException(exMsg("Buffer cannot satify long read")
                                  .kv("offset", offset)
                                  .kv("bufferSize", bufferSize).toString());
        }
        try {
            return byteBuffer.getLong(offset);
        } catch (Exception e) {
            throw new IOException(exMsg("Error reading long")
                                  .kv("byteBuffer", byteBuffer.toString())
                                  .kv("offset", offset)
                                  .kv("bufferSize", bufferSize).toString(), e);
        }
    }

    /**
     * Read a bytebuf of size from the buffer at the given offset.
     * If there are not enough bytes in the buffer to satisfy the read, some of the bytes are read
     * into the byte buffer and the number of bytes read is returned.
     */
    int readByteBuf(ByteBuf buffer, int offset, int size) throws IOException {
        int originalLimit = byteBuffer.limit();
        byteBuffer.position(offset);
        // Clamp the read so it never runs past the end of the backing buffer.
        int bytesToRead = Math.min(size, byteBuffer.capacity() - offset);
        byteBuffer.limit(offset + bytesToRead);
        try {
            buffer.writeBytes(byteBuffer);
        } catch (Exception e) {
            throw new IOException(exMsg("Error reading buffer")
                                  .kv("byteBuffer", byteBuffer.toString())
                                  .kv("offset", offset).kv("size", size)
                                  .kv("bufferSize", bufferSize).toString(), e);
        } finally {
            // Restore the limit even if the copy failed, so later reads see
            // the whole buffer again.
            byteBuffer.limit(originalLimit);
        }
        return bytesToRead;
    }

    /**
     * The data pointer object for the native buffer. This can be used
     * by JNI method which take a char* or void*.
     */
    long pointer() {
        return pointer;
    }

    /**
     * As {@link #pointer()} but offset into the buffer, validating that a write
     * of expectedWrite bytes at that offset stays within the buffer.
     */
    long pointer(long offset, long expectedWrite) {
        if (offset == 0) {
            return pointer;
        } else {
            if (offset + expectedWrite > byteBuffer.capacity()) {
                throw new IllegalArgumentException(
                        exMsg("Buffer overflow").kv("offset", offset).kv("expectedWrite", expectedWrite)
                        .kv("capacity", byteBuffer.capacity()).toString());
            }
            return pointer + offset;
        }
    }

    /**
     * @return the number of bytes which have been written to this buffer.
     */
    int position() {
        return byteBuffer.position();
    }

    /**
     * @return the size of the buffer (i.e. the max number of bytes writable, or the max offset readable)
     */
    int size() {
        return bufferSize;
    }

    /**
     * Pad the buffer to the next alignment position.
     * @return the position of the next alignment. This should be used as the size argument to make aligned writes.
     */
    int padToAlignment() {
        int bufferPos = byteBuffer.position();
        int nextAlignment = nextAlignment(bufferPos);
        byteBuffer.put(PADDING, 0, nextAlignment - bufferPos);
        return nextAlignment;
    }

    /**
     * Clear the bytes written. This doesn't actually destroy the data, but moves the position back to the start of
     * the buffer.
     */
    void reset() {
        byteBuffer.clear();
    }

    /**
     * Free the memory that backs this buffer.
     */
    void free() {
        ReferenceCountUtil.release(buffer);
        buffer = null;
        byteBuffer = null;
    }

    // Build the reusable padding block, filled with PADDING_BYTE.
    private static byte[] generatePadding() {
        byte[] padding = new byte[ALIGNMENT];
        Arrays.fill(padding, (byte) PADDING_BYTE);
        return padding;
    }

    /**
     * @return whether size is a non-negative multiple of {@link #ALIGNMENT}.
     */
    static boolean isAligned(long size) {
        return size >= 0 && ((ALIGNMENT - 1) & size) == 0;
    }

    /**
     * @return the smallest multiple of {@link #ALIGNMENT} that is >= pos.
     */
    static int nextAlignment(int pos) {
        // BUGFIX: as in the constructor, Guava only supports %s placeholders,
        // so the previous "0x%x" placeholders were emitted literally.
        checkArgument(pos <= MAX_ALIGNMENT,
                      "position (%s) must be lower or equal to max alignment (%s)",
                      pos, MAX_ALIGNMENT);
        checkArgument(pos >= 0, "position (%s) must be positive", pos);
        return (pos + (ALIGNMENT - 1)) & ~(ALIGNMENT - 1);
    }
}
| 527 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/LogWriter.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
/**
* Interface for writing data to a bookkeeper entry log.
*/
interface LogWriter extends AutoCloseable {
    /**
     * Return the ID of the log being written.
     */
    int logId();

    /**
     * Write the contents of a buffer at a predefined position in the log.
     * Both the position and the size of the buffer must be page aligned (i.e. to 4096),
     * as required for direct I/O.
     */
    void writeAt(long offset, ByteBuf buf) throws IOException;

    /**
     * Write a delimited buffer to the log. The size of the buffer is first
     * written and then the buffer itself.
     * Note that the returned offset is for the buffer itself, not the size.
     * So, if a buffer is written at the start of the file, the returned offset
     * will be 4, not 0.
     * The returned offset is an int. Consequently, entries can only be written
     * in the first Integer.MAX_VALUE bytes of the file. This is due to how
     * offsets are stored in the index.
     *
     * @return the offset of the buffer within the file.
     */
    int writeDelimited(ByteBuf buf) throws IOException;

    /**
     * @return the number of bytes consumed by the buffer when written with #writeDelimited
     *         (i.e. the size prefix plus the payload).
     */
    int serializedSize(ByteBuf buf);

    /**
     * The current offset within the log at which the next call to #writeDelimited will
     * start writing.
     *
     * @return the current write position, in bytes from the start of the log.
     */
    long position() throws IOException;

    /**
     * Set the offset within the log at which the next call to #writeDelimited will start writing.
     */
    void position(long offset) throws IOException;

    /**
     * Flush all buffered writes to disk. This call must ensure that the bytes are actually on
     * disk before returning.
     */
    void flush() throws IOException;

    /**
     * Close any held resources. Implementations decide whether buffered data is
     * flushed first; callers should invoke {@link #flush()} beforehand if durability
     * is required.
     */
    void close() throws IOException;
}
| 528 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/Header.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import static org.apache.bookkeeper.common.util.ExceptionMessageHelper.exMsg;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.io.IOException;
/**
* The 1K block at the head of the entry logger file
* that contains the fingerprint and meta-data.
*
* <pre>
* Header is composed of:
* Fingerprint: 4 bytes "BKLO"
* Log file HeaderVersion enum: 4 bytes
* Ledger map offset: 8 bytes
* Ledgers Count: 4 bytes
* </pre>
*/
class Header {
    static final int LOGFILE_LEGACY_HEADER_SIZE = 1024;
    static final int LOGFILE_DIRECT_HEADER_SIZE = Buffer.ALIGNMENT;
    static final int HEADER_VERSION_OFFSET = 4;
    static final int LEDGERS_MAP_OFFSET = HEADER_VERSION_OFFSET + Integer.BYTES;
    static final int LEDGER_COUNT_OFFSET = LEDGERS_MAP_OFFSET + Long.BYTES;
    static final int HEADER_V0 = 0; // Old log file format (no ledgers map index)
    static final int HEADER_V1 = 1; // Introduced ledger map index
    static final int HEADER_CURRENT_VERSION = HEADER_V1;

    // The "BKLO" magic bytes at the start of every entry log file.
    private static final byte[] FINGERPRINT = {'B', 'K', 'L', 'O'};

    static final byte[] EMPTY_HEADER = new byte[LOGFILE_DIRECT_HEADER_SIZE];
    static {
        ByteBuf template = Unpooled.wrappedBuffer(EMPTY_HEADER);
        for (int i = 0; i < FINGERPRINT.length; i++) {
            template.setByte(i, FINGERPRINT[i]);
        }
        template.setInt(HEADER_VERSION_OFFSET, HEADER_V1);
        // The legacy header is 1024 bytes, while the direct header is a full
        // 4096-byte block so it can be written as a single aligned write.
        // To keep legacy readers from choking on the extra bytes, embed a
        // dummy entry (size + invalid ledger id) that skips them straight to
        // the start of the second block.
        int dummyEntrySize = (template.capacity() - LOGFILE_LEGACY_HEADER_SIZE) - Integer.BYTES;
        template.setInt(LOGFILE_LEGACY_HEADER_SIZE, dummyEntrySize);
        template.setLong(LOGFILE_LEGACY_HEADER_SIZE + Integer.BYTES, LogMetadata.INVALID_LID);
    }

    /** Verify the fingerprint and return the header version field. */
    static int extractVersion(ByteBuf header) throws IOException {
        assertFingerPrint(header);
        return header.getInt(HEADER_VERSION_OFFSET);
    }

    /** Verify the fingerprint and return the offset of the ledgers map. */
    static long extractLedgerMapOffset(ByteBuf header) throws IOException {
        assertFingerPrint(header);
        return header.getLong(LEDGERS_MAP_OFFSET);
    }

    /** Verify the fingerprint and return the number of ledgers in the log. */
    static int extractLedgerCount(ByteBuf header) throws IOException {
        assertFingerPrint(header);
        return header.getInt(LEDGER_COUNT_OFFSET);
    }

    /** Throw if the first four bytes of the header are not "BKLO". */
    static void assertFingerPrint(ByteBuf header) throws IOException {
        for (int i = 0; i < FINGERPRINT.length; i++) {
            if (header.getByte(i) != FINGERPRINT[i]) {
                throw new IOException(exMsg("Bad fingerprint (should be BKLO)")
                        .kv("byte0", header.getByte(0))
                        .kv("byte1", header.getByte(1))
                        .kv("byte2", header.getByte(2))
                        .kv("byte3", header.getByte(3))
                        .toString());
            }
        }
    }

    /** Write a pristine header (fingerprint + version, no ledgers map). */
    static void writeEmptyHeader(ByteBuf header) throws IOException {
        header.writeBytes(EMPTY_HEADER);
    }

    /** Write a header carrying the given ledgers map offset and ledger count. */
    static void writeHeader(ByteBuf header,
                            long ledgerMapOffset, int ledgerCount) throws IOException {
        writeEmptyHeader(header);
        header.setLong(LEDGERS_MAP_OFFSET, ledgerMapOffset);
        header.setInt(LEDGER_COUNT_OFFSET, ledgerCount);
    }
}
| 529 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/LogReader.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
import io.netty.buffer.ByteBuf;
import java.io.EOFException;
import java.io.IOException;
/**
* Interface for reading from a bookkeeper entry log.
*/
public interface LogReader extends AutoCloseable {
    /**
     * @return the id of the log being read from.
     */
    int logId();

    /**
     * @return the maximum offset in the file that can be read from.
     */
    long maxOffset();

    /**
     * Read a buffer from the file. It is the responsibility of the caller to release
     * the returned buffer.
     * @param offset the offset to read at
     * @param size the number of bytes to read
     * @return a bytebuf. The caller must release.
     */
    ByteBuf readBufferAt(long offset, int size) throws IOException, EOFException;

    /**
     * Read size bytes at the given offset into the passed buffer (rather than
     * allocating a new one). The caller retains ownership of the buffer.
     * @param buffer the destination buffer to write the bytes into
     * @param offset the offset to read at
     * @param size the number of bytes to read
     */
    void readIntoBufferAt(ByteBuf buffer, long offset, int size) throws IOException, EOFException;

    /**
     * Read an integer at a given offset.
     * @param offset the offset to read from.
     * @return the integer at that offset.
     */
    int readIntAt(long offset) throws IOException, EOFException;

    /**
     * Read an long at a given offset.
     * @param offset the offset to read from.
     * @return the long at that offset.
     */
    long readLongAt(long offset) throws IOException, EOFException;

    /**
     * Read an entry at a given offset.
     * The size of the entry must be at (offset - Integer.BYTES).
     * The payload of the entry starts at offset.
     * It is the responsibility of the caller to release the returned buffer.
     * @param offset the offset at which to read the entry.
     * @return a bytebuf. The caller must release.
     */
    ByteBuf readEntryAt(int offset) throws IOException, EOFException;

    /**
     * Close the reader and release its underlying resources.
     */
    @Override
    void close() throws IOException;

    /**
     * @return whether {@link #close()} has been called on this reader.
     */
    boolean isClosed();
}
| 530 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/directentrylogger/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Support for bookie entry logs using Direct IO.
*/
package org.apache.bookkeeper.bookie.storage.directentrylogger;
| 531 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/EntryLocationIndexStats.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.CATEGORY_SERVER;
import java.util.function.Supplier;
import lombok.Getter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
/**
* A umbrella class for ledger metadata index stats.
*/
@StatsDoc(
name = BOOKIE_SCOPE,
category = CATEGORY_SERVER,
help = "Entry location index stats"
)
@Getter
class EntryLocationIndexStats {
    private static final String ENTRIES_COUNT = "entries-count";
    private static final String LOOKUP_ENTRY_LOCATION = "lookup-entry-location";

    @StatsDoc(
        name = ENTRIES_COUNT,
        help = "Current number of entries"
    )
    private final Gauge<Long> entriesCountGauge;

    @StatsDoc(
        name = LOOKUP_ENTRY_LOCATION,
        help = "operation stats of looking up entry location"
    )
    private final OpStatsLogger lookupEntryLocationStats;

    /**
     * Wire up the entry-location-index stats: an op-stats logger for lookups
     * and a gauge (registered with the stats logger) that samples the current
     * entry count from the supplied callback.
     */
    EntryLocationIndexStats(StatsLogger statsLogger,
                            Supplier<Long> entriesCountSupplier) {
        this.lookupEntryLocationStats = statsLogger.getOpStatsLogger(LOOKUP_ENTRY_LOCATION);
        // The gauge pulls a fresh value from the supplier on every sample.
        this.entriesCountGauge = new Gauge<Long>() {
            @Override
            public Long getDefaultValue() {
                return 0L;
            }

            @Override
            public Long getSample() {
                return entriesCountSupplier.get();
            }
        };
        statsLogger.registerGauge(ENTRIES_COUNT, entriesCountGauge);
    }
}
| 532 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/ArrayGroupSort.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import static com.google.common.base.Preconditions.checkArgument;
import lombok.experimental.UtilityClass;
/**
 * Sort an array of longs, grouping the items in tuples.
 *
 * <p>Group size decides how many longs are included in the tuples and key size controls how many items to use for
 * comparison.
 */
@UtilityClass
public class ArrayGroupSort {

    // Ranges smaller than this many longs are handled with insertion sort,
    // which beats quick-sort on small partitions.
    private static final int INSERTION_SORT_THRESHOLD = 100;

    // Each logical record is 4 longs; only the first 2 longs form the sort key.
    private static final int GROUP_SIZE = 4;

    /** Sorts the whole array in groups of 4 longs, ordered by the first 2 longs of each group. */
    public void sort(long[] array) {
        sort(array, 0, array.length);
    }

    /**
     * Sorts {@code length} longs of {@code array} starting at {@code offset}.
     *
     * @throws IllegalArgumentException if {@code length} is not a multiple of the group size
     */
    public static void sort(long[] array, int offset, int length) {
        checkArgument(length % GROUP_SIZE == 0, "Array length must be multiple of 4");
        // The "high" bound passed down is the inclusive index of the first long
        // of the LAST group in the range.
        quickSort(array, offset, (length + offset - GROUP_SIZE));
    }

    ////// Private

    private static void quickSort(long[] array, int low, int high) {
        if (low >= high) {
            return;
        }
        if (high - low < INSERTION_SORT_THRESHOLD) {
            insertionSort(array, low, high);
            return;
        }
        int pivotIdx = partition(array, low, high);
        quickSort(array, low, pivotIdx - GROUP_SIZE);
        quickSort(array, pivotIdx + GROUP_SIZE, high);
    }

    // Rounds count down to a multiple of the group size so a computed midpoint
    // always lands on the first long of a group.
    private static int alignGroup(int count) {
        return count - (count % GROUP_SIZE);
    }

    // Lomuto-style partition: the pivot group is taken from the middle of the
    // range; returns the pivot's final position.
    private static int partition(long[] array, int low, int high) {
        int mid = low + alignGroup((high - low) / 2);
        swap(array, mid, high);
        int i = low;
        for (int j = low; j < high; j += GROUP_SIZE) {
            if (isLess(array, j, high)) {
                swap(array, j, i);
                i += GROUP_SIZE;
            }
        }
        swap(array, i, high);
        return i;
    }

    // Swaps two whole groups (4 longs each) starting at indexes a and b.
    private static void swap(long[] array, int a, int b) {
        long tmp0 = array[a];
        long tmp1 = array[a + 1];
        long tmp2 = array[a + 2];
        long tmp3 = array[a + 3];
        array[a] = array[b];
        array[a + 1] = array[b + 1];
        array[a + 2] = array[b + 2];
        array[a + 3] = array[b + 3];
        array[b] = tmp0;
        array[b + 1] = tmp1;
        array[b + 2] = tmp2;
        array[b + 3] = tmp3;
    }

    // Compares the 2-long keys of the groups starting at a and b; strict less-than.
    private static boolean isLess(long[] array, int a, int b) {
        long a0 = array[a];
        long b0 = array[b];
        if (a0 < b0) {
            return true;
        } else if (a0 > b0) {
            return false;
        }
        return array[a + 1] < array[b + 1];
    }

    private static void insertionSort(long[] a, int low, int high) {
        for (int i = low + GROUP_SIZE; i <= high; i += GROUP_SIZE) {
            int j = i;
            // BUGFIX: bound the backward walk at 'low' (the start of the range
            // being sorted), not at 0. With the previous 'j > 0' bound, a call
            // like sort(array, offset, length) with offset > 0 could swap groups
            // below 'offset', corrupting data outside the requested range.
            while (j > low && isLess(a, j, j - GROUP_SIZE)) {
                swap(a, j, j - GROUP_SIZE);
                j -= GROUP_SIZE;
            }
        }
    }
}
| 533 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/KeyValueStorageFactory.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import java.io.IOException;
import org.apache.bookkeeper.conf.ServerConfiguration;
/**
 * Factory class to create instances of the key-value storage implementation.
 */
public interface KeyValueStorageFactory {
    /**
     * Enum used to specify different config profiles in the underlying storage.
     */
    enum DbConfigType {
        Default, // Default profile: used for the default db, command-line utils and test cases
        LedgerMetadata, // Used for ledgers db, doesn't need particular configuration
        EntryLocation // Used for location index, lots of writes and much bigger dataset
    }

    /**
     * Creates (or opens) a key-value storage instance.
     *
     * @param defaultBasePath base directory under which the db lives
     * @param subPath sub-directory (relative to the base path) for this particular db
     * @param dbConfigType tuning profile to apply to the underlying store
     * @param conf the server configuration
     * @throws IOException if the underlying storage cannot be opened or created
     */
    KeyValueStorage newKeyValueStorage(String defaultBasePath, String subPath, DbConfigType dbConfigType,
                                       ServerConfiguration conf)
            throws IOException;
}
| 534 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/LongPairWrapper.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import io.netty.util.Recycler;
import io.netty.util.Recycler.Handle;
/**
* Recyclable wrapper that holds a pair of longs.
*/
class LongPairWrapper {
final byte[] array = new byte[16];
public void set(long first, long second) {
ArrayUtil.setLong(array, 0, first);
ArrayUtil.setLong(array, 8, second);
}
public long getFirst() {
return ArrayUtil.getLong(array, 0);
}
public long getSecond() {
return ArrayUtil.getLong(array, 8);
}
public static LongPairWrapper get(long first, long second) {
LongPairWrapper lp = RECYCLER.get();
ArrayUtil.setLong(lp.array, 0, first);
ArrayUtil.setLong(lp.array, 8, second);
return lp;
}
public void recycle() {
handle.recycle(this);
}
private static final Recycler<LongPairWrapper> RECYCLER = new Recycler<LongPairWrapper>() {
@Override
protected LongPairWrapper newObject(Handle<LongPairWrapper> handle) {
return new LongPairWrapper(handle);
}
};
private final Handle<LongPairWrapper> handle;
private LongPairWrapper(Handle<LongPairWrapper> handle) {
this.handle = handle;
}
} | 535 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/LongWrapper.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import io.netty.util.Recycler;
import io.netty.util.Recycler.Handle;
/**
* Wrapper for a long serialized into a byte array.
*/
class LongWrapper {
final byte[] array = new byte[8];
public void set(long value) {
ArrayUtil.setLong(array, 0, value);
}
public long getValue() {
return ArrayUtil.getLong(array, 0);
}
public static LongWrapper get() {
return RECYCLER.get();
}
public static LongWrapper get(long value) {
LongWrapper lp = RECYCLER.get();
ArrayUtil.setLong(lp.array, 0, value);
return lp;
}
public void recycle() {
handle.recycle(this);
}
private static final Recycler<LongWrapper> RECYCLER = new Recycler<LongWrapper>() {
@Override
protected LongWrapper newObject(Handle<LongWrapper> handle) {
return new LongWrapper(handle);
}
};
private final Handle<LongWrapper> handle;
private LongWrapper(Handle<LongWrapper> handle) {
this.handle = handle;
}
} | 536 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/LedgerMetadataIndex.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.google.protobuf.ByteString;
import io.netty.buffer.ByteBuf;
import java.io.Closeable;
import java.io.IOException;
import java.util.AbstractMap.SimpleEntry;
import java.util.Arrays;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.bookkeeper.bookie.Bookie;
import org.apache.bookkeeper.bookie.BookieException;
import org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorageDataFormats.LedgerData;
import org.apache.bookkeeper.bookie.storage.ldb.KeyValueStorage.CloseableIterator;
import org.apache.bookkeeper.bookie.storage.ldb.KeyValueStorageFactory.DbConfigType;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.collections.ConcurrentLongHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Maintains an index for the ledgers metadata.
 *
 * <p>The key is the ledgerId and the value is the {@link LedgerData} content.
 *
 * <p>Mutations are applied to the in-memory map first and queued; they reach the
 * underlying key-value store only on {@link #flush()} / {@link #removeDeletedLedgers()}.
 * Per-ledger mutations are serialized through a small pool of striped locks.
 */
public class LedgerMetadataIndex implements Closeable {
    private static final Logger log = LoggerFactory.getLogger(LedgerMetadataIndex.class);

    // Non-ledger data should have negative ID
    private static final long STORAGE_FLAGS = -0xeefd;

    // Contains all ledgers stored in the bookie
    private final ConcurrentLongHashMap<LedgerData> ledgers;
    private final AtomicInteger ledgersCount;

    private final KeyValueStorage ledgersDb;
    private final LedgerMetadataIndexStats stats;

    // Holds ledger modifications applied in memory map, and pending to be flushed on db
    private final ConcurrentLinkedQueue<Entry<Long, LedgerData>> pendingLedgersUpdates;

    // Holds ledger ids that were deleted from memory map, and pending to be flushed on db
    private final Set<Long> pendingDeletedLedgers;

    // Striped locks serializing concurrent modifications of the same ledger
    private final ReentrantLock[] locks = new ReentrantLock[16];

    public LedgerMetadataIndex(ServerConfiguration conf, KeyValueStorageFactory storageFactory, String basePath,
            StatsLogger stats) throws IOException {
        ledgersDb = storageFactory.newKeyValueStorage(basePath, "ledgers", DbConfigType.LedgerMetadata, conf);

        ledgers = ConcurrentLongHashMap.<LedgerData>newBuilder().build();
        ledgersCount = new AtomicInteger();

        // Read all ledgers from db
        CloseableIterator<Entry<byte[], byte[]>> iterator = ledgersDb.iterator();
        try {
            while (iterator.hasNext()) {
                Entry<byte[], byte[]> entry = iterator.next();
                long ledgerId = ArrayUtil.getLong(entry.getKey(), 0);
                // Negative ids are reserved for non-ledger records (e.g. STORAGE_FLAGS)
                if (ledgerId >= 0) {
                    LedgerData ledgerData = LedgerData.parseFrom(entry.getValue());
                    ledgers.put(ledgerId, ledgerData);
                    ledgersCount.incrementAndGet();
                }
            }
        } finally {
            iterator.close();
        }

        this.pendingLedgersUpdates = new ConcurrentLinkedQueue<Entry<Long, LedgerData>>();
        this.pendingDeletedLedgers = Sets.newConcurrentHashSet();

        this.stats = new LedgerMetadataIndexStats(
            stats,
            () -> (long) ledgersCount.get());

        for (int i = 0; i < locks.length; i++) {
            locks[i] = new ReentrantLock();
        }
    }

    @Override
    public void close() throws IOException {
        ledgersDb.close();
    }

    /**
     * Returns the metadata for the given ledger.
     *
     * @throws Bookie.NoLedgerException if the ledger is not present in the index
     */
    public LedgerData get(long ledgerId) throws IOException {
        LedgerData ledgerData = ledgers.get(ledgerId);
        if (ledgerData == null) {
            if (log.isDebugEnabled()) {
                log.debug("Ledger not found {}", ledgerId);
            }
            throw new Bookie.NoLedgerException(ledgerId);
        }
        return ledgerData;
    }

    /**
     * Inserts or replaces the metadata for a ledger and queues the update for the
     * next flush. The record is always stored with the "exists" flag set.
     */
    public void set(long ledgerId, LedgerData ledgerData) throws IOException {
        ledgerData = LedgerData.newBuilder(ledgerData).setExists(true).build();

        ReentrantLock lock = lockForLedger(ledgerId);
        lock.lock();
        try {
            if (ledgers.put(ledgerId, ledgerData) == null) {
                if (log.isDebugEnabled()) {
                    log.debug("Added new ledger {}", ledgerId);
                }
                ledgersCount.incrementAndGet();
            }

            pendingLedgersUpdates.add(new SimpleEntry<Long, LedgerData>(ledgerId, ledgerData));
            // A (re-)created ledger must not be deleted by a pending flush
            pendingDeletedLedgers.remove(ledgerId);
        } finally {
            lock.unlock();
        }
    }

    /**
     * Removes the ledger from the in-memory map and queues the db deletion; any
     * queued updates for the same ledger are dropped.
     */
    public void delete(long ledgerId) throws IOException {
        ReentrantLock lock = lockForLedger(ledgerId);
        lock.lock();
        try {
            if (ledgers.remove(ledgerId) != null) {
                if (log.isDebugEnabled()) {
                    log.debug("Removed ledger {}", ledgerId);
                }
                ledgersCount.decrementAndGet();
            }

            pendingDeletedLedgers.add(ledgerId);
            pendingLedgersUpdates.removeIf(e -> e.getKey() == ledgerId);
        } finally {
            lock.unlock();
        }
    }

    /**
     * Returns the ids of the ledgers within {@code [firstLedgerId, lastLedgerId)}.
     * The full-range case is special-cased to avoid a filtering wrapper.
     */
    public Iterable<Long> getActiveLedgersInRange(final long firstLedgerId, final long lastLedgerId)
            throws IOException {
        if (firstLedgerId <= 0 && lastLedgerId == Long.MAX_VALUE) {
            return ledgers.keys();
        }
        return Iterables.filter(ledgers.keys(), new Predicate<Long>() {
            @Override
            public boolean apply(Long ledgerId) {
                return ledgerId >= firstLedgerId && ledgerId < lastLedgerId;
            }
        });
    }

    /**
     * Marks the ledger as fenced.
     *
     * @return false if the ledger was already fenced, true if the flag was set now
     */
    public boolean setFenced(long ledgerId) throws IOException {
        ReentrantLock lock = lockForLedger(ledgerId);
        lock.lock();
        try {
            LedgerData ledgerData = get(ledgerId);
            if (ledgerData.getFenced()) {
                return false;
            }

            LedgerData newLedgerData = LedgerData.newBuilder(ledgerData).setFenced(true).build();

            if (ledgers.put(ledgerId, newLedgerData) == null) {
                // Ledger had been deleted
                if (log.isDebugEnabled()) {
                    log.debug("Re-inserted fenced ledger {}", ledgerId);
                }
                ledgersCount.incrementAndGet();
            } else if (log.isDebugEnabled()) {
                log.debug("Set fenced ledger {}", ledgerId);
            }

            pendingLedgersUpdates.add(new SimpleEntry<Long, LedgerData>(ledgerId, newLedgerData));
            pendingDeletedLedgers.remove(ledgerId);
            return true;
        } finally {
            lock.unlock();
        }
    }

    /**
     * Marks the ledger as being in limbo state.
     *
     * @return false if the ledger was already in limbo, true if the flag was set now
     */
    public boolean setLimbo(long ledgerId) throws IOException {
        ReentrantLock lock = lockForLedger(ledgerId);
        lock.lock();
        try {
            LedgerData ledgerData = get(ledgerId);
            if (ledgerData.getLimbo()) {
                return false;
            }

            LedgerData newLedgerData = LedgerData.newBuilder(ledgerData).setLimbo(true).build();

            if (ledgers.put(ledgerId, newLedgerData) == null) {
                // Ledger had been deleted
                if (log.isDebugEnabled()) {
                    log.debug("Re-inserted limbo ledger {}", ledgerId);
                }
                ledgersCount.incrementAndGet();
            } else if (log.isDebugEnabled()) {
                log.debug("Set limbo ledger {}", ledgerId);
            }

            pendingLedgersUpdates.add(new SimpleEntry<Long, LedgerData>(ledgerId, newLedgerData));
            pendingDeletedLedgers.remove(ledgerId);
            return true;
        } finally {
            lock.unlock();
        }
    }

    /**
     * Clears the limbo flag of the ledger.
     *
     * @return the previous value of the limbo flag
     * @throws Bookie.NoLedgerException if the ledger is not present in the index
     */
    public boolean clearLimbo(long ledgerId) throws IOException {
        ReentrantLock lock = lockForLedger(ledgerId);
        lock.lock();
        try {
            LedgerData ledgerData = get(ledgerId);
            // Defensive: get() already throws instead of returning null
            if (ledgerData == null) {
                throw new Bookie.NoLedgerException(ledgerId);
            }
            final boolean oldValue = ledgerData.getLimbo();
            LedgerData newLedgerData = LedgerData.newBuilder(ledgerData).setLimbo(false).build();

            if (ledgers.put(ledgerId, newLedgerData) == null) {
                // Ledger had been deleted
                if (log.isDebugEnabled()) {
                    log.debug("Re-inserted limbo ledger {}", ledgerId);
                }
                ledgersCount.incrementAndGet();
            } else if (log.isDebugEnabled()) {
                log.debug("Set limbo ledger {}", ledgerId);
            }

            pendingLedgersUpdates.add(new SimpleEntry<Long, LedgerData>(ledgerId, newLedgerData));
            pendingDeletedLedgers.remove(ledgerId);
            return oldValue;
        } finally {
            lock.unlock();
        }
    }

    /**
     * Sets the master key for a ledger, creating the record if needed. An already
     * stored non-empty master key may not be changed to a different non-empty value.
     *
     * @throws IOException wrapping an IllegalOpException if the key was already set
     *         to a different value
     */
    public void setMasterKey(long ledgerId, byte[] masterKey) throws IOException {
        ReentrantLock lock = lockForLedger(ledgerId);
        lock.lock();
        try {
            LedgerData ledgerData = ledgers.get(ledgerId);
            if (ledgerData == null) {
                // New ledger inserted
                ledgerData = LedgerData.newBuilder().setExists(true).setFenced(false)
                        .setMasterKey(ByteString.copyFrom(masterKey)).build();
                if (log.isDebugEnabled()) {
                    log.debug("Inserting new ledger {}", ledgerId);
                }
            } else {
                byte[] storedMasterKey = ledgerData.getMasterKey().toByteArray();
                if (ArrayUtil.isArrayAllZeros(storedMasterKey)) {
                    // update master key of the ledger
                    ledgerData = LedgerData.newBuilder(ledgerData).setMasterKey(ByteString.copyFrom(masterKey)).build();
                    if (log.isDebugEnabled()) {
                        log.debug("Replace old master key {} with new master key {}", storedMasterKey, masterKey);
                    }
                } else if (!Arrays.equals(storedMasterKey, masterKey) && !ArrayUtil.isArrayAllZeros(masterKey)) {
                    log.warn("Ledger {} masterKey in db can only be set once.", ledgerId);
                    throw new IOException(BookieException.create(BookieException.Code.IllegalOpException));
                }
            }

            if (ledgers.put(ledgerId, ledgerData) == null) {
                ledgersCount.incrementAndGet();
            }

            pendingLedgersUpdates.add(new SimpleEntry<Long, LedgerData>(ledgerId, ledgerData));
            pendingDeletedLedgers.remove(ledgerId);
        } finally {
            lock.unlock();
        }
    }

    /**
     * Flushes all pending changes.
     */
    public void flush() throws IOException {
        LongWrapper key = LongWrapper.get();

        try {
            int updatedLedgers = 0;
            Entry<Long, LedgerData> entry;
            while ((entry = pendingLedgersUpdates.poll()) != null) {
                key.set(entry.getKey());
                byte[] value = entry.getValue().toByteArray();
                ledgersDb.put(key.array, value);
                ++updatedLedgers;
            }

            if (log.isDebugEnabled()) {
                log.debug("Persisting updates to {} ledgers", updatedLedgers);
            }

            ledgersDb.sync();
        } finally {
            key.recycle();
        }
    }

    /**
     * Applies the pending ledger deletions to the db and flushes it.
     */
    public void removeDeletedLedgers() throws IOException {
        LongWrapper key = LongWrapper.get();

        try {
            int deletedLedgers = 0;
            for (Long ledgerId : pendingDeletedLedgers) {
                key.set(ledgerId);
                ledgersDb.delete(key.array);
                // BUGFIX: drop the id from the pending set once the delete has been
                // applied to the db. Previously nothing ever removed flushed ids, so
                // the set grew without bound and every call re-issued all past
                // deletes. The set is backed by a ConcurrentHashMap
                // (Sets.newConcurrentHashSet), so removal during iteration is safe;
                // ids added concurrently are simply handled by a later call.
                pendingDeletedLedgers.remove(ledgerId);
                ++deletedLedgers;
            }

            if (log.isDebugEnabled()) {
                log.debug("Persisting deletes of ledgers {}", deletedLedgers);
            }

            ledgersDb.sync();
        } finally {
            key.recycle();
        }
    }

    private ReentrantLock lockForLedger(long ledgerId) {
        // BUGFIX: Math.floorMod always yields a value in [0, locks.length), whereas
        // the previous Math.abs((int) ledgerId) % locks.length was negative (causing
        // ArrayIndexOutOfBoundsException) when the int cast produced Integer.MIN_VALUE.
        return locks[(int) Math.floorMod(ledgerId, (long) locks.length)];
    }

    /** Reads the storage-state flags record (stored under a negative key). */
    int getStorageStateFlags() throws IOException {
        LongWrapper keyWrapper = LongWrapper.get();
        LongWrapper currentWrapper = LongWrapper.get();

        try {
            keyWrapper.set(STORAGE_FLAGS);
            synchronized (ledgersDb) {
                int current = 0;
                if (ledgersDb.get(keyWrapper.array, currentWrapper.array) >= 0) {
                    current = (int) currentWrapper.getValue();
                }
                return current;
            }
        } finally {
            keyWrapper.recycle();
            currentWrapper.recycle();
        }
    }

    /**
     * Compare-and-set of the storage-state flags record.
     *
     * @return true if the stored flags matched {@code expected} and were replaced
     */
    boolean setStorageStateFlags(int expected, int newFlags) throws IOException {
        LongWrapper keyWrapper = LongWrapper.get();
        LongWrapper currentWrapper = LongWrapper.get();
        LongWrapper newFlagsWrapper = LongWrapper.get();

        try {
            keyWrapper.set(STORAGE_FLAGS);
            newFlagsWrapper.set(newFlags);
            synchronized (ledgersDb) {
                int current = 0;
                if (ledgersDb.get(keyWrapper.array, currentWrapper.array) >= 0) {
                    current = (int) currentWrapper.getValue();
                }
                if (current == expected) {
                    ledgersDb.put(keyWrapper.array, newFlagsWrapper.array);
                    ledgersDb.sync();
                    return true;
                }
            }
        } finally {
            keyWrapper.recycle();
            currentWrapper.recycle();
            newFlagsWrapper.recycle();
        }

        return false;
    }

    /**
     * Records the explicit LAC for a ledger and queues the update for the next
     * flush. A no-op if the ledger is unknown. Note: unlike the other mutators,
     * this method does not take the per-ledger stripe lock.
     */
    void setExplicitLac(long ledgerId, ByteBuf lac) throws IOException {
        LedgerData ledgerData = ledgers.get(ledgerId);
        if (ledgerData != null) {
            LedgerData newLedgerData = LedgerData.newBuilder(ledgerData)
                    .setExplicitLac(ByteString.copyFrom(lac.nioBuffer())).build();
            if (ledgers.put(ledgerId, newLedgerData) == null) {
                // Ledger had been deleted
                return;
            } else if (log.isDebugEnabled()) {
                log.debug("Set explicitLac on ledger {}", ledgerId);
            }
            pendingLedgersUpdates.add(new SimpleEntry<Long, LedgerData>(ledgerId, newLedgerData));
        } else {
            // unknown ledger here
        }
    }
}
| 537 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/TransientLedgerInfo.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import static org.apache.bookkeeper.bookie.LastAddConfirmedUpdateNotification.WATCHER_RECYCLER;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.bookie.LastAddConfirmedUpdateNotification;
import org.apache.bookkeeper.common.util.Watchable;
import org.apache.bookkeeper.common.util.Watcher;
/**
 * This class borrows the logic from FileInfo.
 *
 * <p>This class is used for holding all the transient states for a given ledger:
 * the last-add-confirmed (LAC), the optional explicit LAC buffer, and the set of
 * watchers waiting for LAC updates.
 */
class TransientLedgerInfo extends Watchable<LastAddConfirmedUpdateNotification> implements AutoCloseable {

    // How long (in minutes) an instance may go unaccessed before isStale() reports it
    static final long LEDGER_INFO_CACHING_TIME_MINUTES = 10;

    // Sentinel meaning "no LAC has been recorded yet"
    static final long NOT_ASSIGNED_LAC = Long.MIN_VALUE;

    // lac, volatile so getLastAddConfirmed() can read it without the monitor
    private volatile long lac = NOT_ASSIGNED_LAC;
    // request from explicit lac requests; lazily allocated, guarded by 'this'
    private ByteBuffer explicitLac = null;
    // is the ledger info closed?
    private boolean isClosed;

    private final long ledgerId;
    // reference to LedgerMetadataIndex
    private final LedgerMetadataIndex ledgerIndex;

    // Timestamp (ms) of the last access, used by isStale()
    private long lastAccessed;

    /**
     * Construct an Watchable with zero watchers.
     */
    public TransientLedgerInfo(long ledgerId, LedgerMetadataIndex ledgerIndex) {
        super(WATCHER_RECYCLER);
        this.ledgerId = ledgerId;
        this.ledgerIndex = ledgerIndex;
        this.lastAccessed = System.currentTimeMillis();
    }

    /** Returns the current LAC, or {@link #NOT_ASSIGNED_LAC} if none recorded. */
    long getLastAddConfirmed() {
        return lac;
    }

    /**
     * Advances the LAC if {@code lac} is greater than the stored value and, if it
     * changed, notifies watchers (outside the monitor to avoid calling alien code
     * under the lock).
     *
     * @return the LAC in effect after the update attempt
     */
    long setLastAddConfirmed(long lac) {
        long lacToReturn;
        boolean changed = false;
        synchronized (this) {
            if (this.lac == NOT_ASSIGNED_LAC || this.lac < lac) {
                this.lac = lac;
                changed = true;
                lastAccessed = System.currentTimeMillis();
            }
            lacToReturn = this.lac;
        }
        if (changed) {
            notifyWatchers(lacToReturn);
        }
        return lacToReturn;
    }

    /**
     * Registers a watcher to be notified once the LAC advances past
     * {@code previousLAC}.
     *
     * @return false if the LAC has already advanced past {@code previousLAC} (or
     *         this info is closed) so no watcher was registered; true otherwise
     */
    synchronized boolean waitForLastAddConfirmedUpdate(long previousLAC,
            Watcher<LastAddConfirmedUpdateNotification> watcher) throws IOException {
        lastAccessed = System.currentTimeMillis();
        if ((lac != NOT_ASSIGNED_LAC && lac > previousLAC) || isClosed) {
            return false;
        }
        addWatcher(watcher);
        return true;
    }

    /** Unregisters a watcher previously added by waitForLastAddConfirmedUpdate. */
    synchronized void cancelWaitForLastAddConfirmedUpdate(Watcher<LastAddConfirmedUpdateNotification> watcher)
            throws IOException {
        deleteWatcher(watcher);
    }

    /**
     * Returns a fresh copy of the explicit-LAC buffer, or null if none was ever set.
     * The internal buffer's position is restored after copying.
     */
    public ByteBuf getExplicitLac() {
        ByteBuf retLac = null;
        synchronized (this) {
            if (explicitLac != null) {
                retLac = Unpooled.buffer(explicitLac.capacity());
                explicitLac.rewind(); // copy from the beginning
                retLac.writeBytes(explicitLac);
                explicitLac.rewind();
                return retLac;
            }
        }
        return retLac;
    }

    /**
     * Stores a copy of {@code lac} as the explicit LAC and also applies the LAC
     * value embedded in it via {@link #setLastAddConfirmed(long)} (outside the
     * monitor). The buffer starts with an 8-byte ledger id followed by the 8-byte
     * LAC value; the caller's reader index is left unchanged.
     */
    public void setExplicitLac(ByteBuf lac) {
        long explicitLacValue;
        synchronized (this) {
            if (explicitLac == null) {
                explicitLac = ByteBuffer.allocate(lac.capacity());
            }
            int readerIndex = lac.readerIndex();
            lac.readBytes(explicitLac);
            lac.readerIndex(readerIndex);
            explicitLac.rewind();

            // skip the ledger id
            explicitLac.getLong();
            explicitLacValue = explicitLac.getLong();
            explicitLac.rewind();

            lastAccessed = System.currentTimeMillis();
        }
        setLastAddConfirmed(explicitLacValue);
    }

    /** True if this info has not been accessed within the caching window. */
    boolean isStale() {
        return (lastAccessed + TimeUnit.MINUTES.toMillis(LEDGER_INFO_CACHING_TIME_MINUTES)) < System
                .currentTimeMillis();
    }

    /** Notifies all registered watchers of the given LAC value. */
    void notifyWatchers(long lastAddConfirmed) {
        notifyWatchers(LastAddConfirmedUpdateNotification.FUNC, lastAddConfirmed);
    }

    /**
     * Marks this info as closed (idempotent) and wakes every watcher with
     * Long.MAX_VALUE so that no caller stays blocked waiting for LAC updates.
     */
    @Override
    public void close() {
        synchronized (this) {
            if (isClosed) {
                return;
            }
            isClosed = true;
        }
        // notify watchers
        notifyWatchers(Long.MAX_VALUE);
    }
}
| 538 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/ReadCache.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import static org.apache.bookkeeper.bookie.storage.ldb.WriteCache.align64;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.Unpooled;
import io.netty.util.ReferenceCountUtil;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap;
import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap.LongPair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Read cache implementation.
 *
 * <p>Uses the specified amount of memory and pairs it with a hashmap.
 *
 * <p>The memory is split in multiple segments that are used in a
 * ring-buffer fashion. When the read cache is full, the oldest segment
 * is cleared and rotated to make space for new entries to be added to
 * the read cache.
 */
public class ReadCache implements Closeable {
    private static final Logger log = LoggerFactory.getLogger(ReadCache.class);

    private static final int DEFAULT_MAX_SEGMENT_SIZE = 1 * 1024 * 1024 * 1024;

    // Segment payload buffers and their (ledgerId, entryId) -> (offset, size) indexes
    private final List<ByteBuf> cacheSegments;
    private final List<ConcurrentLongLongPairHashMap> cacheIndexes;

    // Index of the segment currently being filled; changed only under the write lock
    private int currentSegmentIdx;
    // Next free offset inside the current segment
    private final AtomicInteger currentSegmentOffset = new AtomicInteger(0);

    private final int segmentSize;
    private final ByteBufAllocator allocator;

    // Read lock: concurrent appends/lookups on the current segments.
    // Write lock: segment roll-over.
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    public ReadCache(ByteBufAllocator allocator, long maxCacheSize) {
        this(allocator, maxCacheSize, DEFAULT_MAX_SEGMENT_SIZE);
    }

    public ReadCache(ByteBufAllocator allocator, long maxCacheSize, int maxSegmentSize) {
        this.allocator = allocator;
        int segmentsCount = Math.max(2, (int) (maxCacheSize / maxSegmentSize));
        segmentSize = (int) (maxCacheSize / segmentsCount);

        cacheSegments = new ArrayList<>();
        cacheIndexes = new ArrayList<>();

        for (int i = 0; i < segmentsCount; i++) {
            cacheSegments.add(Unpooled.directBuffer(segmentSize, segmentSize));
            ConcurrentLongLongPairHashMap concurrentLongLongPairHashMap = ConcurrentLongLongPairHashMap.newBuilder()
                    .expectedItems(4096)
                    .concurrencyLevel(2 * Runtime.getRuntime().availableProcessors())
                    .build();
            cacheIndexes.add(concurrentLongLongPairHashMap);
        }
    }

    @Override
    public void close() {
        cacheSegments.forEach(ReferenceCountUtil::safeRelease);
    }

    /**
     * Inserts a copy of {@code entry} into the cache. Entries larger than a whole
     * segment are skipped (with a warning). May trigger a segment roll-over, which
     * discards the oldest segment's content.
     */
    public void put(long ledgerId, long entryId, ByteBuf entry) {
        int entrySize = entry.readableBytes();
        // Each entry reserves an aligned slice (align64) so that entries stay aligned
        int alignedSize = align64(entrySize);
        lock.readLock().lock();

        try {
            if (entrySize > segmentSize) {
                log.warn("entrySize {} > segmentSize {}, skip update read cache!", entrySize, segmentSize);
                return;
            }
            int offset = currentSegmentOffset.getAndAdd(alignedSize);
            if (offset + entrySize > segmentSize) {
                // Roll-over the segment (outside the read-lock)
            } else {
                // Copy entry into read cache segment
                cacheSegments.get(currentSegmentIdx).setBytes(offset, entry, entry.readerIndex(),
                        entry.readableBytes());
                cacheIndexes.get(currentSegmentIdx).put(ledgerId, entryId, offset, entrySize);
                return;
            }
        } finally {
            lock.readLock().unlock();
        }

        // We could not insert into the segment: take the write lock and roll over to
        // the next segment
        lock.writeLock().lock();

        try {
            // BUGFIX: reserve the aligned size here too. The previous code added the
            // raw entrySize, so one pass through this path broke the alignment of
            // every entry stored afterwards in the segment.
            int offset = currentSegmentOffset.getAndAdd(alignedSize);
            if (offset + entrySize > segmentSize) {
                // Rollover to next segment
                currentSegmentIdx = (currentSegmentIdx + 1) % cacheSegments.size();
                currentSegmentOffset.set(alignedSize);
                cacheIndexes.get(currentSegmentIdx).clear();
                offset = 0;
            }

            // Copy entry into read cache segment
            cacheSegments.get(currentSegmentIdx).setBytes(offset, entry, entry.readerIndex(), entry.readableBytes());
            cacheIndexes.get(currentSegmentIdx).put(ledgerId, entryId, offset, entrySize);
        } finally {
            lock.writeLock().unlock();
        }
    }

    /**
     * Returns a freshly-allocated copy of the cached entry, or null if not cached.
     */
    public ByteBuf get(long ledgerId, long entryId) {
        lock.readLock().lock();

        try {
            // We need to check all the segments, starting from the current one and looking
            // backward to minimize the
            // checks for recently inserted entries
            int size = cacheSegments.size();
            for (int i = 0; i < size; i++) {
                int segmentIdx = (currentSegmentIdx + (size - i)) % size;

                LongPair res = cacheIndexes.get(segmentIdx).get(ledgerId, entryId);
                if (res != null) {
                    int entryOffset = (int) res.first;
                    int entryLen = (int) res.second;

                    ByteBuf entry = allocator.buffer(entryLen, entryLen);
                    entry.writeBytes(cacheSegments.get(segmentIdx), entryOffset, entryLen);
                    return entry;
                }
            }
        } finally {
            lock.readLock().unlock();
        }

        // Entry not found in any segment
        return null;
    }

    /** True if the entry is currently present in any segment (no data copied). */
    public boolean hasEntry(long ledgerId, long entryId) {
        lock.readLock().lock();

        try {
            int size = cacheSegments.size();
            for (int i = 0; i < size; i++) {
                int segmentIdx = (currentSegmentIdx + (size - i)) % size;

                LongPair res = cacheIndexes.get(segmentIdx).get(ledgerId, entryId);
                if (res != null) {
                    return true;
                }
            }
        } finally {
            lock.readLock().unlock();
        }

        // Entry not found in any segment
        return false;
    }

    /**
     * @return the total size of cached entries
     */
    public long size() {
        lock.readLock().lock();

        try {
            long size = 0;
            for (int i = 0; i < cacheIndexes.size(); i++) {
                if (i == currentSegmentIdx) {
                    size += currentSegmentOffset.get();
                } else if (!cacheIndexes.get(i).isEmpty()) {
                    // Non-current, non-empty segments are counted as full
                    size += segmentSize;
                } else {
                    // the segment is empty
                }
            }
            return size;
        } finally {
            lock.readLock().unlock();
        }
    }

    /**
     * @return the total number of cached entries
     */
    public long count() {
        lock.readLock().lock();

        try {
            long count = 0;
            for (int i = 0; i < cacheIndexes.size(); i++) {
                count += cacheIndexes.get(i).size();
            }
            return count;
        } finally {
            lock.readLock().unlock();
        }
    }
}
| 539 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/PersistentEntryLogMetadataMap.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import static org.apache.bookkeeper.util.BookKeeperConstants.METADATA_CACHE;
import io.netty.util.concurrent.FastThreadLocal;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.BookieException.EntryLogMetadataMapException;
import org.apache.bookkeeper.bookie.EntryLogMetadata;
import org.apache.bookkeeper.bookie.EntryLogMetadata.EntryLogMetadataRecyclable;
import org.apache.bookkeeper.bookie.EntryLogMetadataMap;
import org.apache.bookkeeper.bookie.storage.ldb.KeyValueStorage.CloseableIterator;
import org.apache.bookkeeper.bookie.storage.ldb.KeyValueStorageFactory.DbConfigType;
import org.apache.bookkeeper.conf.ServerConfiguration;
/**
* Persistent entryLogMetadata-map that stores entry-loggers metadata into
* rocksDB.
*/
@Slf4j
public class PersistentEntryLogMetadataMap implements EntryLogMetadataMap {
    // persistent Rocksdb to store metadata-map
    private final KeyValueStorage metadataMapDB;
    // Flipped once on close; public operations check it via throwIfClosed()
    private AtomicBoolean isClosed = new AtomicBoolean(false);

    // Per-thread reusable output buffer for serializing EntryLogMetadata, so no
    // stream allocation happens on every db write
    private static final FastThreadLocal<ByteArrayOutputStream> baos = new FastThreadLocal<ByteArrayOutputStream>() {
        @Override
        protected ByteArrayOutputStream initialValue() {
            return new ByteArrayOutputStream();
        }
    };
    // Per-thread input buffer for deserialization; the 1-byte backing array is just
    // a placeholder — presumably replaced with real content before reading (not
    // visible here; TODO confirm against the read path)
    private static final FastThreadLocal<ByteArrayInputStream> bais = new FastThreadLocal<ByteArrayInputStream>() {
        @Override
        protected ByteArrayInputStream initialValue() {
            return new ByteArrayInputStream(new byte[1]);
        }
    };
    // DataOutputStream view over the thread-local 'baos'
    private static final FastThreadLocal<DataOutputStream> dataos = new FastThreadLocal<DataOutputStream>() {
        @Override
        protected DataOutputStream initialValue() {
            return new DataOutputStream(baos.get());
        }
    };
    // DataInputStream view over the thread-local 'bais'
    private static final FastThreadLocal<DataInputStream> datais = new FastThreadLocal<DataInputStream>() {
        @Override
        protected DataInputStream initialValue() {
            return new DataInputStream(bais.get());
        }
    };
public PersistentEntryLogMetadataMap(String metadataPath, ServerConfiguration conf) throws IOException {
log.info("Loading persistent entrylog metadata-map from {}/{}", metadataPath, METADATA_CACHE);
File dir = new File(metadataPath);
if (!dir.mkdirs() && !dir.exists()) {
String err = "Unable to create directory " + dir;
log.error(err);
throw new IOException(err);
}
metadataMapDB = KeyValueStorageRocksDB.factory.newKeyValueStorage(metadataPath, METADATA_CACHE,
DbConfigType.Default, conf);
}
@Override
public boolean containsKey(long entryLogId) throws EntryLogMetadataMapException {
throwIfClosed();
LongWrapper key = LongWrapper.get(entryLogId);
try {
boolean isExist;
try {
isExist = metadataMapDB.get(key.array) != null;
} catch (IOException e) {
throw new EntryLogMetadataMapException(e);
}
return isExist;
} finally {
key.recycle();
}
}
@Override
public void put(long entryLogId, EntryLogMetadata entryLogMeta) throws EntryLogMetadataMapException {
throwIfClosed();
LongWrapper key = LongWrapper.get(entryLogId);
try {
baos.get().reset();
try {
entryLogMeta.serialize(dataos.get());
metadataMapDB.put(key.array, baos.get().toByteArray());
} catch (IllegalStateException | IOException e) {
log.error("Failed to serialize entrylog-metadata, entryLogId {}", entryLogId);
throw new EntryLogMetadataMapException(e);
}
} finally {
key.recycle();
}
}
/**
* {@link EntryLogMetadata} life-cycle in supplied action will be transient
* and it will be recycled as soon as supplied action is completed.
*/
@Override
public void forEach(BiConsumer<Long, EntryLogMetadata> action) throws EntryLogMetadataMapException {
throwIfClosed();
CloseableIterator<Entry<byte[], byte[]>> iterator = metadataMapDB.iterator();
try {
while (iterator.hasNext()) {
if (isClosed.get()) {
break;
}
Entry<byte[], byte[]> entry = iterator.next();
long entryLogId = ArrayUtil.getLong(entry.getKey(), 0);
EntryLogMetadataRecyclable metadata = getEntryLogMetadataRecyclable(entry.getValue());
try {
action.accept(entryLogId, metadata);
} finally {
metadata.recycle();
}
}
} catch (IOException e) {
log.error("Failed to iterate over entry-log metadata map {}", e.getMessage(), e);
throw new EntryLogMetadataMapException(e);
} finally {
try {
iterator.close();
} catch (IOException e) {
log.error("Failed to close entry-log metadata-map rocksDB iterator {}", e.getMessage(), e);
}
}
}
/**
* {@link EntryLogMetadata} life-cycle in supplied action will be transient
* and it will be recycled as soon as supplied action is completed.
*/
@Override
public void forKey(long entryLogId, BiConsumer<Long, EntryLogMetadata> action) throws EntryLogMetadataMapException {
throwIfClosed();
LongWrapper key = LongWrapper.get(entryLogId);
try {
byte[] value = metadataMapDB.get(key.array);
if (value == null || value.length == 0) {
action.accept(entryLogId, null);
return;
}
EntryLogMetadataRecyclable metadata = getEntryLogMetadataRecyclable(value);
try {
action.accept(entryLogId, metadata);
} finally {
metadata.recycle();
}
} catch (IOException e) {
log.error("Failed to get metadata for entryLogId {}: {}", entryLogId, e.getMessage(), e);
throw new EntryLogMetadataMapException(e);
} finally {
key.recycle();
}
}
private EntryLogMetadataRecyclable getEntryLogMetadataRecyclable(byte[] value) throws IOException {
ByteArrayInputStream localBais = bais.get();
DataInputStream localDatais = datais.get();
if (localBais.available() < value.length) {
localBais.close();
localDatais.close();
ByteArrayInputStream newBais = new ByteArrayInputStream(value);
bais.set(newBais);
datais.set(new DataInputStream(newBais));
} else {
localBais.read(value, 0, value.length);
}
localBais.reset();
localDatais.reset();
EntryLogMetadataRecyclable metadata = EntryLogMetadata.deserialize(datais.get());
return metadata;
}
@Override
public void remove(long entryLogId) throws EntryLogMetadataMapException {
throwIfClosed();
LongWrapper key = LongWrapper.get(entryLogId);
try {
try {
metadataMapDB.delete(key.array);
} catch (IOException e) {
throw new EntryLogMetadataMapException(e);
}
} finally {
key.recycle();
}
}
@Override
public int size() throws EntryLogMetadataMapException {
throwIfClosed();
try {
return (int) metadataMapDB.count();
} catch (IOException e) {
throw new EntryLogMetadataMapException(e);
}
}
@Override
public void clear() throws EntryLogMetadataMapException {
try {
try (KeyValueStorage.Batch b = metadataMapDB.newBatch();
CloseableIterator<byte[]> itr = metadataMapDB.keys()) {
while (itr.hasNext()) {
b.remove(itr.next());
}
b.flush();
}
} catch (IOException e) {
throw new EntryLogMetadataMapException(e);
}
}
@Override
public void close() throws IOException {
if (isClosed.compareAndSet(false, true)) {
metadataMapDB.close();
} else {
log.warn("Attempted to close already closed PersistentEntryLogMetadataMap");
}
}
public void throwIfClosed() throws EntryLogMetadataMapException {
if (isClosed.get()) {
final String msg = "Attempted to use PersistentEntryLogMetadataMap after it was closed";
log.error(msg);
throw new EntryLogMetadataMapException(new IOException(msg));
}
}
}
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import java.io.File;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.util.Base64;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.bookie.BookieImpl;
import org.apache.bookkeeper.bookie.storage.ldb.KeyValueStorageFactory.DbConfigType;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.commons.lang.time.DurationFormatUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Scan the ledgers index to make sure it is readable.
 */
public class LedgersIndexCheckOp {
    private static final Logger LOG = LoggerFactory.getLogger(LedgersIndexCheckOp.class);
    private final ServerConfiguration conf;
    // When true, log one line per ledger scanned instead of a periodic summary.
    private final boolean verbose;
    private static final String LedgersSubPath = "ledgers";

    public LedgersIndexCheckOp(ServerConfiguration conf, boolean verbose) {
        this.conf = conf;
        this.verbose = verbose;
    }

    /**
     * Open each configured ledgers index read-only and iterate over all
     * records to verify they parse.
     *
     * @return true if every index directory scanned cleanly, false on the
     *         first scan failure
     * @throws IOException if index and ledger dir counts do not match
     */
    public boolean initiate() throws IOException {
        File[] indexDirs = conf.getIndexDirs();
        if (indexDirs == null) {
            // Fall back to the ledger dirs when no dedicated index dirs are set.
            indexDirs = conf.getLedgerDirs();
        }
        if (indexDirs.length != conf.getLedgerDirs().length) {
            throw new IOException("ledger and index dirs size not matched");
        }
        long startTime = System.nanoTime();
        for (int i = 0; i < conf.getLedgerDirs().length; i++) {
            File indexDir = indexDirs[i];
            String iBasePath = BookieImpl.getCurrentDirectory(indexDir).toString();
            Path indexCurrentPath = FileSystems.getDefault().getPath(iBasePath, LedgersSubPath);
            LOG.info("Loading ledgers index from {}", indexCurrentPath);
            LOG.info("Starting index scan");
            // try-with-resources: the RocksDB handle (and its iterator) were
            // previously never closed, leaking native resources for every
            // directory scanned.
            try (KeyValueStorage index = new KeyValueStorageRocksDB(iBasePath, LedgersSubPath,
                    DbConfigType.Default, conf, true)) {
                // Read all ledgers from db
                int ctr = 0;
                try (KeyValueStorage.CloseableIterator<Map.Entry<byte[], byte[]>> iterator = index.iterator()) {
                    while (iterator.hasNext()) {
                        ctr++;
                        Map.Entry<byte[], byte[]> entry = iterator.next();
                        long ledgerId = ArrayUtil.getLong(entry.getKey(), 0);
                        // parseFrom throws if the stored record is corrupt,
                        // which is exactly what this check is looking for.
                        DbLedgerStorageDataFormats.LedgerData ledgerData =
                                DbLedgerStorageDataFormats.LedgerData.parseFrom(entry.getValue());
                        if (verbose) {
                            LOG.info(
                                    "Scanned: {}, ledger: {}, exists: {}, isFenced: {}, masterKey: {}, explicitLAC: {}",
                                    ctr,
                                    ledgerId,
                                    (ledgerData.hasExists() ? ledgerData.getExists() : "-"),
                                    (ledgerData.hasFenced() ? ledgerData.getFenced() : "-"),
                                    (ledgerData.hasMasterKey()
                                            ? Base64.getEncoder()
                                            .encodeToString(ledgerData.getMasterKey().toByteArray())
                                            : "-"),
                                    (ledgerData.hasExplicitLac() ? ledgerData.getExplicitLac() : "-"));
                        } else if (ctr % 100 == 0) {
                            LOG.info("Scanned {} ledgers", ctr);
                        }
                    }
                }
                LOG.info("Scanned {} ledgers", ctr);
            } catch (Throwable t) {
                // Any failure (IO, corrupt protobuf, ...) aborts the check.
                LOG.error("Index scan has failed with error", t);
                return false;
            }
        }
        LOG.info("Index scan has completed successfully. Total time: {}",
                DurationFormatUtils.formatDurationHMS(
                        TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)));
        return true;
    }
}
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import com.google.common.collect.Iterables;
import java.io.Closeable;
import java.io.IOException;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.bookie.Bookie;
import org.apache.bookkeeper.bookie.EntryLocation;
import org.apache.bookkeeper.bookie.storage.ldb.KeyValueStorage.Batch;
import org.apache.bookkeeper.bookie.storage.ldb.KeyValueStorageFactory.DbConfigType;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.MathUtils;
import org.apache.bookkeeper.util.collections.ConcurrentLongHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Maintains an index of the entry locations in the EntryLogger.
 *
 * <p>For each ledger multiple entries are stored in the same "record", represented
 * by the {@link LedgerIndexPage} class.
 */
public class EntryLocationIndex implements Closeable {
    private static final Logger log = LoggerFactory.getLogger(EntryLocationIndex.class);

    private final KeyValueStorage locationsDb;
    // Ledgers marked deleted; their index entries are pruned lazily by
    // removeOffsetFromDeletedLedgers().
    private final ConcurrentLongHashSet deletedLedgers = ConcurrentLongHashSet.newBuilder().build();
    private final EntryLocationIndexStats stats;
    private boolean isCompacting;

    public EntryLocationIndex(ServerConfiguration conf, KeyValueStorageFactory storageFactory, String basePath,
            StatsLogger stats) throws IOException {
        locationsDb = storageFactory.newKeyValueStorage(basePath, "locations", DbConfigType.EntryLocation, conf);
        this.stats = new EntryLocationIndexStats(
                stats,
                () -> {
                    try {
                        return locationsDb.count();
                    } catch (IOException e) {
                        // Gauge callback must not throw; report -1 as "unknown".
                        return -1L;
                    }
                });
    }

    @Override
    public void close() throws IOException {
        locationsDb.close();
    }

    /**
     * Look up the entry-log location of (ledgerId, entryId).
     *
     * @return the location, or 0 if the entry is not in the index
     */
    public long getLocation(long ledgerId, long entryId) throws IOException {
        LongPairWrapper key = LongPairWrapper.get(ledgerId, entryId);
        LongWrapper value = LongWrapper.get();
        long startTimeNanos = MathUtils.nowInNano();
        boolean operationSuccess = false;
        try {
            if (locationsDb.get(key.array, value.array) < 0) {
                if (log.isDebugEnabled()) {
                    log.debug("Entry not found {}@{} in db index", ledgerId, entryId);
                }
                return 0;
            }
            operationSuccess = true;
            return value.getValue();
        } finally {
            key.recycle();
            value.recycle();
            // "not found" is reported as a failed lookup in the stats.
            if (operationSuccess) {
                stats.getLookupEntryLocationStats()
                        .registerSuccessfulEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            } else {
                stats.getLookupEntryLocationStats()
                        .registerFailedEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            }
        }
    }

    /**
     * Return the last entry id indexed for the given ledger.
     *
     * @throws Bookie.NoEntryException if the ledger is deleted or has no entries
     */
    public long getLastEntryInLedger(long ledgerId) throws IOException {
        if (deletedLedgers.contains(ledgerId)) {
            // Ledger already deleted
            if (log.isDebugEnabled()) {
                log.debug("Ledger {} already deleted in db", ledgerId);
            }
            /**
             * when Ledger already deleted,
             * throw Bookie.NoEntryException same like the method
             * {@link EntryLocationIndex.getLastEntryInLedgerInternal} solving ledgerId is not found.
             * */
            throw new Bookie.NoEntryException(ledgerId, -1);
        }
        return getLastEntryInLedgerInternal(ledgerId);
    }

    private long getLastEntryInLedgerInternal(long ledgerId) throws IOException {
        LongPairWrapper maxEntryId = LongPairWrapper.get(ledgerId, Long.MAX_VALUE);
        // Search the last entry in storage
        Entry<byte[], byte[]> entry = locationsDb.getFloor(maxEntryId.array);
        maxEntryId.recycle();
        if (entry == null) {
            throw new Bookie.NoEntryException(ledgerId, -1);
        } else {
            long foundLedgerId = ArrayUtil.getLong(entry.getKey(), 0);
            long lastEntryId = ArrayUtil.getLong(entry.getKey(), 8);
            // getFloor may land on the previous ledger's last entry, so verify
            // the ledger id actually matches.
            if (foundLedgerId == ledgerId) {
                if (log.isDebugEnabled()) {
                    log.debug("Found last page in storage db for ledger {} - last entry: {}", ledgerId, lastEntryId);
                }
                return lastEntryId;
            } else {
                throw new Bookie.NoEntryException(ledgerId, -1);
            }
        }
    }

    /**
     * Add a single location. try-with-resources ensures the batch is closed
     * even if flush() throws (it was previously leaked on that path).
     */
    public void addLocation(long ledgerId, long entryId, long location) throws IOException {
        try (Batch batch = locationsDb.newBatch()) {
            addLocation(batch, ledgerId, entryId, location);
            batch.flush();
        }
    }

    public Batch newBatch() {
        return locationsDb.newBatch();
    }

    public void addLocation(Batch batch, long ledgerId, long entryId, long location) throws IOException {
        LongPairWrapper key = LongPairWrapper.get(ledgerId, entryId);
        LongWrapper value = LongWrapper.get(location);
        if (log.isDebugEnabled()) {
            log.debug("Add location - ledger: {} -- entry: {} -- location: {}", ledgerId, entryId, location);
        }
        try {
            batch.put(key.array, value.array);
        } finally {
            key.recycle();
            value.recycle();
        }
    }

    public void updateLocations(Iterable<EntryLocation> newLocations) throws IOException {
        if (log.isDebugEnabled()) {
            log.debug("Update locations -- {}", Iterables.size(newLocations));
        }
        // Update all the ledger index pages with the new locations.
        // try-with-resources closes the batch even when flush() fails.
        try (Batch batch = newBatch()) {
            for (EntryLocation e : newLocations) {
                if (log.isDebugEnabled()) {
                    log.debug("Update location - ledger: {} -- entry: {}", e.ledger, e.entry);
                }
                addLocation(batch, e.ledger, e.entry, e.location);
            }
            batch.flush();
        }
    }

    /**
     * Mark a ledger as deleted; its index records are removed later by
     * {@link #removeOffsetFromDeletedLedgers()}.
     */
    public void delete(long ledgerId) throws IOException {
        // We need to find all the LedgerIndexPage records belonging to one specific
        // ledgers
        deletedLedgers.add(ledgerId);
    }

    public String getEntryLocationDBPath() {
        return locationsDb.getDBPath();
    }

    public void compact() throws IOException {
        try {
            isCompacting = true;
            locationsDb.compact();
        } finally {
            isCompacting = false;
        }
    }

    public boolean isCompacting() {
        return isCompacting;
    }

    public void removeOffsetFromDeletedLedgers() throws IOException {
        Set<Long> ledgersToDelete = deletedLedgers.items();
        if (ledgersToDelete.isEmpty()) {
            return;
        }
        LongPairWrapper firstKeyWrapper = LongPairWrapper.get(-1, -1);
        LongPairWrapper lastKeyWrapper = LongPairWrapper.get(-1, -1);
        log.info("Deleting indexes for ledgers: {}", ledgersToDelete);
        long startTime = System.nanoTime();
        try (Batch batch = locationsDb.newBatch()) {
            for (long ledgerId : ledgersToDelete) {
                if (log.isDebugEnabled()) {
                    log.debug("Deleting indexes from ledger {}", ledgerId);
                }
                // Remove the whole key range [ (ledgerId, 0), (ledgerId, MAX) ).
                firstKeyWrapper.set(ledgerId, 0);
                lastKeyWrapper.set(ledgerId, Long.MAX_VALUE);
                batch.deleteRange(firstKeyWrapper.array, lastKeyWrapper.array);
            }
            batch.flush();
            // Only un-mark ledgers after the deletions are durably flushed.
            for (long ledgerId : ledgersToDelete) {
                deletedLedgers.remove(ledgerId);
            }
        } finally {
            firstKeyWrapper.recycle();
            lastKeyWrapper.recycle();
        }
        log.info("Deleted indexes from {} ledgers in {} seconds", ledgersToDelete.size(),
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime) / 1000.0);
    }
}
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import static com.google.common.base.Preconditions.checkArgument;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.Unpooled;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.bookkeeper.common.util.MathUtils;
import org.apache.bookkeeper.util.collections.ConcurrentLongHashSet;
import org.apache.bookkeeper.util.collections.ConcurrentLongLongHashMap;
import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap;
import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap.LongPair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Write cache implementation.
*
* <p>The write cache will allocate the requested size from direct memory and it
* will break it down into multiple segments.
*
* <p>The entries are appended in a common buffer and indexed though a hashmap,
* until the cache is cleared.
*
* <p>There is the possibility to iterate through the stored entries in an ordered
* way, by (ledgerId, entry).
*/
public class WriteCache implements Closeable {
    /**
     * Consumer that is used to scan the entire write cache.
     */
    public interface EntryConsumer {
        void accept(long ledgerId, long entryId, ByteBuf entry) throws IOException;
    }
    // Maps (ledgerId, entryId) -> (offset into the segmented cache, entry size).
    private final ConcurrentLongLongPairHashMap index = ConcurrentLongLongPairHashMap.newBuilder()
            .expectedItems(4096)
            .concurrencyLevel(2 * Runtime.getRuntime().availableProcessors())
            .build();
    // Maps ledgerId -> highest entryId written so far for that ledger.
    private final ConcurrentLongLongHashMap lastEntryMap = ConcurrentLongLongHashMap.newBuilder()
            .expectedItems(4096)
            .concurrencyLevel(2 * Runtime.getRuntime().availableProcessors())
            .build();
    // The cache memory, split into direct-buffer segments of maxSegmentSize each
    // (the last one may be smaller). A global offset is decomposed into
    // (segmentIdx, localOffset) via segmentOffsetBits / segmentOffsetMask.
    private final ByteBuf[] cacheSegments;
    private final int segmentsCount;
    private final long maxCacheSize;
    private final int maxSegmentSize;
    private final long segmentOffsetMask;
    private final long segmentOffsetBits;
    private final AtomicLong cacheSize = new AtomicLong(0);
    // Next free global offset; advanced atomically by concurrent writers.
    private final AtomicLong cacheOffset = new AtomicLong(0);
    private final LongAdder cacheCount = new LongAdder();
    // Ledgers deleted while this cache was active; their entries are skipped
    // during forEach() but remain physically present until clear().
    private final ConcurrentLongHashSet deletedLedgers = ConcurrentLongHashSet.newBuilder().build();
    private final ByteBufAllocator allocator;

    public WriteCache(ByteBufAllocator allocator, long maxCacheSize) {
        // Default maxSegmentSize set to 1Gb
        this(allocator, maxCacheSize, 1 * 1024 * 1024 * 1024);
    }

    public WriteCache(ByteBufAllocator allocator, long maxCacheSize, int maxSegmentSize) {
        checkArgument(maxSegmentSize > 0);
        // Power-of-two segment size allows offset decomposition with mask/shift.
        long alignedMaxSegmentSize = alignToPowerOfTwo(maxSegmentSize);
        checkArgument(maxSegmentSize == alignedMaxSegmentSize, "Max segment size needs to be in form of 2^n");
        this.allocator = allocator;
        this.maxCacheSize = maxCacheSize;
        this.maxSegmentSize = (int) maxSegmentSize;
        this.segmentOffsetMask = maxSegmentSize - 1;
        this.segmentOffsetBits = 63 - Long.numberOfLeadingZeros(maxSegmentSize);
        this.segmentsCount = 1 + (int) (maxCacheSize / maxSegmentSize);
        this.cacheSegments = new ByteBuf[segmentsCount];
        for (int i = 0; i < segmentsCount - 1; i++) {
            // All intermediate segments will be full-size
            cacheSegments[i] = Unpooled.directBuffer(maxSegmentSize, maxSegmentSize);
        }
        int lastSegmentSize = (int) (maxCacheSize % maxSegmentSize);
        cacheSegments[segmentsCount - 1] = Unpooled.directBuffer(lastSegmentSize, lastSegmentSize);
    }

    /** Reset the cache to empty without releasing the segment buffers. */
    public void clear() {
        cacheSize.set(0L);
        cacheOffset.set(0L);
        cacheCount.reset();
        index.clear();
        lastEntryMap.clear();
        deletedLedgers.clear();
    }

    @Override
    public void close() {
        for (ByteBuf buf : cacheSegments) {
            buf.release();
        }
    }

    /**
     * Append an entry to the cache.
     *
     * @return true if the entry was stored, false if the cache is full
     */
    public boolean put(long ledgerId, long entryId, ByteBuf entry) {
        int size = entry.readableBytes();
        // Align to 64 bytes so that different threads will not contend the same L1
        // cache line
        int alignedSize = align64(size);
        long offset;
        int localOffset;
        int segmentIdx;
        // Claim a region via getAndAdd; retry when the claimed region would
        // straddle a segment boundary (the skipped tail is simply wasted).
        while (true) {
            offset = cacheOffset.getAndAdd(alignedSize);
            localOffset = (int) (offset & segmentOffsetMask);
            segmentIdx = (int) (offset >>> segmentOffsetBits);
            if ((offset + size) > maxCacheSize) {
                // Cache is full
                return false;
            } else if (maxSegmentSize - localOffset < size) {
                // If an entry is at the end of a segment, we need to get a new offset and try
                // again in next segment
                continue;
            } else {
                // Found a good offset
                break;
            }
        }
        cacheSegments[segmentIdx].setBytes(localOffset, entry, entry.readerIndex(), entry.readableBytes());
        // Update last entryId for ledger. This logic is to handle writes for the same
        // ledger coming out of order and from different thread, though in practice it
        // should not happen and the compareAndSet should be always uncontended.
        while (true) {
            long currentLastEntryId = lastEntryMap.get(ledgerId);
            if (currentLastEntryId > entryId) {
                // A newer entry is already there
                break;
            }
            if (lastEntryMap.compareAndSet(ledgerId, currentLastEntryId, entryId)) {
                break;
            }
        }
        // Publish the entry in the index only after the bytes are in place.
        index.put(ledgerId, entryId, offset, size);
        cacheCount.increment();
        cacheSize.addAndGet(size);
        return true;
    }

    /**
     * Read an entry, or return null if it is not in the cache.
     * The returned buffer is a fresh copy owned by the caller.
     */
    public ByteBuf get(long ledgerId, long entryId) {
        LongPair result = index.get(ledgerId, entryId);
        if (result == null) {
            return null;
        }
        long offset = result.first;
        int size = (int) result.second;
        ByteBuf entry = allocator.buffer(size, size);
        int localOffset = (int) (offset & segmentOffsetMask);
        int segmentIdx = (int) (offset >>> segmentOffsetBits);
        entry.writeBytes(cacheSegments[segmentIdx], localOffset, size);
        return entry;
    }

    public boolean hasEntry(long ledgerId, long entryId) {
        return index.get(ledgerId, entryId) != null;
    }

    public ByteBuf getLastEntry(long ledgerId) {
        // NOTE(review): relies on lastEntryMap.get returning -1 for an absent
        // key — confirm against ConcurrentLongLongHashMap's default value.
        long lastEntryId = lastEntryMap.get(ledgerId);
        if (lastEntryId == -1) {
            // Ledger not found in write cache
            return null;
        } else {
            return get(ledgerId, lastEntryId);
        }
    }

    public void deleteLedger(long ledgerId) {
        deletedLedgers.add(ledgerId);
    }

    /**
     * Iterate over all live entries ordered by (ledgerId, entryId).
     * Serialized by sortedEntriesLock; the buffers handed to the consumer are
     * slices valid only for the duration of the callback.
     */
    public void forEach(EntryConsumer consumer) throws IOException {
        sortedEntriesLock.lock();
        try {
            int entriesToSort = (int) index.size();
            int arrayLen = entriesToSort * 4;
            if (sortedEntries == null || sortedEntries.length < arrayLen) {
                sortedEntries = new long[(int) (arrayLen * 2)];
            }
            long startTime = MathUtils.nowInNano();
            // sortedEntriesIdx is a field (not a local) so the lambda below can
            // mutate it.
            sortedEntriesIdx = 0;
            index.forEach((ledgerId, entryId, offset, length) -> {
                if (deletedLedgers.contains(ledgerId)) {
                    // Ignore deleted ledgers
                    return;
                }
                sortedEntries[sortedEntriesIdx] = ledgerId;
                sortedEntries[sortedEntriesIdx + 1] = entryId;
                sortedEntries[sortedEntriesIdx + 2] = offset;
                sortedEntries[sortedEntriesIdx + 3] = length;
                sortedEntriesIdx += 4;
            });
            if (log.isDebugEnabled()) {
                log.debug("iteration took {} ms", MathUtils.elapsedNanos(startTime) / 1e6);
            }
            startTime = MathUtils.nowInNano();
            // Sort entries by (ledgerId, entryId) maintaining the 4 items groups
            ArrayGroupSort.sort(sortedEntries, 0, sortedEntriesIdx);
            if (log.isDebugEnabled()) {
                log.debug("sorting {} ms", (MathUtils.elapsedNanos(startTime) / 1e6));
            }
            startTime = MathUtils.nowInNano();
            // Use per-call slices so the reader indices of the shared segments
            // are not disturbed by the consumer.
            ByteBuf[] entrySegments = new ByteBuf[segmentsCount];
            for (int i = 0; i < segmentsCount; i++) {
                entrySegments[i] = cacheSegments[i].slice(0, cacheSegments[i].capacity());
            }
            for (int i = 0; i < sortedEntriesIdx; i += 4) {
                long ledgerId = sortedEntries[i];
                long entryId = sortedEntries[i + 1];
                long offset = sortedEntries[i + 2];
                long length = sortedEntries[i + 3];
                int localOffset = (int) (offset & segmentOffsetMask);
                int segmentIdx = (int) (offset >>> segmentOffsetBits);
                ByteBuf entry = entrySegments[segmentIdx];
                entry.setIndex(localOffset, localOffset + (int) length);
                consumer.accept(ledgerId, entryId, entry);
            }
            if (log.isDebugEnabled()) {
                log.debug("entry log adding {} ms", MathUtils.elapsedNanos(startTime) / 1e6);
            }
        } finally {
            sortedEntriesLock.unlock();
        }
    }

    /** @return total bytes currently stored (excluding alignment padding). */
    public long size() {
        return cacheSize.get();
    }

    /** @return number of entries currently stored. */
    public long count() {
        return cacheCount.sum();
    }

    public boolean isEmpty() {
        return cacheSize.get() == 0L;
    }

    private static final int ALIGN_64_MASK = ~(64 - 1);

    // Round size up to the next multiple of 64.
    static int align64(int size) {
        return (size + 64 - 1) & ALIGN_64_MASK;
    }

    // Smallest power of two >= n.
    private static long alignToPowerOfTwo(long n) {
        return (long) Math.pow(2, 64 - Long.numberOfLeadingZeros(n - 1));
    }

    // Scratch state for forEach(); guarded by sortedEntriesLock.
    private final ReentrantLock sortedEntriesLock = new ReentrantLock();
    private long[] sortedEntries;
    private int sortedEntriesIdx;
    private static final Logger log = LoggerFactory.getLogger(WriteCache.class);
}
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import java.io.Closeable;
import java.io.IOException;
import java.util.Map.Entry;
/**
 * Abstraction of a generic key-value local database.
 */
public interface KeyValueStorage extends Closeable {
    /**
     * Store the given value under the given key, overwriting any existing value.
     *
     * @param key
     *            the key to write
     * @param value
     *            the value to associate with the key
     */
    void put(byte[] key, byte[] value) throws IOException;
    /**
     * Get the value associated with the given key.
     *
     * @param key
     *            the key to lookup
     * @return the value or null if the key was not found
     */
    byte[] get(byte[] key) throws IOException;
    /**
     * Get the value associated with the given key.
     *
     * <p>This method will use the provided array store the value
     *
     * @param key
     *            the key to lookup
     * @param value
     *            an array where to store the result
     * @return -1 if the entry was not found or the length of the value
     * @throws IOException
     *             if the value array could not hold the result
     */
    int get(byte[] key, byte[] value) throws IOException;
    /**
     * Get the entry whose key is the biggest and it's lesser than the supplied key.
     *
     * <p>For example if the db contains :
     *
     * <pre>
     * {
     *     1 : 'a',
     *     2 : 'b',
     *     3 : 'c'
     * }
     * </pre>
     *
     * <p>Then:
     *
     * <pre>
     * getFloor(3) --> (2, 'b')
     * </pre>
     *
     * @param key
     *            the non-inclusive upper limit key
     * @return the entry before or null if there's no entry before key
     */
    Entry<byte[], byte[]> getFloor(byte[] key) throws IOException;
    /**
     * Get the entry whose key is bigger or equal the supplied key.
     *
     * @param key
     *            the inclusive lower limit key
     * @return the first entry at or after key, or null if none exists
     * @throws IOException
     *             on storage error
     */
    Entry<byte[], byte[]> getCeil(byte[] key) throws IOException;
    /**
     * Remove the entry with the given key, if present.
     *
     * @param key
     *            the key to delete
     * @throws IOException
     *             on storage error
     */
    void delete(byte[] key) throws IOException;
    /**
     * Compact storage within a specified range.
     *
     * @param firstKey
     *            the first key in the range (included)
     * @param lastKey
     *            the last key in the range (not included)
     */
    default void compact(byte[] firstKey, byte[] lastKey) throws IOException {}
    /**
     * Compact storage full range.
     */
    default void compact() throws IOException {}
    /**
     * Get storage path.
     */
    String getDBPath();
    /**
     * Get an iterator over to scan sequentially through all the keys in the
     * database.
     *
     * @return a closeable iterator over all keys, in key order
     */
    CloseableIterator<byte[]> keys();
    /**
     * Get an iterator over to scan sequentially through all the keys within a
     * specified range.
     *
     * @param firstKey
     *            the first key in the range (included)
     * @param lastKey
     *            the lastKey in the range (not included)
     *
     */
    CloseableIterator<byte[]> keys(byte[] firstKey, byte[] lastKey);
    /**
     * Return an iterator object that can be used to sequentially scan through all
     * the entries in the database.
     */
    CloseableIterator<Entry<byte[], byte[]>> iterator();
    /**
     * Commit all pending write to durable storage.
     */
    void sync() throws IOException;
    /**
     * @return the number of keys.
     */
    long count() throws IOException;
    /**
     * Iterator interface.
     *
     * @param <T>
     */
    interface CloseableIterator<T> extends Closeable {
        boolean hasNext() throws IOException;
        T next() throws IOException;
    }
    /**
     * Create a new write batch; changes become visible only after flush().
     */
    Batch newBatch();
    /**
     * Interface for a batch to be written in the storage.
     */
    public interface Batch extends Closeable {
        // Buffer a put of key -> value.
        void put(byte[] key, byte[] value) throws IOException;
        // Buffer a single-key deletion.
        void remove(byte[] key) throws IOException;
        // Buffer deletion of the key range [beginKey, endKey).
        void deleteRange(byte[] beginKey, byte[] endKey) throws IOException;
        // Discard all buffered operations without applying them.
        void clear();
        // Atomically apply all buffered operations.
        void flush() throws IOException;
        // Number of buffered operations, or -1 if the implementation does not track it.
        default int batchCount() {
            return -1;
        }
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.CATEGORY_SERVER;
import java.util.function.Supplier;
import lombok.Getter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
/**
* A umbrella class for ledger metadata index stats.
*/
@StatsDoc(
name = BOOKIE_SCOPE,
category = CATEGORY_SERVER,
help = "Ledger metadata index stats"
)
@Getter
class LedgerMetadataIndexStats {
    private static final String LEDGERS_COUNT = "ledgers-count";
    @StatsDoc(
        name = LEDGERS_COUNT,
        help = "Current number of ledgers"
    )
    private final Gauge<Long> ledgersCountGauge;
    /**
     * Build the ledger-metadata-index stats and register the ledgers-count
     * gauge with the supplied stats logger.
     *
     * @param statsLogger the logger the gauge is registered on
     * @param ledgersCountSupplier supplier sampled to report the current ledger count
     */
    LedgerMetadataIndexStats(StatsLogger statsLogger,
                             Supplier<Long> ledgersCountSupplier) {
        final Gauge<Long> countGauge = new Gauge<Long>() {
            @Override
            public Long getDefaultValue() {
                // Reported before any sample has been taken.
                return 0L;
            }
            @Override
            public Long getSample() {
                return ledgersCountSupplier.get();
            }
        };
        this.ledgersCountGauge = countGauge;
        statsLogger.registerGauge(LEDGERS_COUNT, countGauge);
    }
}
| 545 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/KeyValueStorageRocksDB.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import static com.google.common.base.Preconditions.checkState;
//CHECKSTYLE.OFF: IllegalImport
//CHECKSTYLE.OFF: ImportOrder
import static io.netty.util.internal.PlatformDependent.maxDirectMemory;
//CHECKSTYLE.ON: IllegalImport
//CHECKSTYLE.ON: ImportOrder
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.bookie.storage.ldb.KeyValueStorageFactory.DbConfigType;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.BloomFilter;
import org.rocksdb.Cache;
import org.rocksdb.ChecksumType;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.CompressionType;
import org.rocksdb.DBOptions;
import org.rocksdb.Env;
import org.rocksdb.InfoLogLevel;
import org.rocksdb.LRUCache;
import org.rocksdb.LiveFileMetaData;
import org.rocksdb.Options;
import org.rocksdb.OptionsUtil;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;
import org.rocksdb.RocksObject;
import org.rocksdb.Slice;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* RocksDB based implementation of the KeyValueStorage.
*/
public class KeyValueStorageRocksDB implements KeyValueStorage {
    // Factory hook used by the ldb storage layer to instantiate this implementation.
    static KeyValueStorageFactory factory = (defaultBasePath, subPath, dbConfigType, conf) ->
            new KeyValueStorageRocksDB(defaultBasePath, subPath, dbConfigType, conf);
    private final RocksDB db;
    // Native (JNI-backed) option holders: retained as fields so close() can release them.
    private RocksObject options;
    private List<ColumnFamilyDescriptor> columnFamilyDescriptors;
    private final WriteOptions optionSync;
    private final WriteOptions optionDontSync;
    private Cache cache;
    private final ReadOptions optionCache;
    private final ReadOptions optionDontCache;
    // Written with optionSync in sync() purely to force a durable WAL sync.
    private final WriteBatch emptyBatch;
    // Max number of queued operations before a RocksDBBatch auto-flushes.
    private final int writeBatchMaxSize;
    private String dbPath;
    // Bookie configuration keys used when no RocksDB .ini config file is present.
    private static final String ROCKSDB_LOG_PATH = "dbStorage_rocksDB_logPath";
    private static final String ROCKSDB_LOG_LEVEL = "dbStorage_rocksDB_logLevel";
    private static final String ROCKSDB_LZ4_COMPRESSION_ENABLED = "dbStorage_rocksDB_lz4CompressionEnabled";
    private static final String ROCKSDB_WRITE_BUFFER_SIZE_MB = "dbStorage_rocksDB_writeBufferSizeMB";
    private static final String ROCKSDB_SST_SIZE_MB = "dbStorage_rocksDB_sstSizeInMB";
    private static final String ROCKSDB_BLOCK_SIZE = "dbStorage_rocksDB_blockSize";
    private static final String ROCKSDB_BLOOM_FILTERS_BITS_PER_KEY = "dbStorage_rocksDB_bloomFilterBitsPerKey";
    private static final String ROCKSDB_BLOCK_CACHE_SIZE = "dbStorage_rocksDB_blockCacheSize";
    private static final String ROCKSDB_NUM_LEVELS = "dbStorage_rocksDB_numLevels";
    private static final String ROCKSDB_NUM_FILES_IN_LEVEL0 = "dbStorage_rocksDB_numFilesInLevel0";
    private static final String ROCKSDB_MAX_SIZE_IN_LEVEL1_MB = "dbStorage_rocksDB_maxSizeInLevel1MB";
    private static final String ROCKSDB_FORMAT_VERSION = "dbStorage_rocksDB_format_version";
    private static final String ROCKSDB_CHECKSUM_TYPE = "dbStorage_rocksDB_checksum_type";
    /**
     * Open a read-write RocksDB instance under {@code basePath/subPath}.
     */
    public KeyValueStorageRocksDB(String basePath, String subPath, DbConfigType dbConfigType, ServerConfiguration conf)
            throws IOException {
        this(basePath, subPath, dbConfigType, conf, false);
    }
    /**
     * Open a RocksDB instance under {@code basePath/subPath}.
     *
     * <p>If a RocksDB configuration file is found (path chosen per {@code dbConfigType}),
     * the database is initialized from that file; otherwise it is initialized from the
     * main bookie configuration.
     *
     * @param readOnly whether to open the database in read-only mode
     * @throws IOException if the native library cannot be loaded or the db cannot be opened
     */
    public KeyValueStorageRocksDB(String basePath, String subPath, DbConfigType dbConfigType, ServerConfiguration conf,
                                  boolean readOnly)
            throws IOException {
        try {
            RocksDB.loadLibrary();
        } catch (Throwable t) {
            throw new IOException("Failed to load RocksDB JNI library", t);
        }
        this.optionSync = new WriteOptions();
        this.optionDontSync = new WriteOptions();
        this.optionCache = new ReadOptions();
        this.optionDontCache = new ReadOptions();
        this.emptyBatch = new WriteBatch();
        String dbFilePath = "";
        if (dbConfigType == DbConfigType.EntryLocation) {
            dbFilePath = conf.getEntryLocationRocksdbConf();
        } else if (dbConfigType == DbConfigType.LedgerMetadata) {
            dbFilePath = conf.getLedgerMetadataRocksdbConf();
        } else {
            dbFilePath = conf.getDefaultRocksDBConf();
        }
        log.info("Searching for a RocksDB configuration file in {}", dbFilePath);
        if (Paths.get(dbFilePath).toFile().exists()) {
            log.info("Found a RocksDB configuration file and using it to initialize the RocksDB");
            db = initializeRocksDBWithConfFile(basePath, subPath, dbConfigType, conf, readOnly, dbFilePath);
        } else {
            log.info("Haven't found the file and read the configuration from the main bookkeeper configuration");
            db = initializeRocksDBWithBookieConf(basePath, subPath, dbConfigType, conf, readOnly);
        }
        optionSync.setSync(true);
        optionDontSync.setSync(false);
        optionCache.setFillCache(true);
        optionDontCache.setFillCache(false);
        this.writeBatchMaxSize = conf.getMaxOperationNumbersInSingleRocksDBBatch();
    }
    /**
     * Open RocksDB using options loaded from an external RocksDB configuration file.
     *
     * <p>NOTE(review): the {@code cfHandles} returned by RocksDB.open() are not retained
     * and never explicitly closed; confirm whether db.close() is sufficient to release them.
     */
    private RocksDB initializeRocksDBWithConfFile(String basePath, String subPath, DbConfigType dbConfigType,
                                                  ServerConfiguration conf, boolean readOnly,
                                                  String dbFilePath) throws IOException {
        DBOptions dbOptions = new DBOptions();
        final List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>();
        final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
        try {
            OptionsUtil.loadOptionsFromFile(dbFilePath, Env.getDefault(), dbOptions, cfDescs, false);
            // Configure file path
            String logPath = conf.getString(ROCKSDB_LOG_PATH, "");
            if (!logPath.isEmpty()) {
                Path logPathSetting = FileSystems.getDefault().getPath(logPath, subPath);
                Files.createDirectories(logPathSetting);
                log.info("RocksDB<{}> log path: {}", subPath, logPathSetting);
                dbOptions.setDbLogDir(logPathSetting.toString());
            }
            this.dbPath = FileSystems.getDefault().getPath(basePath, subPath).toFile().toString();
            // Keep references so close() can release the native options, and so tests
            // can inspect the column family descriptors.
            this.options = dbOptions;
            this.columnFamilyDescriptors = cfDescs;
            if (readOnly) {
                return RocksDB.openReadOnly(dbOptions, dbPath, cfDescs, cfHandles);
            } else {
                return RocksDB.open(dbOptions, dbPath, cfDescs, cfHandles);
            }
        } catch (RocksDBException e) {
            throw new IOException("Error open RocksDB database", e);
        }
    }
    /**
     * Open RocksDB with options derived from the main bookie configuration.
     *
     * <p>For the EntryLocation index a tuned block-based table (block cache, bloom
     * filters, leveled compaction) is configured; other config types only get the
     * checksum type applied.
     */
    private RocksDB initializeRocksDBWithBookieConf(String basePath, String subPath, DbConfigType dbConfigType,
                                                    ServerConfiguration conf, boolean readOnly) throws IOException {
        Options options = new Options();
        options.setCreateIfMissing(true);
        ChecksumType checksumType = ChecksumType.valueOf(conf.getString(ROCKSDB_CHECKSUM_TYPE, "kxxHash"));
        if (dbConfigType == DbConfigType.EntryLocation) {
            /* Set default RocksDB block-cache size to 10% / numberOfLedgers of direct memory, unless override */
            int ledgerDirsSize = conf.getLedgerDirNames().length;
            long defaultRocksDBBlockCacheSizeBytes = maxDirectMemory() / ledgerDirsSize / 10;
            long blockCacheSize = DbLedgerStorage.getLongVariableOrDefault(conf, ROCKSDB_BLOCK_CACHE_SIZE,
                    defaultRocksDBBlockCacheSizeBytes);
            long writeBufferSizeMB = conf.getInt(ROCKSDB_WRITE_BUFFER_SIZE_MB, 64);
            long sstSizeMB = conf.getInt(ROCKSDB_SST_SIZE_MB, 64);
            int numLevels = conf.getInt(ROCKSDB_NUM_LEVELS, -1);
            int numFilesInLevel0 = conf.getInt(ROCKSDB_NUM_FILES_IN_LEVEL0, 4);
            long maxSizeInLevel1MB = conf.getLong(ROCKSDB_MAX_SIZE_IN_LEVEL1_MB, 256);
            int blockSize = conf.getInt(ROCKSDB_BLOCK_SIZE, 64 * 1024);
            int bloomFilterBitsPerKey = conf.getInt(ROCKSDB_BLOOM_FILTERS_BITS_PER_KEY, 10);
            boolean lz4CompressionEnabled = conf.getBoolean(ROCKSDB_LZ4_COMPRESSION_ENABLED, true);
            int formatVersion = conf.getInt(ROCKSDB_FORMAT_VERSION, 2);
            if (lz4CompressionEnabled) {
                options.setCompressionType(CompressionType.LZ4_COMPRESSION);
            }
            options.setWriteBufferSize(writeBufferSizeMB * 1024 * 1024);
            options.setMaxWriteBufferNumber(4);
            if (numLevels > 0) {
                options.setNumLevels(numLevels);
            }
            options.setLevelZeroFileNumCompactionTrigger(numFilesInLevel0);
            options.setMaxBytesForLevelBase(maxSizeInLevel1MB * 1024 * 1024);
            options.setMaxBackgroundJobs(32);
            options.setIncreaseParallelism(32);
            options.setMaxTotalWalSize(512 * 1024 * 1024);
            options.setMaxOpenFiles(-1);
            options.setTargetFileSizeBase(sstSizeMB * 1024 * 1024);
            options.setDeleteObsoleteFilesPeriodMicros(TimeUnit.HOURS.toMicros(1));
            // Block cache is shared across reads; retained as a field so close() can release it.
            this.cache = new LRUCache(blockCacheSize);
            BlockBasedTableConfig tableOptions = new BlockBasedTableConfig();
            tableOptions.setBlockSize(blockSize);
            tableOptions.setBlockCache(cache);
            tableOptions.setFormatVersion(formatVersion);
            tableOptions.setChecksumType(checksumType);
            if (bloomFilterBitsPerKey > 0) {
                tableOptions.setFilterPolicy(new BloomFilter(bloomFilterBitsPerKey, false));
            }
            // Options best suited for HDDs
            tableOptions.setCacheIndexAndFilterBlocks(true);
            options.setLevelCompactionDynamicLevelBytes(true);
            options.setTableFormatConfig(tableOptions);
        } else {
            this.cache = null;
            BlockBasedTableConfig tableOptions = new BlockBasedTableConfig();
            tableOptions.setChecksumType(checksumType);
            options.setTableFormatConfig(tableOptions);
        }
        // Configure file path
        String logPath = conf.getString(ROCKSDB_LOG_PATH, "");
        if (!logPath.isEmpty()) {
            Path logPathSetting = FileSystems.getDefault().getPath(logPath, subPath);
            Files.createDirectories(logPathSetting);
            log.info("RocksDB<{}> log path: {}", subPath, logPathSetting);
            options.setDbLogDir(logPathSetting.toString());
        }
        this.dbPath = FileSystems.getDefault().getPath(basePath, subPath).toFile().toString();
        // Configure log level
        String logLevel = conf.getString(ROCKSDB_LOG_LEVEL, "info");
        switch (logLevel) {
        case "debug":
            options.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL);
            break;
        case "info":
            options.setInfoLogLevel(InfoLogLevel.INFO_LEVEL);
            break;
        case "warn":
            options.setInfoLogLevel(InfoLogLevel.WARN_LEVEL);
            break;
        case "error":
            options.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
            break;
        default:
            log.warn("Unrecognized RockDB log level: {}", logLevel);
        }
        // Keep log files for 1month
        options.setKeepLogFileNum(30);
        options.setLogFileTimeToRoll(TimeUnit.DAYS.toSeconds(1));
        this.options = options;
        try {
            if (readOnly) {
                return RocksDB.openReadOnly(options, dbPath);
            } else {
                return RocksDB.open(options, dbPath);
            }
        } catch (RocksDBException e) {
            throw new IOException("Error open RocksDB database", e);
        }
    }
    /**
     * Close the database and release all native option/cache objects created
     * by the constructor.
     */
    @Override
    public void close() throws IOException {
        db.close();
        if (cache != null) {
            cache.close();
        }
        if (options != null) {
            options.close();
        }
        optionSync.close();
        optionDontSync.close();
        optionCache.close();
        optionDontCache.close();
        emptyBatch.close();
    }
    /**
     * Insert (or overwrite) a key/value pair. The write is not synced to disk;
     * durability is provided by a later {@link #sync()} or synced batch flush.
     */
    @Override
    public void put(byte[] key, byte[] value) throws IOException {
        try {
            db.put(optionDontSync, key, value);
        } catch (RocksDBException e) {
            throw new IOException("Error in RocksDB put", e);
        }
    }
    /**
     * @return the value associated with {@code key}, or null if not found.
     */
    @Override
    public byte[] get(byte[] key) throws IOException {
        try {
            return db.get(key);
        } catch (RocksDBException e) {
            throw new IOException("Error in RocksDB get", e);
        }
    }
    /**
     * Read the value for {@code key} into the caller-supplied buffer.
     *
     * @return the value length, or -1 if the key was not found
     * @throws IOException if the value does not fit in {@code value} or the read fails
     */
    @Override
    public int get(byte[] key, byte[] value) throws IOException {
        try {
            int res = db.get(key, value);
            if (res == RocksDB.NOT_FOUND) {
                return -1;
            } else if (res > value.length) {
                throw new IOException("Value array is too small to fit the result");
            } else {
                return res;
            }
        } catch (RocksDBException e) {
            throw new IOException("Error in RocksDB get", e);
        }
    }
    /**
     * @return the entry with the greatest key strictly less than {@code key}
     *         (iteration is bounded above by {@code key}), or null if none exists.
     */
    @Override
    @SuppressFBWarnings("RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE")
    public Entry<byte[], byte[]> getFloor(byte[] key) throws IOException {
        try (Slice upperBound = new Slice(key);
                ReadOptions option = new ReadOptions(optionCache).setIterateUpperBound(upperBound);
                RocksIterator iterator = db.newIterator(option)) {
            iterator.seekToLast();
            if (iterator.isValid()) {
                return new EntryWrapper(iterator.key(), iterator.value());
            }
        }
        return null;
    }
    /**
     * @return the entry with the smallest key greater than or equal to {@code key},
     *         or null if none exists.
     */
    @Override
    @SuppressFBWarnings("RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE")
    public Entry<byte[], byte[]> getCeil(byte[] key) throws IOException {
        try (RocksIterator iterator = db.newIterator(optionCache)) {
            // Position the iterator on the record whose key is >= to the supplied key
            iterator.seek(key);
            if (iterator.isValid()) {
                return new EntryWrapper(iterator.key(), iterator.value());
            } else {
                return null;
            }
        }
    }
    /**
     * Delete a single key. Like {@link #put}, the delete is not synced to disk.
     */
    @Override
    public void delete(byte[] key) throws IOException {
        try {
            db.delete(optionDontSync, key);
        } catch (RocksDBException e) {
            throw new IOException("Error in RocksDB delete", e);
        }
    }
    @Override
    public String getDBPath() {
        return dbPath;
    }
    /**
     * Compact the key range [firstKey, lastKey].
     */
    @Override
    public void compact(byte[] firstKey, byte[] lastKey) throws IOException {
        try {
            db.compactRange(firstKey, lastKey);
        } catch (RocksDBException e) {
            throw new IOException("Error in RocksDB compact", e);
        }
    }
    /**
     * Compact the whole key space, logging file count and size before/after.
     */
    @Override
    public void compact() throws IOException {
        try {
            final long start = System.currentTimeMillis();
            final int oriRocksDBFileCount = db.getLiveFilesMetaData().size();
            final long oriRocksDBSize = getRocksDBSize();
            log.info("Starting RocksDB {} compact, current RocksDB hold {} files and {} Bytes.",
                    db.getName(), oriRocksDBFileCount, oriRocksDBSize);
            db.compactRange();
            final long end = System.currentTimeMillis();
            final int rocksDBFileCount = db.getLiveFilesMetaData().size();
            final long rocksDBSize = getRocksDBSize();
            log.info("RocksDB {} compact finished {} ms, space reduced {} Bytes, current hold {} files and {} Bytes.",
                    db.getName(), end - start, oriRocksDBSize - rocksDBSize, rocksDBFileCount, rocksDBSize);
        } catch (RocksDBException e) {
            throw new IOException("Error in RocksDB compact", e);
        }
    }
    // Sum of all live SST file sizes, in bytes.
    private long getRocksDBSize() {
        List<LiveFileMetaData> liveFilesMetaData = db.getLiveFilesMetaData();
        long rocksDBFileSize = 0L;
        for (LiveFileMetaData fileMetaData : liveFilesMetaData) {
            rocksDBFileSize += fileMetaData.size();
        }
        return rocksDBFileSize;
    }
    /**
     * Force a durable sync by writing an empty batch with synced write options.
     */
    @Override
    public void sync() throws IOException {
        try {
            db.write(optionSync, emptyBatch);
        } catch (RocksDBException e) {
            throw new IOException(e);
        }
    }
    /**
     * Iterate over all keys, populating the block cache as they are read.
     * The returned iterator must be closed to release the native cursor.
     */
    @Override
    public CloseableIterator<byte[]> keys() {
        final RocksIterator iterator = db.newIterator(optionCache);
        iterator.seekToFirst();
        return new CloseableIterator<byte[]>() {
            @Override
            public boolean hasNext() {
                return iterator.isValid();
            }
            @Override
            public byte[] next() {
                checkState(iterator.isValid());
                byte[] key = iterator.key();
                iterator.next();
                return key;
            }
            @Override
            public void close() {
                iterator.close();
            }
        };
    }
    /**
     * Iterate over keys in the range [firstKey, lastKey). The returned iterator
     * must be closed: it also releases the native upper-bound slice and options.
     */
    @Override
    public CloseableIterator<byte[]> keys(byte[] firstKey, byte[] lastKey) {
        final Slice upperBound = new Slice(lastKey);
        final ReadOptions option = new ReadOptions(optionCache).setIterateUpperBound(upperBound);
        final RocksIterator iterator = db.newIterator(option);
        iterator.seek(firstKey);
        return new CloseableIterator<byte[]>() {
            @Override
            public boolean hasNext() {
                return iterator.isValid();
            }
            @Override
            public byte[] next() {
                checkState(iterator.isValid());
                byte[] key = iterator.key();
                iterator.next();
                return key;
            }
            @Override
            public void close() {
                iterator.close();
                option.close();
                upperBound.close();
            }
        };
    }
    /**
     * Iterate over all entries without polluting the block cache.
     *
     * <p>Note: the same {@code EntryWrapper} instance is reused for every
     * {@code next()} call, so callers must not hold on to returned entries.
     */
    @Override
    public CloseableIterator<Entry<byte[], byte[]>> iterator() {
        final RocksIterator iterator = db.newIterator(optionDontCache);
        iterator.seekToFirst();
        final EntryWrapper entryWrapper = new EntryWrapper();
        return new CloseableIterator<Entry<byte[], byte[]>>() {
            @Override
            public boolean hasNext() {
                return iterator.isValid();
            }
            @Override
            public Entry<byte[], byte[]> next() {
                checkState(iterator.isValid());
                entryWrapper.key = iterator.key();
                entryWrapper.value = iterator.value();
                iterator.next();
                return entryWrapper;
            }
            @Override
            public void close() {
                iterator.close();
            }
        };
    }
    /**
     * @return an estimated (not exact) number of keys, from the
     *         "rocksdb.estimate-num-keys" property.
     */
    @Override
    public long count() throws IOException {
        try {
            return db.getLongProperty("rocksdb.estimate-num-keys");
        } catch (RocksDBException e) {
            throw new IOException("Error in getting records count", e);
        }
    }
    @Override
    public Batch newBatch() {
        return new RocksDBBatch(writeBatchMaxSize);
    }
    /**
     * Batch implementation that auto-flushes (synced) once the number of queued
     * operations reaches the configured batch size.
     */
    private class RocksDBBatch implements Batch {
        private final WriteBatch writeBatch = new WriteBatch();
        private final int batchSize;
        private int batchCount = 0;
        RocksDBBatch(int batchSize) {
            this.batchSize = batchSize;
        }
        @Override
        public void close() {
            writeBatch.close();
            batchCount = 0;
        }
        @Override
        public void put(byte[] key, byte[] value) throws IOException {
            try {
                writeBatch.put(key, value);
                countBatchAndFlushIfNeeded();
            } catch (RocksDBException e) {
                throw new IOException("Failed to flush RocksDB batch", e);
            }
        }
        @Override
        public void remove(byte[] key) throws IOException {
            try {
                writeBatch.delete(key);
                countBatchAndFlushIfNeeded();
            } catch (RocksDBException e) {
                throw new IOException("Failed to flush RocksDB batch", e);
            }
        }
        @Override
        public void clear() {
            writeBatch.clear();
            batchCount = 0;
        }
        @Override
        public void deleteRange(byte[] beginKey, byte[] endKey) throws IOException {
            try {
                writeBatch.deleteRange(beginKey, endKey);
                countBatchAndFlushIfNeeded();
            } catch (RocksDBException e) {
                throw new IOException("Failed to flush RocksDB batch", e);
            }
        }
        // Flush and reset once the configured op-count threshold is reached.
        private void countBatchAndFlushIfNeeded() throws IOException {
            if (++batchCount >= batchSize) {
                flush();
                clear();
            }
        }
        @Override
        public int batchCount() {
            return batchCount;
        }
        @Override
        public void flush() throws IOException {
            try {
                // Synced write: the batch is durable once this returns.
                db.write(optionSync, writeBatch);
            } catch (RocksDBException e) {
                throw new IOException("Failed to flush RocksDB batch", e);
            }
        }
    }
    private static final class EntryWrapper implements Entry<byte[], byte[]> {
        // This is not final since the iterator will reuse the same EntryWrapper
        // instance at each step
        private byte[] key;
        private byte[] value;
        public EntryWrapper() {
            this.key = null;
            this.value = null;
        }
        public EntryWrapper(byte[] key, byte[] value) {
            this.key = key;
            this.value = value;
        }
        @Override
        public byte[] setValue(byte[] value) {
            throw new UnsupportedOperationException();
        }
        @Override
        public byte[] getValue() {
            return value;
        }
        @Override
        public byte[] getKey() {
            return key;
        }
    }
    // Visible for tests.
    RocksDB db() {
        return db;
    }
    // Only populated when initialized from a RocksDB configuration file.
    List<ColumnFamilyDescriptor> getColumnFamilyDescriptors() {
        return columnFamilyDescriptors;
    }
    RocksObject getOptions() {
        return options;
    }
    private static final Logger log = LoggerFactory.getLogger(KeyValueStorageRocksDB.class);
}
| 546 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/DbLedgerStorageStats.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_ADD_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_READ_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.CATEGORY_SERVER;
import java.util.function.Supplier;
import lombok.Getter;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
/**
* A umbrella class for db ledger storage stats with one instance per
* ledger directory.
*/
@StatsDoc(
name = BOOKIE_SCOPE,
category = CATEGORY_SERVER,
help = "DbLedgerStorage related stats"
)
@Getter
class DbLedgerStorageStats {
private static final String ADD_ENTRY = "add-entry";
private static final String READ_ENTRY = "read-entry";
private static final String READ_ENTRY_LOCATIONS_INDEX_TIME = "read-locations-index-time";
private static final String READ_ENTRYLOG_TIME = "read-entrylog-time";
private static final String WRITE_CACHE_HITS = "write-cache-hits";
private static final String WRITE_CACHE_MISSES = "write-cache-misses";
private static final String READ_CACHE_HITS = "read-cache-hits";
private static final String READ_CACHE_MISSES = "read-cache-misses";
private static final String READAHEAD_BATCH_COUNT = "readahead-batch-count";
private static final String READAHEAD_BATCH_SIZE = "readahead-batch-size";
private static final String READAHEAD_TIME = "readahead-time";
private static final String FLUSH = "flush";
private static final String FLUSH_ENTRYLOG = "flush-entrylog";
private static final String FLUSH_LOCATIONS_INDEX = "flush-locations-index";
private static final String FLUSH_LEDGER_INDEX = "flush-ledger-index";
private static final String FLUSH_SIZE = "flush-size";
@Deprecated
private static final String THROTTLED_WRITE_REQUESTS = "throttled-write-requests";
// throttled-write-requests is deprecated, use new metric: throttled-write
private static final String THROTTLED_WRITE = "throttled-write";
private static final String REJECTED_WRITE_REQUESTS = "rejected-write-requests";
private static final String WRITE_CACHE_SIZE = "write-cache-size";
private static final String WRITE_CACHE_COUNT = "write-cache-count";
private static final String READ_CACHE_SIZE = "read-cache-size";
private static final String READ_CACHE_COUNT = "read-cache-count";
@StatsDoc(
name = ADD_ENTRY,
help = "operation stats of adding entries to db ledger storage",
parent = BOOKIE_ADD_ENTRY
)
private final OpStatsLogger addEntryStats;
@StatsDoc(
name = READ_ENTRY,
help = "operation stats of reading entries from db ledger storage",
parent = BOOKIE_READ_ENTRY
)
private final OpStatsLogger readEntryStats;
@StatsDoc(
name = READ_ENTRY_LOCATIONS_INDEX_TIME,
help = "time spent reading entries from the locations index of the db ledger storage engine",
parent = READ_ENTRY
)
private final Counter readFromLocationIndexTime;
@StatsDoc(
name = READ_ENTRYLOG_TIME,
help = "time spent reading entries from the entry log files of the db ledger storage engine",
parent = READ_ENTRY
)
private final Counter readFromEntryLogTime;
@StatsDoc(
name = WRITE_CACHE_HITS,
help = "number of write cache hits (on reads)",
parent = READ_ENTRY
)
private final Counter writeCacheHitCounter;
@StatsDoc(
name = WRITE_CACHE_MISSES,
help = "number of write cache misses (on reads)",
parent = READ_ENTRY
)
private final Counter writeCacheMissCounter;
@StatsDoc(
name = READ_CACHE_HITS,
help = "number of read cache hits",
parent = READ_ENTRY
)
private final Counter readCacheHitCounter;
@StatsDoc(
name = READ_CACHE_MISSES,
help = "number of read cache misses",
parent = READ_ENTRY
)
private final Counter readCacheMissCounter;
@StatsDoc(
name = READAHEAD_BATCH_COUNT,
help = "the distribution of num of entries to read in one readahead batch"
)
private final OpStatsLogger readAheadBatchCountStats;
@StatsDoc(
name = READAHEAD_BATCH_SIZE,
help = "the distribution of num of bytes to read in one readahead batch"
)
private final OpStatsLogger readAheadBatchSizeStats;
@StatsDoc(
name = READAHEAD_TIME,
help = "Time spent on readahead operations"
)
private final Counter readAheadTime;
@StatsDoc(
name = FLUSH,
help = "operation stats of flushing write cache to entry log files"
)
private final OpStatsLogger flushStats;
@StatsDoc(
name = FLUSH_ENTRYLOG,
help = "operation stats of flushing to the current entry log file"
)
private final OpStatsLogger flushEntryLogStats;
@StatsDoc(
name = FLUSH_LOCATIONS_INDEX,
help = "operation stats of flushing to the locations index"
)
private final OpStatsLogger flushLocationIndexStats;
@StatsDoc(
name = FLUSH_LOCATIONS_INDEX,
help = "operation stats of flushing to the ledger index"
)
private final OpStatsLogger flushLedgerIndexStats;
@StatsDoc(
name = FLUSH_SIZE,
help = "the distribution of number of bytes flushed from write cache to entry log files"
)
private final OpStatsLogger flushSizeStats;
@StatsDoc(
name = THROTTLED_WRITE_REQUESTS,
help = "The number of requests throttled due to write cache is full"
)
private final Counter throttledWriteRequests;
@StatsDoc(
name = THROTTLED_WRITE,
help = "The stats of throttled write due to write cache is full"
)
private final OpStatsLogger throttledWriteStats;
@StatsDoc(
name = REJECTED_WRITE_REQUESTS,
help = "The number of requests rejected due to write cache is full"
)
private final Counter rejectedWriteRequests;
@StatsDoc(
name = WRITE_CACHE_SIZE,
help = "Current number of bytes in write cache"
)
private final Gauge<Long> writeCacheSizeGauge;
@StatsDoc(
name = WRITE_CACHE_COUNT,
help = "Current number of entries in write cache"
)
private final Gauge<Long> writeCacheCountGauge;
@StatsDoc(
name = READ_CACHE_SIZE,
help = "Current number of bytes in read cache"
)
private final Gauge<Long> readCacheSizeGauge;
@StatsDoc(
name = READ_CACHE_COUNT,
help = "Current number of entries in read cache"
)
private final Gauge<Long> readCacheCountGauge;
DbLedgerStorageStats(StatsLogger stats,
Supplier<Long> writeCacheSizeSupplier,
Supplier<Long> writeCacheCountSupplier,
Supplier<Long> readCacheSizeSupplier,
Supplier<Long> readCacheCountSupplier) {
addEntryStats = stats.getThreadScopedOpStatsLogger(ADD_ENTRY);
readEntryStats = stats.getThreadScopedOpStatsLogger(READ_ENTRY);
readFromLocationIndexTime = stats.getThreadScopedCounter(READ_ENTRY_LOCATIONS_INDEX_TIME);
readFromEntryLogTime = stats.getThreadScopedCounter(READ_ENTRYLOG_TIME);
readCacheHitCounter = stats.getCounter(READ_CACHE_HITS);
readCacheMissCounter = stats.getCounter(READ_CACHE_MISSES);
writeCacheHitCounter = stats.getCounter(WRITE_CACHE_HITS);
writeCacheMissCounter = stats.getCounter(WRITE_CACHE_MISSES);
readAheadBatchCountStats = stats.getOpStatsLogger(READAHEAD_BATCH_COUNT);
readAheadBatchSizeStats = stats.getOpStatsLogger(READAHEAD_BATCH_SIZE);
readAheadTime = stats.getThreadScopedCounter(READAHEAD_TIME);
flushStats = stats.getOpStatsLogger(FLUSH);
flushEntryLogStats = stats.getOpStatsLogger(FLUSH_ENTRYLOG);
flushLocationIndexStats = stats.getOpStatsLogger(FLUSH_LOCATIONS_INDEX);
flushLedgerIndexStats = stats.getOpStatsLogger(FLUSH_LEDGER_INDEX);
flushSizeStats = stats.getOpStatsLogger(FLUSH_SIZE);
throttledWriteRequests = stats.getThreadScopedCounter(THROTTLED_WRITE_REQUESTS);
throttledWriteStats = stats.getOpStatsLogger(THROTTLED_WRITE);
rejectedWriteRequests = stats.getThreadScopedCounter(REJECTED_WRITE_REQUESTS);
writeCacheSizeGauge = new Gauge<Long>() {
@Override
public Long getDefaultValue() {
return 0L;
}
@Override
public Long getSample() {
return writeCacheSizeSupplier.get();
}
};
stats.registerGauge(WRITE_CACHE_SIZE, writeCacheSizeGauge);
writeCacheCountGauge = new Gauge<Long>() {
@Override
public Long getDefaultValue() {
return 0L;
}
@Override
public Long getSample() {
return writeCacheCountSupplier.get();
}
};
stats.registerGauge(WRITE_CACHE_COUNT, writeCacheCountGauge);
readCacheSizeGauge = new Gauge<Long>() {
@Override
public Long getDefaultValue() {
return 0L;
}
@Override
public Long getSample() {
return readCacheSizeSupplier.get();
}
};
stats.registerGauge(READ_CACHE_SIZE, readCacheSizeGauge);
readCacheCountGauge = new Gauge<Long>() {
@Override
public Long getDefaultValue() {
return 0L;
}
@Override
public Long getSample() {
return readCacheCountSupplier.get();
}
};
stats.registerGauge(READ_CACHE_COUNT, readCacheCountGauge);
}
}
| 547 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/SingleDirectoryDbLedgerStorage.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.protobuf.ByteString;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.Unpooled;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.concurrent.DefaultThreadFactory;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.PrimitiveIterator.OfLong;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.StampedLock;
import org.apache.bookkeeper.bookie.Bookie;
import org.apache.bookkeeper.bookie.Bookie.NoEntryException;
import org.apache.bookkeeper.bookie.BookieException;
import org.apache.bookkeeper.bookie.BookieException.OperationRejectedException;
import org.apache.bookkeeper.bookie.CheckpointSource;
import org.apache.bookkeeper.bookie.CheckpointSource.Checkpoint;
import org.apache.bookkeeper.bookie.Checkpointer;
import org.apache.bookkeeper.bookie.CompactableLedgerStorage;
import org.apache.bookkeeper.bookie.EntryLocation;
import org.apache.bookkeeper.bookie.GarbageCollectionStatus;
import org.apache.bookkeeper.bookie.GarbageCollectorThread;
import org.apache.bookkeeper.bookie.LastAddConfirmedUpdateNotification;
import org.apache.bookkeeper.bookie.LedgerCache;
import org.apache.bookkeeper.bookie.LedgerDirsManager;
import org.apache.bookkeeper.bookie.LedgerDirsManager.LedgerDirsListener;
import org.apache.bookkeeper.bookie.LedgerEntryPage;
import org.apache.bookkeeper.bookie.StateManager;
import org.apache.bookkeeper.bookie.storage.EntryLogger;
import org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorageDataFormats.LedgerData;
import org.apache.bookkeeper.bookie.storage.ldb.KeyValueStorage.Batch;
import org.apache.bookkeeper.common.util.Watcher;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.proto.BookieProtocol;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.ThreadRegistry;
import org.apache.bookkeeper.util.MathUtils;
import org.apache.bookkeeper.util.collections.ConcurrentLongHashMap;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang.mutable.MutableLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Single directory implementation of LedgerStorage that uses RocksDB to keep the indexes for entries stored in
* EntryLogs.
*
* <p>This is meant only to be used from {@link DbLedgerStorage}.
*/
public class SingleDirectoryDbLedgerStorage implements CompactableLedgerStorage {
    // Append-only log files holding the actual entry payloads.
    private final EntryLogger entryLogger;
    // RocksDB-backed index: ledgerId -> ledger metadata (exists / fenced / master key / explicit LAC).
    private final LedgerMetadataIndex ledgerIndex;
    // RocksDB-backed index: (ledgerId, entryId) -> offset inside the entry logs.
    private final EntryLocationIndex entryLocationIndex;
    // Per-ledger transient state (cached LAC, explicit LAC, LAC watchers); periodically pruned.
    private final ConcurrentLongHashMap<TransientLedgerInfo> transientLedgerInfoCache;
    private final GarbageCollectorThread gcThread;
    // Write cache where all new entries are inserted into
    protected volatile WriteCache writeCache;
    // Write cache that is used to swap with writeCache during flushes
    protected volatile WriteCache writeCacheBeingFlushed;
    // Cache where we insert entries for speculative reading
    private final ReadCache readCache;
    // Guards the swap of writeCache/writeCacheBeingFlushed; readers use optimistic reads.
    private final StampedLock writeCacheRotationLock = new StampedLock();
    // Serializes flush/checkpoint operations; also used as a barrier by updateEntriesLocations().
    protected final ReentrantLock flushMutex = new ReentrantLock();
    // True once a flush has been scheduled for the current (full) write cache; reset on swap.
    protected final AtomicBoolean hasFlushBeenTriggered = new AtomicBoolean(false);
    private final AtomicBoolean isFlushOngoing = new AtomicBoolean(false);
    private static String dbStoragerExecutorName = "db-storage";
    // Single-threaded executor running background flushes triggered by a full write cache.
    private final ExecutorService executor = Executors.newSingleThreadExecutor(
            new DefaultThreadFactory(dbStoragerExecutorName));
    // Executor used to for db index cleanup
    private final ScheduledExecutorService cleanupExecutor = Executors
            .newSingleThreadScheduledExecutor(new DefaultThreadFactory("db-storage-cleanup"));
    private final CopyOnWriteArrayList<LedgerDeletionListener> ledgerDeletionListeners = Lists
            .newCopyOnWriteArrayList();
    private CheckpointSource checkpointSource = CheckpointSource.DEFAULT;
    private Checkpoint lastCheckpoint = Checkpoint.MIN;
    // Combined capacity of the two write caches (each gets half).
    private final long writeCacheMaxSize;
    private final long readCacheMaxSize;
    private final int readAheadCacheBatchSize;
    private final long readAheadCacheBatchBytesSize;
    // Max time a writer is throttled waiting for cache space before the write is rejected.
    private final long maxThrottleTimeNanos;
    private final DbLedgerStorageStats dbLedgerStorageStats;
    private static final long DEFAULT_MAX_THROTTLE_TIME_MILLIS = TimeUnit.SECONDS.toMillis(10);
    // Read-ahead never fills more than half the read cache in one pass.
    private final long maxReadAheadBytesSize;
    private final Counter flushExecutorTime;
    private final boolean singleLedgerDirs;
    /**
     * Create a single-directory DB ledger storage instance.
     *
     * <p>Requires exactly one ledger directory; the index directory defaults to the ledger
     * directory unless a distinct index dir is configured. Sets up the two write caches
     * (each half of {@code writeCacheSize}), the read cache, the RocksDB-backed metadata
     * and location indexes, the GC thread, and the periodic transient-info cleanup task.
     *
     * @param conf server configuration (read for ledger dirs and throttle time)
     * @param ledgerManager used by the garbage collector thread
     * @param ledgerDirsManager must contain exactly one ledger directory
     * @param indexDirsManager optional separate index directory; may be empty
     * @param entryLogger entry log to store payloads in
     * @param statsLogger root stats logger; scoped with ledgerDir/indexDir labels
     * @param allocator buffer allocator for the caches
     * @param writeCacheSize total bytes for both write caches combined
     * @param readCacheSize total bytes for the read cache
     * @param readAheadCacheBatchSize max entries per read-ahead batch
     * @param readAheadCacheBatchBytesSize max bytes per read-ahead batch (0 disables this limit)
     * @throws IOException if the RocksDB indexes cannot be opened
     */
    public SingleDirectoryDbLedgerStorage(ServerConfiguration conf, LedgerManager ledgerManager,
                                          LedgerDirsManager ledgerDirsManager, LedgerDirsManager indexDirsManager,
                                          EntryLogger entryLogger, StatsLogger statsLogger, ByteBufAllocator allocator,
                                          long writeCacheSize, long readCacheSize, int readAheadCacheBatchSize,
                                          long readAheadCacheBatchBytesSize)
            throws IOException {
        checkArgument(ledgerDirsManager.getAllLedgerDirs().size() == 1,
                "Db implementation only allows for one storage dir");
        String ledgerBaseDir = ledgerDirsManager.getAllLedgerDirs().get(0).getPath();
        // indexBaseDir default use ledgerBaseDir
        String indexBaseDir = ledgerBaseDir;
        if (CollectionUtils.isEmpty(indexDirsManager.getAllLedgerDirs())
                || ledgerBaseDir.equals(indexDirsManager.getAllLedgerDirs().get(0).getPath())) {
            log.info("indexDir is equals ledgerBaseDir, creating single directory db ledger storage on {}",
                    indexBaseDir);
        } else {
            // if indexDir is specified, set new value
            indexBaseDir = indexDirsManager.getAllLedgerDirs().get(0).getPath();
            log.info("indexDir is specified a separate dir, creating single directory db ledger storage on {}",
                    indexBaseDir);
        }
        StatsLogger ledgerIndexDirStatsLogger = statsLogger
                .scopeLabel("ledgerDir", ledgerBaseDir)
                .scopeLabel("indexDir", indexBaseDir);
        this.writeCacheMaxSize = writeCacheSize;
        // The budget is split evenly between the active cache and the one being flushed.
        this.writeCache = new WriteCache(allocator, writeCacheMaxSize / 2);
        this.writeCacheBeingFlushed = new WriteCache(allocator, writeCacheMaxSize / 2);
        this.singleLedgerDirs = conf.getLedgerDirs().length == 1;
        readCacheMaxSize = readCacheSize;
        this.readAheadCacheBatchSize = readAheadCacheBatchSize;
        this.readAheadCacheBatchBytesSize = readAheadCacheBatchBytesSize;
        // Do not attempt to perform read-ahead more than half the total size of the cache
        maxReadAheadBytesSize = readCacheMaxSize / 2;
        long maxThrottleTimeMillis = conf.getLong(DbLedgerStorage.MAX_THROTTLE_TIME_MILLIS,
                DEFAULT_MAX_THROTTLE_TIME_MILLIS);
        maxThrottleTimeNanos = TimeUnit.MILLISECONDS.toNanos(maxThrottleTimeMillis);
        readCache = new ReadCache(allocator, readCacheMaxSize);
        ledgerIndex = new LedgerMetadataIndex(conf,
                KeyValueStorageRocksDB.factory, indexBaseDir, ledgerIndexDirStatsLogger);
        entryLocationIndex = new EntryLocationIndex(conf,
                KeyValueStorageRocksDB.factory, indexBaseDir, ledgerIndexDirStatsLogger);
        transientLedgerInfoCache = ConcurrentLongHashMap.<TransientLedgerInfo>newBuilder()
                .expectedItems(16 * 1024)
                .concurrencyLevel(Runtime.getRuntime().availableProcessors() * 2)
                .build();
        // Periodically drop per-ledger transient info that has not been touched recently.
        cleanupExecutor.scheduleAtFixedRate(this::cleanupStaleTransientLedgerInfo,
                TransientLedgerInfo.LEDGER_INFO_CACHING_TIME_MINUTES,
                TransientLedgerInfo.LEDGER_INFO_CACHING_TIME_MINUTES, TimeUnit.MINUTES);
        this.entryLogger = entryLogger;
        gcThread = new GarbageCollectorThread(conf,
                ledgerManager, ledgerDirsManager, this, entryLogger, ledgerIndexDirStatsLogger);
        dbLedgerStorageStats = new DbLedgerStorageStats(
                ledgerIndexDirStatsLogger,
                () -> writeCache.size() + writeCacheBeingFlushed.size(),
                () -> writeCache.count() + writeCacheBeingFlushed.count(),
                () -> readCache.size(),
                () -> readCache.count()
        );
        flushExecutorTime = ledgerIndexDirStatsLogger.getThreadScopedCounter("db-storage-thread-time");
        executor.submit(() -> {
            ThreadRegistry.register(dbStoragerExecutorName, 0);
            // ensure the metric gets registered on start-up as this thread only executes
            // when the write cache is full which may not happen or not for a long time
            flushExecutorTime.addLatency(0, TimeUnit.NANOSECONDS);
        });
        ledgerDirsManager.addLedgerDirsListener(getLedgerDirsListener());
        // If index dir differs from ledger dir, watch it for disk events as well.
        if (!ledgerBaseDir.equals(indexBaseDir)) {
            indexDirsManager.addLedgerDirsListener(getLedgerDirsListener());
        }
    }
    /**
     * No-op: this instance is fully initialized in its constructor, which receives the
     * same collaborators directly.
     */
    @Override
    public void initialize(ServerConfiguration conf, LedgerManager ledgerManager, LedgerDirsManager ledgerDirsManager,
                           LedgerDirsManager indexDirsManager, StatsLogger statsLogger,
                           ByteBufAllocator allocator) throws IOException {
        /// Initialized in constructor
    }

    /** No-op: this storage implementation does not use the bookie state manager. */
    @Override
    public void setStateManager(StateManager stateManager) { }

    /** Replaces the checkpoint source used by {@link #flush()} and {@link #checkpoint(Checkpoint)}. */
    @Override
    public void setCheckpointSource(CheckpointSource checkpointSource) {
        this.checkpointSource = checkpointSource;
    }

    /** No-op: an external checkpointer is not needed by this implementation. */
    @Override
    public void setCheckpointer(Checkpointer checkpointer) { }
/**
* Evict all the ledger info object that were not used recently.
*/
private void cleanupStaleTransientLedgerInfo() {
transientLedgerInfoCache.removeIf((ledgerId, ledgerInfo) -> {
boolean isStale = ledgerInfo.isStale();
if (isStale) {
ledgerInfo.close();
}
return isStale;
});
}
    /** Starts the background garbage-collector thread. */
    @Override
    public void start() {
        gcThread.start();
    }

    /** Requests an out-of-schedule GC pass (delegated to the GC thread). */
    @Override
    public void forceGC() {
        gcThread.enableForceGC();
    }

    /** Requests an out-of-schedule GC pass, optionally forcing major and/or minor compaction. */
    @Override
    public void forceGC(boolean forceMajor, boolean forceMinor) {
        gcThread.enableForceGC(forceMajor, forceMinor);
    }

    /** @return true if a forced GC pass is currently in progress. */
    @Override
    public boolean isInForceGC() {
        return gcThread.isInForceGC();
    }

    /** Suspends minor compaction on the GC thread. */
    public void suspendMinorGC() {
        gcThread.suspendMinorGC();
    }

    /** Suspends major compaction on the GC thread. */
    public void suspendMajorGC() {
        gcThread.suspendMajorGC();
    }

    /** Resumes minor compaction on the GC thread. */
    public void resumeMinorGC() {
        gcThread.resumeMinorGC();
    }

    /** Resumes major compaction on the GC thread. */
    public void resumeMajorGC() {
        gcThread.resumeMajorGC();
    }

    /** @return true if major compaction is currently suspended. */
    public boolean isMajorGcSuspended() {
        return gcThread.isMajorGcSuspend();
    }

    /** @return true if minor compaction is currently suspended. */
    public boolean isMinorGcSuspended() {
        return gcThread.isMinorGcSuspend();
    }
    /**
     * Schedules a RocksDB compaction of the entry-location index on the cleanup executor.
     * No-op if a compaction is already running.
     */
    @Override
    public void entryLocationCompact() {
        if (entryLocationIndex.isCompacting()) {
            // RocksDB already running compact.
            return;
        }
        cleanupExecutor.execute(() -> {
            // There can only be one single cleanup task running because the cleanupExecutor
            // is single-threaded
            try {
                log.info("Trigger entry location index RocksDB compact.");
                entryLocationIndex.compact();
            } catch (Throwable t) {
                log.warn("Failed to trigger entry location index RocksDB compact", t);
            }
        });
    }

    /** @return true if the entry-location index is currently compacting. */
    @Override
    public boolean isEntryLocationCompacting() {
        return entryLocationIndex.isCompacting();
    }

    /** @return the filesystem path(s) of the entry-location RocksDB database. */
    @Override
    public List<String> getEntryLocationDBPath() {
        return Lists.newArrayList(entryLocationIndex.getEntryLocationDBPath());
    }
    /**
     * Stops the storage: flushes pending entries first, then shuts down the GC thread,
     * entry logger, executors, indexes and caches, in that order. IOExceptions are
     * logged rather than propagated so shutdown always completes.
     */
    @Override
    public void shutdown() throws InterruptedException {
        try {
            // Persist everything still sitting in the write cache before closing.
            flush();
            gcThread.shutdown();
            entryLogger.close();
            cleanupExecutor.shutdown();
            cleanupExecutor.awaitTermination(1, TimeUnit.SECONDS);
            ledgerIndex.close();
            entryLocationIndex.close();
            writeCache.close();
            writeCacheBeingFlushed.close();
            readCache.close();
            executor.shutdown();
        } catch (IOException e) {
            log.error("Error closing db storage", e);
        }
    }
    /**
     * Checks the metadata index for the ledger's existence flag.
     *
     * @return true if the ledger is marked as existing; false if it is unknown to the index
     */
    @Override
    public boolean ledgerExists(long ledgerId) throws IOException {
        try {
            LedgerData ledgerData = ledgerIndex.get(ledgerId);
            if (log.isDebugEnabled()) {
                log.debug("Ledger exists. ledger: {} : {}", ledgerId, ledgerData.getExists());
            }
            return ledgerData.getExists();
        } catch (Bookie.NoLedgerException nle) {
            // ledger does not exist
            return false;
        }
    }
    /**
     * Checks whether an entry is present in either write cache, the read cache, or the
     * location index. A miss while the ledger is in limbo raises an exception (via
     * throwIfLimbo) instead of returning false.
     *
     * @return true if the entry is found; false only if definitively absent
     */
    @Override
    public boolean entryExists(long ledgerId, long entryId) throws IOException, BookieException {
        // LAST_ADD_CONFIRMED is a virtual entry id, never stored as such.
        if (entryId == BookieProtocol.LAST_ADD_CONFIRMED) {
            return false;
        }
        // We need to try to read from both write caches, since recent entries could be found in either of the two. The
        // write caches are already thread safe on their own, here we just need to make sure we get references to both
        // of them. Using an optimistic lock since the read lock is always free, unless we're swapping the caches.
        long stamp = writeCacheRotationLock.tryOptimisticRead();
        WriteCache localWriteCache = writeCache;
        WriteCache localWriteCacheBeingFlushed = writeCacheBeingFlushed;
        if (!writeCacheRotationLock.validate(stamp)) {
            // Fallback to regular read lock approach
            stamp = writeCacheRotationLock.readLock();
            try {
                localWriteCache = writeCache;
                localWriteCacheBeingFlushed = writeCacheBeingFlushed;
            } finally {
                writeCacheRotationLock.unlockRead(stamp);
            }
        }
        boolean inCache = localWriteCache.hasEntry(ledgerId, entryId)
                || localWriteCacheBeingFlushed.hasEntry(ledgerId, entryId)
                || readCache.hasEntry(ledgerId, entryId);
        if (inCache) {
            return true;
        }
        // Read from main storage
        long entryLocation = entryLocationIndex.getLocation(ledgerId, entryId);
        if (entryLocation != 0) {
            return true;
        }
        // Only a negative result while in limbo equates to unknown
        throwIfLimbo(ledgerId);
        return false;
    }
    /**
     * Reads the fenced flag from the metadata index. A negative answer while the ledger
     * is in limbo is treated as unknown and raises an exception instead.
     */
    @Override
    public boolean isFenced(long ledgerId) throws IOException, BookieException {
        boolean isFenced = ledgerIndex.get(ledgerId).getFenced();
        if (log.isDebugEnabled()) {
            log.debug("ledger: {}, isFenced: {}.", ledgerId, isFenced);
        }
        // Only a negative result while in limbo equates to unknown
        if (!isFenced) {
            throwIfLimbo(ledgerId);
        }
        return isFenced;
    }

    /**
     * Marks a ledger as fenced in the metadata index. When the flag transitions, any LAC
     * watchers on the ledger are notified (with Long.MAX_VALUE) so they can react.
     *
     * @return true if the flag actually changed
     */
    @Override
    public boolean setFenced(long ledgerId) throws IOException {
        if (log.isDebugEnabled()) {
            log.debug("Set fenced. ledger: {}", ledgerId);
        }
        boolean changed = ledgerIndex.setFenced(ledgerId);
        if (changed) {
            // notify all the watchers if a ledger is fenced
            TransientLedgerInfo ledgerInfo = transientLedgerInfoCache.get(ledgerId);
            if (null != ledgerInfo) {
                ledgerInfo.notifyWatchers(Long.MAX_VALUE);
            }
        }
        return changed;
    }

    /** Stores the ledger's master key in the metadata index. */
    @Override
    public void setMasterKey(long ledgerId, byte[] masterKey) throws IOException {
        if (log.isDebugEnabled()) {
            log.debug("Set master key. ledger: {}", ledgerId);
        }
        ledgerIndex.setMasterKey(ledgerId, masterKey);
    }

    /** Reads the ledger's master key back from the metadata index. */
    @Override
    public byte[] readMasterKey(long ledgerId) throws IOException, BookieException {
        if (log.isDebugEnabled()) {
            log.debug("Read master key. ledger: {}", ledgerId);
        }
        return ledgerIndex.get(ledgerId).getMasterKey().toByteArray();
    }
    /**
     * Adds an entry to the write cache. The ledger id, entry id and LAC are read from the
     * first 24 bytes of the buffer (at the current reader index). If the cache is full,
     * falls back to {@link #triggerFlushAndAddEntry} which schedules a flush and retries.
     *
     * @param entry buffer whose header is [ledgerId:8][entryId:8][lac:8]
     * @return the entry id that was added
     * @throws BookieException.OperationRejectedException if the cache stays full past the throttle timeout
     */
    @Override
    public long addEntry(ByteBuf entry) throws IOException, BookieException {
        long startTime = MathUtils.nowInNano();
        long ledgerId = entry.getLong(entry.readerIndex());
        long entryId = entry.getLong(entry.readerIndex() + 8);
        long lac = entry.getLong(entry.readerIndex() + 16);
        if (log.isDebugEnabled()) {
            log.debug("Add entry. {}@{}, lac = {}", ledgerId, entryId, lac);
        }
        // First we try to do an optimistic locking to get access to the current write cache.
        // This is based on the fact that the write cache is only being rotated (swapped) every 1 minute. During the
        // rest of the time, we can have multiple thread using the optimistic lock here without interfering.
        long stamp = writeCacheRotationLock.tryOptimisticRead();
        boolean inserted = false;
        inserted = writeCache.put(ledgerId, entryId, entry);
        if (!writeCacheRotationLock.validate(stamp)) {
            // The write cache was rotated while we were inserting. We need to acquire the proper read lock and repeat
            // the operation because we might have inserted in a write cache that was already being flushed and cleared,
            // without being sure about this last entry being flushed or not.
            stamp = writeCacheRotationLock.readLock();
            try {
                inserted = writeCache.put(ledgerId, entryId, entry);
            } finally {
                writeCacheRotationLock.unlockRead(stamp);
            }
        }
        if (!inserted) {
            // Cache full: trigger a flush and keep retrying until timeout.
            triggerFlushAndAddEntry(ledgerId, entryId, entry);
        }
        // after successfully insert the entry, update LAC and notify the watchers
        updateCachedLacIfNeeded(ledgerId, lac);
        recordSuccessfulEvent(dbLedgerStorageStats.getAddEntryStats(), startTime);
        return entryId;
    }
    /**
     * Called when the write cache is full: schedules a background flush (at most one at a
     * time) and retries the insert every millisecond until it succeeds or the throttle
     * timeout ({@code maxThrottleTimeNanos}) expires.
     *
     * @throws BookieException.OperationRejectedException if the timeout expires without inserting
     * @throws IOException if the thread is interrupted while waiting
     */
    private void triggerFlushAndAddEntry(long ledgerId, long entryId, ByteBuf entry)
            throws IOException, BookieException {
        long throttledStartTime = MathUtils.nowInNano();
        dbLedgerStorageStats.getThrottledWriteRequests().inc();
        long absoluteTimeoutNanos = System.nanoTime() + maxThrottleTimeNanos;
        while (System.nanoTime() < absoluteTimeoutNanos) {
            // Write cache is full, we need to trigger a flush so that it gets rotated
            // If the flush has already been triggered or flush has already switched the
            // cache, we don't need to trigger another flush
            if (!isFlushOngoing.get() && hasFlushBeenTriggered.compareAndSet(false, true)) {
                // Trigger an early flush in background
                log.info("Write cache is full, triggering flush");
                executor.execute(() -> {
                    long startTime = System.nanoTime();
                    try {
                        flush();
                    } catch (IOException e) {
                        log.error("Error during flush", e);
                    } finally {
                        flushExecutorTime.addLatency(MathUtils.elapsedNanos(startTime), TimeUnit.NANOSECONDS);
                    }
                });
            }
            long stamp = writeCacheRotationLock.readLock();
            try {
                if (writeCache.put(ledgerId, entryId, entry)) {
                    // We succeeded in putting the entry in write cache in the
                    recordSuccessfulEvent(dbLedgerStorageStats.getThrottledWriteStats(), throttledStartTime);
                    return;
                }
            } finally {
                writeCacheRotationLock.unlockRead(stamp);
            }
            // Wait some time and try again
            try {
                Thread.sleep(1);
            } catch (InterruptedException e) {
                // Restore the interrupt flag before converting to an IOException.
                Thread.currentThread().interrupt();
                throw new IOException("Interrupted when adding entry " + ledgerId + "@" + entryId);
            }
        }
        // Timeout expired and we weren't able to insert in write cache
        dbLedgerStorageStats.getRejectedWriteRequests().inc();
        recordFailedEvent(dbLedgerStorageStats.getThrottledWriteStats(), throttledStartTime);
        throw new OperationRejectedException();
    }
@Override
public ByteBuf getEntry(long ledgerId, long entryId) throws IOException, BookieException {
long startTime = MathUtils.nowInNano();
try {
ByteBuf entry = doGetEntry(ledgerId, entryId);
recordSuccessfulEvent(dbLedgerStorageStats.getReadEntryStats(), startTime);
return entry;
} catch (IOException e) {
recordFailedEvent(dbLedgerStorageStats.getReadEntryStats(), startTime);
throw e;
}
}
    /**
     * Looks an entry up through the cache hierarchy: active write cache, write cache being
     * flushed, read cache, then the entry log via the location index. On an entry-log read
     * the entry is inserted into the read cache and a read-ahead pass is started for the
     * following entries.
     *
     * @throws Bookie.NoEntryException if the entry is not found anywhere
     */
    private ByteBuf doGetEntry(long ledgerId, long entryId) throws IOException, BookieException {
        if (log.isDebugEnabled()) {
            log.debug("Get Entry: {}@{}", ledgerId, entryId);
        }
        // LAST_ADD_CONFIRMED means "give me the most recent entry of this ledger".
        if (entryId == BookieProtocol.LAST_ADD_CONFIRMED) {
            return getLastEntry(ledgerId);
        }
        // We need to try to read from both write caches, since recent entries could be found in either of the two. The
        // write caches are already thread safe on their own, here we just need to make sure we get references to both
        // of them. Using an optimistic lock since the read lock is always free, unless we're swapping the caches.
        long stamp = writeCacheRotationLock.tryOptimisticRead();
        WriteCache localWriteCache = writeCache;
        WriteCache localWriteCacheBeingFlushed = writeCacheBeingFlushed;
        if (!writeCacheRotationLock.validate(stamp)) {
            // Fallback to regular read lock approach
            stamp = writeCacheRotationLock.readLock();
            try {
                localWriteCache = writeCache;
                localWriteCacheBeingFlushed = writeCacheBeingFlushed;
            } finally {
                writeCacheRotationLock.unlockRead(stamp);
            }
        }
        // First try to read from the write cache of recent entries
        ByteBuf entry = localWriteCache.get(ledgerId, entryId);
        if (entry != null) {
            dbLedgerStorageStats.getWriteCacheHitCounter().inc();
            return entry;
        }
        // If there's a flush going on, the entry might be in the flush buffer
        entry = localWriteCacheBeingFlushed.get(ledgerId, entryId);
        if (entry != null) {
            dbLedgerStorageStats.getWriteCacheHitCounter().inc();
            return entry;
        }
        dbLedgerStorageStats.getWriteCacheMissCounter().inc();
        // Try reading from read-ahead cache
        entry = readCache.get(ledgerId, entryId);
        if (entry != null) {
            dbLedgerStorageStats.getReadCacheHitCounter().inc();
            return entry;
        }
        dbLedgerStorageStats.getReadCacheMissCounter().inc();
        // Read from main storage
        long entryLocation;
        long locationIndexStartNano = MathUtils.nowInNano();
        try {
            entryLocation = entryLocationIndex.getLocation(ledgerId, entryId);
            if (entryLocation == 0) {
                // Only a negative result while in limbo equates to unknown
                throwIfLimbo(ledgerId);
                throw new NoEntryException(ledgerId, entryId);
            }
        } finally {
            dbLedgerStorageStats.getReadFromLocationIndexTime().addLatency(
                    MathUtils.elapsedNanos(locationIndexStartNano), TimeUnit.NANOSECONDS);
        }
        long readEntryStartNano = MathUtils.nowInNano();
        try {
            entry = entryLogger.readEntry(ledgerId, entryId, entryLocation);
        } finally {
            dbLedgerStorageStats.getReadFromEntryLogTime().addLatency(
                    MathUtils.elapsedNanos(readEntryStartNano), TimeUnit.NANOSECONDS);
        }
        readCache.put(ledgerId, entryId, entry);
        // Try to read more entries
        long nextEntryLocation = entryLocation + 4 /* size header */ + entry.readableBytes();
        fillReadAheadCache(ledgerId, entryId + 1, nextEntryLocation);
        return entry;
    }
private void fillReadAheadCache(long orginalLedgerId, long firstEntryId, long firstEntryLocation) {
long readAheadStartNano = MathUtils.nowInNano();
int count = 0;
long size = 0;
try {
long firstEntryLogId = (firstEntryLocation >> 32);
long currentEntryLogId = firstEntryLogId;
long currentEntryLocation = firstEntryLocation;
while (chargeReadAheadCache(count, size) && currentEntryLogId == firstEntryLogId) {
ByteBuf entry = entryLogger.readEntry(orginalLedgerId,
firstEntryId, currentEntryLocation);
try {
long currentEntryLedgerId = entry.getLong(0);
long currentEntryId = entry.getLong(8);
if (currentEntryLedgerId != orginalLedgerId) {
// Found an entry belonging to a different ledger, stopping read-ahead
break;
}
// Insert entry in read cache
readCache.put(orginalLedgerId, currentEntryId, entry);
count++;
firstEntryId++;
size += entry.readableBytes();
currentEntryLocation += 4 + entry.readableBytes();
currentEntryLogId = currentEntryLocation >> 32;
} finally {
ReferenceCountUtil.release(entry);
}
}
} catch (Exception e) {
if (log.isDebugEnabled()) {
log.debug("Exception during read ahead for ledger: {}: e", orginalLedgerId, e);
}
} finally {
dbLedgerStorageStats.getReadAheadBatchCountStats().registerSuccessfulValue(count);
dbLedgerStorageStats.getReadAheadBatchSizeStats().registerSuccessfulValue(size);
dbLedgerStorageStats.getReadAheadTime().addLatency(
MathUtils.elapsedNanos(readAheadStartNano), TimeUnit.NANOSECONDS);
}
}
protected boolean chargeReadAheadCache(int currentReadAheadCount, long currentReadAheadBytes) {
// compatible with old logic
boolean chargeSizeCondition = currentReadAheadCount < readAheadCacheBatchSize
&& currentReadAheadBytes < maxReadAheadBytesSize;
if (chargeSizeCondition && readAheadCacheBatchBytesSize > 0) {
// exact limits limit the size and count for each batch
chargeSizeCondition = currentReadAheadBytes < readAheadCacheBatchBytesSize;
}
return chargeSizeCondition;
}
public ByteBuf getLastEntry(long ledgerId) throws IOException, BookieException {
throwIfLimbo(ledgerId);
long stamp = writeCacheRotationLock.readLock();
try {
// First try to read from the write cache of recent entries
ByteBuf entry = writeCache.getLastEntry(ledgerId);
if (entry != null) {
if (log.isDebugEnabled()) {
long foundLedgerId = entry.readLong(); // ledgedId
long entryId = entry.readLong();
entry.resetReaderIndex();
if (log.isDebugEnabled()) {
log.debug("Found last entry for ledger {} in write cache: {}@{}", ledgerId, foundLedgerId,
entryId);
}
}
dbLedgerStorageStats.getWriteCacheHitCounter().inc();
return entry;
}
// If there's a flush going on, the entry might be in the flush buffer
entry = writeCacheBeingFlushed.getLastEntry(ledgerId);
if (entry != null) {
if (log.isDebugEnabled()) {
entry.readLong(); // ledgedId
long entryId = entry.readLong();
entry.resetReaderIndex();
if (log.isDebugEnabled()) {
log.debug("Found last entry for ledger {} in write cache being flushed: {}", ledgerId, entryId);
}
}
dbLedgerStorageStats.getWriteCacheHitCounter().inc();
return entry;
}
} finally {
writeCacheRotationLock.unlockRead(stamp);
}
dbLedgerStorageStats.getWriteCacheMissCounter().inc();
// Search the last entry in storage
long locationIndexStartNano = MathUtils.nowInNano();
long lastEntryId = entryLocationIndex.getLastEntryInLedger(ledgerId);
if (log.isDebugEnabled()) {
log.debug("Found last entry for ledger {} in db: {}", ledgerId, lastEntryId);
}
long entryLocation = entryLocationIndex.getLocation(ledgerId, lastEntryId);
dbLedgerStorageStats.getReadFromLocationIndexTime().addLatency(
MathUtils.elapsedNanos(locationIndexStartNano), TimeUnit.NANOSECONDS);
long readEntryStartNano = MathUtils.nowInNano();
ByteBuf content = entryLogger.readEntry(ledgerId, lastEntryId, entryLocation);
dbLedgerStorageStats.getReadFromEntryLogTime().addLatency(
MathUtils.elapsedNanos(readEntryStartNano), TimeUnit.NANOSECONDS);
return content;
}
    /** @return true if the active write cache contains entries that have not been flushed. */
    @VisibleForTesting
    boolean isFlushRequired() {
        long stamp = writeCacheRotationLock.readLock();
        try {
            return !writeCache.isEmpty();
        } finally {
            writeCacheRotationLock.unlockRead(stamp);
        }
    }
    /**
     * Flushes the write cache to durable storage up to a new checkpoint.
     *
     * <p>Protocol: under {@code flushMutex}, swap the write caches so writers continue
     * unblocked, write every cached entry to the entry log, record its location in a
     * RocksDB batch, flush the entry log, the location batch, and the metadata index (in
     * that order), then clear the flushed cache. Finally, deleted-ledger cleanup is
     * scheduled on the single-threaded cleanup executor.
     *
     * @param checkpoint the checkpoint being requested; skipped if already covered by
     *                   {@code lastCheckpoint}
     */
    @Override
    public void checkpoint(Checkpoint checkpoint) throws IOException {
        Checkpoint thisCheckpoint = checkpointSource.newCheckpoint();
        // Already checkpointed past the requested point: nothing to do.
        if (lastCheckpoint.compareTo(checkpoint) > 0) {
            return;
        }
        // Only a single flush operation can happen at a time
        flushMutex.lock();
        long startTime = -1;
        try {
            startTime = MathUtils.nowInNano();
        } catch (Throwable e) {
            // Fix spotbugs warning. Should never happen
            flushMutex.unlock();
            throw new IOException(e);
        }
        try {
            if (writeCache.isEmpty()) {
                return;
            }
            // Swap the write cache so that writes can continue to happen while the flush is
            // ongoing
            swapWriteCache();
            long sizeToFlush = writeCacheBeingFlushed.size();
            if (log.isDebugEnabled()) {
                log.debug("Flushing entries. count: {} -- size {} Mb", writeCacheBeingFlushed.count(),
                        sizeToFlush / 1024.0 / 1024);
            }
            // Write all the pending entries into the entry logger and collect the offset
            // position for each entry
            Batch batch = entryLocationIndex.newBatch();
            writeCacheBeingFlushed.forEach((ledgerId, entryId, entry) -> {
                long location = entryLogger.addEntry(ledgerId, entry);
                entryLocationIndex.addLocation(batch, ledgerId, entryId, location);
            });
            long entryLoggerStart = MathUtils.nowInNano();
            entryLogger.flush();
            recordSuccessfulEvent(dbLedgerStorageStats.getFlushEntryLogStats(), entryLoggerStart);
            long batchFlushStartTime = MathUtils.nowInNano();
            batch.flush();
            batch.close();
            recordSuccessfulEvent(dbLedgerStorageStats.getFlushLocationIndexStats(), batchFlushStartTime);
            if (log.isDebugEnabled()) {
                log.debug("DB batch flushed time : {} s",
                        MathUtils.elapsedNanos(batchFlushStartTime) / (double) TimeUnit.SECONDS.toNanos(1));
            }
            long ledgerIndexStartTime = MathUtils.nowInNano();
            ledgerIndex.flush();
            recordSuccessfulEvent(dbLedgerStorageStats.getFlushLedgerIndexStats(), ledgerIndexStartTime);
            lastCheckpoint = thisCheckpoint;
            // Discard all the entry from the write cache, since they're now persisted
            writeCacheBeingFlushed.clear();
            double flushTimeSeconds = MathUtils.elapsedNanos(startTime) / (double) TimeUnit.SECONDS.toNanos(1);
            double flushThroughput = sizeToFlush / 1024.0 / 1024.0 / flushTimeSeconds;
            if (log.isDebugEnabled()) {
                log.debug("Flushing done time {} s -- Written {} MB/s", flushTimeSeconds, flushThroughput);
            }
            recordSuccessfulEvent(dbLedgerStorageStats.getFlushStats(), startTime);
            dbLedgerStorageStats.getFlushSizeStats().registerSuccessfulValue(sizeToFlush);
        } catch (IOException e) {
            recordFailedEvent(dbLedgerStorageStats.getFlushStats(), startTime);
            // Leave IOExecption as it is
            throw e;
        } finally {
            try {
                cleanupExecutor.execute(() -> {
                    // There can only be one single cleanup task running because the cleanupExecutor
                    // is single-threaded
                    try {
                        if (log.isDebugEnabled()) {
                            log.debug("Removing deleted ledgers from db indexes");
                        }
                        entryLocationIndex.removeOffsetFromDeletedLedgers();
                        ledgerIndex.removeDeletedLedgers();
                    } catch (Throwable t) {
                        log.warn("Failed to cleanup db indexes", t);
                    }
                });
                isFlushOngoing.set(false);
            } finally {
                flushMutex.unlock();
            }
        }
    }
    /**
     * Swap the current write cache with the replacement cache.
     *
     * <p>Called only from {@link #checkpoint(Checkpoint)} while holding {@code flushMutex},
     * so at most one swap happens at a time. Takes the write lock of
     * {@code writeCacheRotationLock}, which invalidates any concurrent optimistic reads of
     * the cache references.
     */
    private void swapWriteCache() {
        long stamp = writeCacheRotationLock.writeLock();
        try {
            // First, swap the current write-cache map with an empty one so that writes will
            // go on unaffected. Only a single flush is happening at the same time
            WriteCache tmp = writeCacheBeingFlushed;
            writeCacheBeingFlushed = writeCache;
            writeCache = tmp;
            // since the cache is switched, we can allow flush to be triggered
            hasFlushBeenTriggered.set(false);
        } finally {
            try {
                // Mark the flush as in progress before releasing the write lock.
                isFlushOngoing.set(true);
            } finally {
                writeCacheRotationLock.unlockWrite(stamp);
            }
        }
    }
    /**
     * Takes a new checkpoint and flushes up to it. When there is a single ledger dir,
     * the checkpoint is also marked complete here.
     */
    @Override
    public void flush() throws IOException {
        Checkpoint cp = checkpointSource.newCheckpoint();
        checkpoint(cp);
        if (singleLedgerDirs) {
            checkpointSource.checkpointComplete(cp, true);
        }
    }
    /**
     * Deletes a ledger: drops its entries from the active write cache, marks it deleted
     * in both indexes, notifies registered deletion listeners, and discards its transient
     * info (closing any LAC watchers).
     */
    @Override
    public void deleteLedger(long ledgerId) throws IOException {
        if (log.isDebugEnabled()) {
            log.debug("Deleting ledger {}", ledgerId);
        }
        // Delete entries from this ledger that are still in the write cache
        long stamp = writeCacheRotationLock.readLock();
        try {
            writeCache.deleteLedger(ledgerId);
        } finally {
            writeCacheRotationLock.unlockRead(stamp);
        }
        entryLocationIndex.delete(ledgerId);
        ledgerIndex.delete(ledgerId);
        // Indexed loop over the snapshot-consistent CopyOnWriteArrayList.
        for (int i = 0, size = ledgerDeletionListeners.size(); i < size; i++) {
            LedgerDeletionListener listener = ledgerDeletionListeners.get(i);
            listener.ledgerDeleted(ledgerId);
        }
        TransientLedgerInfo tli = transientLedgerInfoCache.remove(ledgerId);
        if (tli != null) {
            tli.close();
        }
    }
    /** @return the ledger ids known to the metadata index within [firstLedgerId, lastLedgerId). */
    @Override
    public Iterable<Long> getActiveLedgersInRange(long firstLedgerId, long lastLedgerId) throws IOException {
        return ledgerIndex.getActiveLedgersInRange(firstLedgerId, lastLedgerId);
    }

    /**
     * Updates the location index after compaction has rewritten entries. Waits for any
     * in-flight flush to finish first (lock/unlock of {@code flushMutex} acts as a
     * barrier) so a pending flush cannot overwrite the new locations with stale ones.
     */
    @Override
    public void updateEntriesLocations(Iterable<EntryLocation> locations) throws IOException {
        // Before updating the DB with the new location for the compacted entries, we need to
        // make sure that there is no ongoing flush() operation.
        // If there were a flush, we could have the following situation, which is highly
        // unlikely though possible:
        // 1. Flush operation has written the write-cache content into entry-log files
        // 2. The DB location index is not yet updated
        // 3. Compaction is triggered and starts compacting some of the recent files
        // 4. Compaction will write the "new location" into the DB
        // 5. The pending flush() will overwrite the DB with the "old location", pointing
        //    to a file that no longer exists
        //
        // To avoid this race condition, we need that all the entries that are potentially
        // included in the compaction round to have all the indexes already flushed into
        // the DB.
        // The easiest lightweight way to achieve this is to wait for any pending
        // flush operation to be completed before updating the index with the compacted
        // entries, by blocking on the flushMutex.
        flushMutex.lock();
        flushMutex.unlock();
        // We don't need to keep the flush mutex locked here while updating the DB.
        // It's fine to have a concurrent flush operation at this point, because we
        // know that none of the entries being flushed was included in the compaction
        // round that we are dealing with.
        entryLocationIndex.updateLocations(locations);
    }

    /** Exposes the entry logger for tests. */
    @VisibleForTesting
    EntryLogger getEntryLogger() {
        return entryLogger;
    }
    /**
     * Returns the last-add-confirmed for a ledger. Served from the transient cache when
     * present; otherwise read from the last stored entry (bytes 16..23 of its header)
     * and cached for subsequent calls.
     *
     * @throws BookieException if the ledger is in limbo
     */
    @Override
    public long getLastAddConfirmed(long ledgerId) throws IOException, BookieException {
        throwIfLimbo(ledgerId);
        TransientLedgerInfo ledgerInfo = transientLedgerInfoCache.get(ledgerId);
        long lac = null != ledgerInfo ? ledgerInfo.getLastAddConfirmed() : TransientLedgerInfo.NOT_ASSIGNED_LAC;
        if (lac == TransientLedgerInfo.NOT_ASSIGNED_LAC) {
            // Read the LAC out of the last entry's header.
            ByteBuf bb = getEntry(ledgerId, BookieProtocol.LAST_ADD_CONFIRMED);
            try {
                bb.skipBytes(2 * Long.BYTES); // skip ledger id and entry id
                lac = bb.readLong();
                lac = getOrAddLedgerInfo(ledgerId).setLastAddConfirmed(lac);
            } finally {
                // getEntry hands over a reference; release it after extracting the LAC.
                ReferenceCountUtil.release(bb);
            }
        }
        return lac;
    }

    /** Registers a watcher to be notified when the ledger's LAC advances past previousLAC. */
    @Override
    public boolean waitForLastAddConfirmedUpdate(long ledgerId, long previousLAC,
                                                 Watcher<LastAddConfirmedUpdateNotification> watcher)
            throws IOException {
        return getOrAddLedgerInfo(ledgerId).waitForLastAddConfirmedUpdate(previousLAC, watcher);
    }

    /** Deregisters a previously registered LAC watcher. */
    @Override
    public void cancelWaitForLastAddConfirmedUpdate(long ledgerId,
                                                    Watcher<LastAddConfirmedUpdateNotification> watcher)
            throws IOException {
        getOrAddLedgerInfo(ledgerId).cancelWaitForLastAddConfirmedUpdate(watcher);
    }
    /**
     * Stores an explicit LAC both in the transient cache and durably in the metadata
     * index, then wakes all LAC watchers on the ledger.
     */
    @Override
    public void setExplicitLac(long ledgerId, ByteBuf lac) throws IOException {
        TransientLedgerInfo ledgerInfo = getOrAddLedgerInfo(ledgerId);
        ledgerInfo.setExplicitLac(lac);
        ledgerIndex.setExplicitLac(ledgerId, lac);
        ledgerInfo.notifyWatchers(Long.MAX_VALUE);
    }

    /**
     * Returns the explicit LAC for a ledger, preferring the transient cache and falling
     * back to the persisted metadata (which is then re-cached). Returns null if no
     * explicit LAC was ever set.
     *
     * @throws BookieException if the ledger is in limbo
     */
    @Override
    public ByteBuf getExplicitLac(long ledgerId) throws IOException, BookieException {
        throwIfLimbo(ledgerId);
        if (log.isDebugEnabled()) {
            log.debug("getExplicitLac ledger {}", ledgerId);
        }
        TransientLedgerInfo ledgerInfo = getOrAddLedgerInfo(ledgerId);
        if (ledgerInfo.getExplicitLac() != null) {
            if (log.isDebugEnabled()) {
                log.debug("getExplicitLac ledger {} returned from TransientLedgerInfo", ledgerId);
            }
            return ledgerInfo.getExplicitLac();
        }
        LedgerData ledgerData = ledgerIndex.get(ledgerId);
        if (!ledgerData.hasExplicitLac()) {
            if (log.isDebugEnabled()) {
                log.debug("getExplicitLac ledger {} missing from LedgerData", ledgerId);
            }
            return null;
        }
        if (log.isDebugEnabled()) {
            log.debug("getExplicitLac ledger {} returned from LedgerData", ledgerId);
        }
        // Re-hydrate the transient cache from the persisted value.
        ByteString persistedLac = ledgerData.getExplicitLac();
        ledgerInfo.setExplicitLac(Unpooled.wrappedBuffer(persistedLac.toByteArray()));
        return ledgerInfo.getExplicitLac();
    }
private TransientLedgerInfo getOrAddLedgerInfo(long ledgerId) {
return transientLedgerInfoCache.computeIfAbsent(ledgerId, l -> {
return new TransientLedgerInfo(l, ledgerIndex);
});
}
private void updateCachedLacIfNeeded(long ledgerId, long lac) {
TransientLedgerInfo tli = transientLedgerInfoCache.get(ledgerId);
if (tli != null) {
tli.setLastAddConfirmed(lac);
}
}
    // Intentionally a no-op for this storage implementation.
    @Override
    public void flushEntriesLocationsIndex() throws IOException {
        // No-op. Location index is already flushed in updateEntriesLocations() call
    }
/**
* Add an already existing ledger to the index.
*
* <p>This method is only used as a tool to help the migration from InterleaveLedgerStorage to DbLedgerStorage
*
* @param ledgerId
* the ledger id
* @param pages
* Iterator over index pages from Indexed
* @return the number of
*/
@SuppressFBWarnings("RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE")
public long addLedgerToIndex(long ledgerId, boolean isFenced, byte[] masterKey,
LedgerCache.PageEntriesIterable pages) throws Exception {
LedgerData ledgerData = LedgerData.newBuilder().setExists(true).setFenced(isFenced)
.setMasterKey(ByteString.copyFrom(masterKey)).build();
ledgerIndex.set(ledgerId, ledgerData);
MutableLong numberOfEntries = new MutableLong();
// Iterate over all the entries pages
Batch batch = entryLocationIndex.newBatch();
for (LedgerCache.PageEntries page: pages) {
try (LedgerEntryPage lep = page.getLEP()) {
lep.getEntries((entryId, location) -> {
entryLocationIndex.addLocation(batch, ledgerId, entryId, location);
numberOfEntries.increment();
return true;
});
}
}
ledgerIndex.flush();
batch.flush();
batch.close();
return numberOfEntries.longValue();
}
    // Adds a listener to be notified when ledgers are deleted from this storage.
    @Override
    public void registerLedgerDeletionListener(LedgerDeletionListener listener) {
        ledgerDeletionListeners.add(listener);
    }
    // Exposes the (ledgerId, entryId) -> location index, e.g. for tooling.
    public EntryLocationIndex getEntryLocationIndex() {
        return entryLocationIndex;
    }
    // Records the elapsed time since startTimeNanos as a successful operation.
    private void recordSuccessfulEvent(OpStatsLogger logger, long startTimeNanos) {
        logger.registerSuccessfulEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
    }
    // Records the elapsed time since startTimeNanos as a failed operation.
    private void recordFailedEvent(OpStatsLogger logger, long startTimeNanos) {
        logger.registerFailedEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
    }
    // Single-directory storage has exactly one GC thread, hence a singleton list.
    @Override
    public List<GarbageCollectionStatus> getGarbageCollectionStatus() {
        return Collections.singletonList(gcThread.getGarbageCollectionStatus());
    }
    /**
     * Callback invoked for each entry of a ledger with its location
     * (entry log id and position within that log).
     */
    public interface LedgerLoggerProcessor {
        void process(long entryId, long entryLogId, long position);
    }
    private static final Logger log = LoggerFactory.getLogger(SingleDirectoryDbLedgerStorage.class);
    // Not implemented for this storage backend; callers must handle the
    // UnsupportedOperationException.
    @Override
    public OfLong getListOfEntriesOfLedger(long ledgerId) throws IOException {
        throw new UnsupportedOperationException(
                "getListOfEntriesOfLedger method is currently unsupported for SingleDirectoryDbLedgerStorage");
    }
    /**
     * Builds the disk-space listener that tunes garbage collection as disks fill
     * up or recover. Policy: when force-GC-on-no-space is enabled, low space
     * triggers forced GC to reclaim room; otherwise compaction is suspended to
     * avoid the extra write amplification on an almost-full disk.
     */
    private LedgerDirsManager.LedgerDirsListener getLedgerDirsListener() {
        return new LedgerDirsListener() {
            @Override
            public void diskAlmostFull(File disk) {
                if (gcThread.isForceGCAllowWhenNoSpace()) {
                    gcThread.enableForceGC();
                } else {
                    // Warn threshold only: keep minor GC running, stop major GC.
                    gcThread.suspendMajorGC();
                }
            }
            @Override
            public void diskFull(File disk) {
                if (gcThread.isForceGCAllowWhenNoSpace()) {
                    gcThread.enableForceGC();
                } else {
                    // Disk is full: stop both compaction flavors.
                    gcThread.suspendMajorGC();
                    gcThread.suspendMinorGC();
                }
            }
            @Override
            public void allDisksFull(boolean highPriorityWritesAllowed) {
                if (gcThread.isForceGCAllowWhenNoSpace()) {
                    gcThread.enableForceGC();
                } else {
                    gcThread.suspendMajorGC();
                    gcThread.suspendMinorGC();
                }
            }
            @Override
            public void diskWritable(File disk) {
                // we have enough space now
                if (gcThread.isForceGCAllowWhenNoSpace()) {
                    // disable force gc.
                    gcThread.disableForceGC();
                } else {
                    // resume compaction to normal.
                    gcThread.resumeMajorGC();
                    gcThread.resumeMinorGC();
                }
            }
            @Override
            public void diskJustWritable(File disk) {
                if (gcThread.isForceGCAllowWhenNoSpace()) {
                    // if a disk is just writable, we still need force gc.
                    gcThread.enableForceGC();
                } else {
                    // still under warn threshold, only resume minor compaction.
                    gcThread.resumeMinorGC();
                }
            }
        };
    }
    // Marks the ledger as "limbo": its state on this bookie is not authoritative.
    @Override
    public void setLimboState(long ledgerId) throws IOException {
        if (log.isDebugEnabled()) {
            log.debug("setLimboState. ledger: {}", ledgerId);
        }
        ledgerIndex.setLimbo(ledgerId);
    }
    // Returns whether the ledger is currently flagged as limbo in the index.
    @Override
    public boolean hasLimboState(long ledgerId) throws IOException {
        if (log.isDebugEnabled()) {
            log.debug("hasLimboState. ledger: {}", ledgerId);
        }
        return ledgerIndex.get(ledgerId).getLimbo();
    }
    // Clears the limbo flag once the ledger's state has been re-established.
    @Override
    public void clearLimboState(long ledgerId) throws IOException {
        if (log.isDebugEnabled()) {
            log.debug("clearLimboState. ledger: {}", ledgerId);
        }
        ledgerIndex.clearLimbo(ledgerId);
    }
    // Guard used by read paths: reads of a limbo ledger must fail with
    // DataUnknownException rather than return possibly-stale data.
    private void throwIfLimbo(long ledgerId) throws IOException, BookieException {
        if (hasLimboState(ledgerId)) {
            if (log.isDebugEnabled()) {
                log.debug("Accessing ledger({}) in limbo state, throwing exception", ledgerId);
            }
            throw BookieException.create(BookieException.Code.DataUnknownException);
        }
    }
    /**
     * Mapping of enums to bitmaps. The bitmaps must not overlap so that we can
     * do bitwise operations on them.
     */
    private static final Map<StorageState, Integer> stateBitmaps = ImmutableMap.of(
        StorageState.NEEDS_INTEGRITY_CHECK, 0x00000001);
    /**
     * Decodes the persisted storage-state bit field into an {@link EnumSet}.
     * Fails fast (checkState) if any bit is set that no known flag accounts for,
     * e.g. after a downgrade to a version that doesn't know the flag.
     */
    @Override
    public EnumSet<StorageState> getStorageStateFlags() throws IOException {
        int flags = ledgerIndex.getStorageStateFlags();
        EnumSet<StorageState> flagsEnum = EnumSet.noneOf(StorageState.class);
        for (Map.Entry<StorageState, Integer> e : stateBitmaps.entrySet()) {
            int value = e.getValue();
            if ((flags & value) == value) {
                flagsEnum.add(e.getKey());
            }
            // Strip each recognized bit; anything left over is unknown.
            flags = flags & ~value;
        }
        checkState(flags == 0, "Unknown storage state flag found " + flags);
        return flagsEnum;
    }
    /**
     * Sets a storage-state flag bit via a compare-and-swap retry loop:
     * read current bits, OR in the flag, and retry on concurrent modification.
     */
    @Override
    public void setStorageStateFlag(StorageState flag) throws IOException {
        checkArgument(stateBitmaps.containsKey(flag), "Unsupported flag " + flag);
        int flagInt = stateBitmaps.get(flag);
        while (true) {
            int curFlags = ledgerIndex.getStorageStateFlags();
            int newFlags = curFlags | flagInt;
            if (ledgerIndex.setStorageStateFlags(curFlags, newFlags)) {
                return;
            } else {
                log.info("Conflict updating storage state flags {} -> {}, retrying",
                         curFlags, newFlags);
            }
        }
    }
    /**
     * Clears a storage-state flag bit; same CAS retry scheme as
     * {@code setStorageStateFlag}, but with the flag bit masked out.
     */
    @Override
    public void clearStorageStateFlag(StorageState flag) throws IOException {
        checkArgument(stateBitmaps.containsKey(flag), "Unsupported flag " + flag);
        int flagInt = stateBitmaps.get(flag);
        while (true) {
            int curFlags = ledgerIndex.getStorageStateFlags();
            int newFlags = curFlags & ~flagInt;
            if (ledgerIndex.setStorageStateFlags(curFlags, newFlags)) {
                return;
            } else {
                log.info("Conflict updating storage state flags {} -> {}, retrying",
                         curFlags, newFlags);
            }
        }
    }
    // Test-only accessor for the storage stats holder.
    @VisibleForTesting
    DbLedgerStorageStats getDbLedgerStorageStats() {
        return dbLedgerStorageStats;
    }
| 548 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/DbLedgerStorage.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import static com.google.common.base.Preconditions.checkNotNull;
// CHECKSTYLE.OFF: IllegalImport
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.util.concurrent.DefaultThreadFactory;
import io.netty.util.internal.PlatformDependent;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.PrimitiveIterator.OfLong;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.BookieException;
import org.apache.bookkeeper.bookie.CheckpointSource;
import org.apache.bookkeeper.bookie.CheckpointSource.Checkpoint;
import org.apache.bookkeeper.bookie.Checkpointer;
import org.apache.bookkeeper.bookie.DefaultEntryLogger;
import org.apache.bookkeeper.bookie.GarbageCollectionStatus;
import org.apache.bookkeeper.bookie.LastAddConfirmedUpdateNotification;
import org.apache.bookkeeper.bookie.LedgerCache;
import org.apache.bookkeeper.bookie.LedgerDirsManager;
import org.apache.bookkeeper.bookie.LedgerStorage;
import org.apache.bookkeeper.bookie.StateManager;
import org.apache.bookkeeper.bookie.storage.EntryLogger;
import org.apache.bookkeeper.bookie.storage.directentrylogger.DirectEntryLogger;
import org.apache.bookkeeper.bookie.storage.directentrylogger.EntryLogIdsImpl;
import org.apache.bookkeeper.bookie.storage.ldb.KeyValueStorageFactory.DbConfigType;
import org.apache.bookkeeper.bookie.storage.ldb.SingleDirectoryDbLedgerStorage.LedgerLoggerProcessor;
import org.apache.bookkeeper.common.util.MathUtils;
import org.apache.bookkeeper.common.util.Watcher;
import org.apache.bookkeeper.common.util.nativeio.NativeIOImpl;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.slogger.slf4j.Slf4jSlogger;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
import org.apache.bookkeeper.util.DiskChecker;
import org.apache.commons.lang3.StringUtils;
// CHECKSTYLE.ON: IllegalImport
/**
 * Implementation of LedgerStorage that uses RocksDB to keep the indexes for entries stored in EntryLogs.
 *
 * <p>This class is a thin sharding layer: it creates one
 * {@code SingleDirectoryDbLedgerStorage} per configured ledger directory and
 * routes each operation to the shard that owns the ledger id.
 */
@Slf4j
public class DbLedgerStorage implements LedgerStorage {
    public static final String WRITE_CACHE_MAX_SIZE_MB = "dbStorage_writeCacheMaxSizeMb";
    public static final String READ_AHEAD_CACHE_MAX_SIZE_MB = "dbStorage_readAheadCacheMaxSizeMb";
    public static final String DIRECT_IO_ENTRYLOGGER = "dbStorage_directIOEntryLogger";
    public static final String DIRECT_IO_ENTRYLOGGER_TOTAL_WRITEBUFFER_SIZE_MB =
            "dbStorage_directIOEntryLoggerTotalWriteBufferSizeMB";
    public static final String DIRECT_IO_ENTRYLOGGER_TOTAL_READBUFFER_SIZE_MB =
            "dbStorage_directIOEntryLoggerTotalReadBufferSizeMB";
    public static final String DIRECT_IO_ENTRYLOGGER_READBUFFER_SIZE_MB =
            "dbStorage_directIOEntryLoggerReadBufferSizeMB";
    public static final String DIRECT_IO_ENTRYLOGGER_MAX_FD_CACHE_TIME_SECONDS =
            "dbStorage_directIOEntryLoggerMaxFdCacheTimeSeconds";
    static final String MAX_THROTTLE_TIME_MILLIS = "dbStorage_maxThrottleTimeMs";
    private static final int MB = 1024 * 1024;
    // Default cache budgets: 25% of estimated max direct memory each.
    private static final long DEFAULT_WRITE_CACHE_MAX_SIZE_MB =
            (long) (0.25 * PlatformDependent.estimateMaxDirectMemory()) / MB;
    private static final long DEFAULT_READ_CACHE_MAX_SIZE_MB =
            (long) (0.25 * PlatformDependent.estimateMaxDirectMemory()) / MB;
    static final String READ_AHEAD_CACHE_BATCH_SIZE = "dbStorage_readAheadCacheBatchSize";
    static final String READ_AHEAD_CACHE_BATCH_BYTES_SIZE = "dbStorage_readAheadCacheBatchBytesSize";
    private static final int DEFAULT_READ_AHEAD_CACHE_BATCH_SIZE = 100;
    // the default value is -1. this feature(limit of read ahead bytes) is disabled
    private static final int DEFAULT_READ_AHEAD_CACHE_BATCH_BYTES_SIZE = -1;
    // Direct-IO entry logger defaults: 12.5% of direct memory for each of the
    // write and read buffer pools.
    private static final long DEFAULT_DIRECT_IO_TOTAL_WRITEBUFFER_SIZE_MB =
            (long) (0.125 * PlatformDependent.estimateMaxDirectMemory())
            / MB;
    private static final long DEFAULT_DIRECT_IO_TOTAL_READBUFFER_SIZE_MB =
            (long) (0.125 * PlatformDependent.estimateMaxDirectMemory())
            / MB;
    private static final long DEFAULT_DIRECT_IO_READBUFFER_SIZE_MB = 8;
    private static final int DEFAULT_DIRECT_IO_MAX_FD_CACHE_TIME_SECONDS = 300;
    // use the storage assigned to ledger 0 for flags.
    // if the storage configuration changes, the flags may be lost
    // but in that case data integrity should kick off anyhow.
    private static final long STORAGE_FLAGS_KEY = 0L;
    private int numberOfDirs;
    // One shard per ledger directory; populated in initialize().
    private List<SingleDirectoryDbLedgerStorage> ledgerStorageList;
    // Only used when the direct-IO entry logger is enabled.
    private ExecutorService entryLoggerWriteExecutor = null;
    private ExecutorService entryLoggerFlushExecutor = null;
    protected ByteBufAllocator allocator;
    // parent DbLedgerStorage stats (not per directory)
    private static final String MAX_READAHEAD_BATCH_SIZE = "readahead-max-batch-size";
    private static final String MAX_WRITE_CACHE_SIZE = "write-cache-max-size";
    @StatsDoc(
        name = MAX_READAHEAD_BATCH_SIZE,
        help = "the configured readahead batch size"
    )
    private Gauge<Integer> readaheadBatchSizeGauge;
    @StatsDoc(
        name = MAX_WRITE_CACHE_SIZE,
        help = "the configured write cache size"
    )
    private Gauge<Long> writeCacheSizeGauge;
    /**
     * Initializes the storage: validates the cache budgets, splits them evenly
     * across the ledger directories, and creates one
     * {@code SingleDirectoryDbLedgerStorage} (with its own entry logger and
     * directory managers) per configured directory.
     *
     * @throws IOException if the combined cache sizes exceed direct memory, or
     *         the ledger and index directory counts differ
     */
    @Override
    public void initialize(ServerConfiguration conf, LedgerManager ledgerManager, LedgerDirsManager ledgerDirsManager,
                           LedgerDirsManager indexDirsManager, StatsLogger statsLogger, ByteBufAllocator allocator)
            throws IOException {
        long writeCacheMaxSize = getLongVariableOrDefault(conf, WRITE_CACHE_MAX_SIZE_MB,
                DEFAULT_WRITE_CACHE_MAX_SIZE_MB) * MB;
        long readCacheMaxSize = getLongVariableOrDefault(conf, READ_AHEAD_CACHE_MAX_SIZE_MB,
                DEFAULT_READ_CACHE_MAX_SIZE_MB) * MB;
        boolean directIOEntryLogger = getBooleanVariableOrDefault(conf, DIRECT_IO_ENTRYLOGGER, false);
        this.allocator = allocator;
        this.numberOfDirs = ledgerDirsManager.getAllLedgerDirs().size();
        log.info("Started Db Ledger Storage");
        log.info(" - Number of directories: {}", numberOfDirs);
        log.info(" - Write cache size: {} MB", writeCacheMaxSize / MB);
        log.info(" - Read Cache: {} MB", readCacheMaxSize / MB);
        if (readCacheMaxSize + writeCacheMaxSize > PlatformDependent.estimateMaxDirectMemory()) {
            throw new IOException("Read and write cache sizes exceed the configured max direct memory size");
        }
        if (ledgerDirsManager.getAllLedgerDirs().size() != indexDirsManager.getAllLedgerDirs().size()) {
            throw new IOException("ledger and index dirs size not matched");
        }
        // Each per-directory shard gets an equal share of the cache budgets.
        long perDirectoryWriteCacheSize = writeCacheMaxSize / numberOfDirs;
        long perDirectoryReadCacheSize = readCacheMaxSize / numberOfDirs;
        int readAheadCacheBatchSize = conf.getInt(READ_AHEAD_CACHE_BATCH_SIZE, DEFAULT_READ_AHEAD_CACHE_BATCH_SIZE);
        long readAheadCacheBatchBytesSize = conf.getInt(READ_AHEAD_CACHE_BATCH_BYTES_SIZE,
                DEFAULT_READ_AHEAD_CACHE_BATCH_BYTES_SIZE);
        ledgerStorageList = Lists.newArrayList();
        for (int i = 0; i < ledgerDirsManager.getAllLedgerDirs().size(); i++) {
            File ledgerDir = ledgerDirsManager.getAllLedgerDirs().get(i);
            File indexDir = indexDirsManager.getAllLedgerDirs().get(i);
            // Create a ledger dirs manager for the single directory
            File[] lDirs = new File[1];
            // Remove the `/current` suffix which will be appended again by LedgersDirManager
            lDirs[0] = ledgerDir.getParentFile();
            LedgerDirsManager ldm = new LedgerDirsManager(conf, lDirs, ledgerDirsManager.getDiskChecker(),
                    NullStatsLogger.INSTANCE);
            // Create a index dirs manager for the single directory
            File[] iDirs = new File[1];
            // Remove the `/current` suffix which will be appended again by LedgersDirManager
            iDirs[0] = indexDir.getParentFile();
            LedgerDirsManager idm = new LedgerDirsManager(conf, iDirs, indexDirsManager.getDiskChecker(),
                    NullStatsLogger.INSTANCE);
            EntryLogger entrylogger;
            if (directIOEntryLogger) {
                // Direct-IO path: buffer pools are also split evenly per directory.
                long perDirectoryTotalWriteBufferSize = MB * getLongVariableOrDefault(
                        conf,
                        DIRECT_IO_ENTRYLOGGER_TOTAL_WRITEBUFFER_SIZE_MB,
                        DEFAULT_DIRECT_IO_TOTAL_WRITEBUFFER_SIZE_MB) / numberOfDirs;
                long perDirectoryTotalReadBufferSize = MB * getLongVariableOrDefault(
                        conf,
                        DIRECT_IO_ENTRYLOGGER_TOTAL_READBUFFER_SIZE_MB,
                        DEFAULT_DIRECT_IO_TOTAL_READBUFFER_SIZE_MB) / numberOfDirs;
                int readBufferSize = MB * (int) getLongVariableOrDefault(
                        conf,
                        DIRECT_IO_ENTRYLOGGER_READBUFFER_SIZE_MB,
                        DEFAULT_DIRECT_IO_READBUFFER_SIZE_MB);
                int maxFdCacheTimeSeconds = (int) getLongVariableOrDefault(
                        conf,
                        DIRECT_IO_ENTRYLOGGER_MAX_FD_CACHE_TIME_SECONDS,
                        DEFAULT_DIRECT_IO_MAX_FD_CACHE_TIME_SECONDS);
                Slf4jSlogger slog = new Slf4jSlogger(DbLedgerStorage.class);
                entryLoggerWriteExecutor = Executors.newSingleThreadExecutor(
                        new DefaultThreadFactory("EntryLoggerWrite"));
                entryLoggerFlushExecutor = Executors.newSingleThreadExecutor(
                        new DefaultThreadFactory("EntryLoggerFlush"));
                // Fall back to the server IO thread count when no read workers
                // are configured.
                int numReadThreads = conf.getNumReadWorkerThreads();
                if (numReadThreads == 0) {
                    numReadThreads = conf.getServerNumIOThreads();
                }
                entrylogger = new DirectEntryLogger(ledgerDir, new EntryLogIdsImpl(ldm, slog),
                        new NativeIOImpl(),
                        allocator, entryLoggerWriteExecutor, entryLoggerFlushExecutor,
                        conf.getEntryLogSizeLimit(),
                        conf.getNettyMaxFrameSizeBytes() - 500,
                        perDirectoryTotalWriteBufferSize,
                        perDirectoryTotalReadBufferSize,
                        readBufferSize,
                        numReadThreads,
                        maxFdCacheTimeSeconds,
                        slog, statsLogger);
            } else {
                entrylogger = new DefaultEntryLogger(conf, ldm, null, statsLogger, allocator);
            }
            ledgerStorageList.add(newSingleDirectoryDbLedgerStorage(conf, ledgerManager, ldm,
                                                                    idm, entrylogger,
                                                                    statsLogger, perDirectoryWriteCacheSize,
                                                                    perDirectoryReadCacheSize,
                                                                    readAheadCacheBatchSize, readAheadCacheBatchBytesSize));
            ldm.getListeners().forEach(ledgerDirsManager::addLedgerDirsListener);
            if (!lDirs[0].getPath().equals(iDirs[0].getPath())) {
                idm.getListeners().forEach(indexDirsManager::addLedgerDirsListener);
            }
        }
        // parent DbLedgerStorage stats (not per directory)
        readaheadBatchSizeGauge = new Gauge<Integer>() {
            @Override
            public Integer getDefaultValue() {
                return readAheadCacheBatchSize;
            }
            @Override
            public Integer getSample() {
                return readAheadCacheBatchSize;
            }
        };
        statsLogger.registerGauge(MAX_READAHEAD_BATCH_SIZE, readaheadBatchSizeGauge);
        writeCacheSizeGauge = new Gauge<Long>() {
            @Override
            public Long getDefaultValue() {
                return perDirectoryWriteCacheSize;
            }
            @Override
            public Long getSample() {
                return perDirectoryWriteCacheSize;
            }
        };
        statsLogger.registerGauge(MAX_WRITE_CACHE_SIZE, writeCacheSizeGauge);
    }
    // Factory for a per-directory shard; overridable in tests to inject mocks.
    @VisibleForTesting
    protected SingleDirectoryDbLedgerStorage newSingleDirectoryDbLedgerStorage(ServerConfiguration conf,
            LedgerManager ledgerManager, LedgerDirsManager ledgerDirsManager, LedgerDirsManager indexDirsManager,
            EntryLogger entryLogger, StatsLogger statsLogger, long writeCacheSize, long readCacheSize,
            int readAheadCacheBatchSize, long readAheadCacheBatchBytesSize)
            throws IOException {
        return new SingleDirectoryDbLedgerStorage(conf, ledgerManager, ledgerDirsManager, indexDirsManager, entryLogger,
                                                  statsLogger, allocator, writeCacheSize, readCacheSize,
                                                  readAheadCacheBatchSize, readAheadCacheBatchBytesSize);
    }
    // Lifecycle wiring: broadcast to every per-directory shard.
    @Override
    public void setStateManager(StateManager stateManager) {
        ledgerStorageList.forEach(s -> s.setStateManager(stateManager));
    }
    @Override
    public void setCheckpointSource(CheckpointSource checkpointSource) {
        ledgerStorageList.forEach(s -> s.setCheckpointSource(checkpointSource));
    }
    @Override
    public void setCheckpointer(Checkpointer checkpointer) {
        ledgerStorageList.forEach(s -> s.setCheckpointer(checkpointer));
    }
    @Override
    public void start() {
        ledgerStorageList.forEach(LedgerStorage::start);
    }
@Override
public void shutdown() throws InterruptedException {
for (LedgerStorage ls : ledgerStorageList) {
ls.shutdown();
}
if (entryLoggerWriteExecutor != null) {
entryLoggerWriteExecutor.shutdown();
}
if (entryLoggerFlushExecutor != null) {
entryLoggerFlushExecutor.shutdown();
}
}
    // The operations below are routed to the shard that owns the ledger id
    // (see getLedgerStorage).
    @Override
    public boolean ledgerExists(long ledgerId) throws IOException {
        return getLedgerStorage(ledgerId).ledgerExists(ledgerId);
    }
    @Override
    public boolean entryExists(long ledgerId, long entryId) throws IOException, BookieException {
        return getLedgerStorage(ledgerId).entryExists(ledgerId, entryId);
    }
    @Override
    public boolean setFenced(long ledgerId) throws IOException {
        return getLedgerStorage(ledgerId).setFenced(ledgerId);
    }
    @Override
    public boolean isFenced(long ledgerId) throws IOException, BookieException {
        return getLedgerStorage(ledgerId).isFenced(ledgerId);
    }
    @Override
    public void setMasterKey(long ledgerId, byte[] masterKey) throws IOException {
        getLedgerStorage(ledgerId).setMasterKey(ledgerId, masterKey);
    }
    @Override
    public byte[] readMasterKey(long ledgerId) throws IOException, BookieException {
        return getLedgerStorage(ledgerId).readMasterKey(ledgerId);
    }
    @Override
    public long addEntry(ByteBuf entry) throws IOException, BookieException {
        // The ledger id is the first long of the serialized entry; peek it
        // without moving the reader index.
        long ledgerId = entry.getLong(entry.readerIndex());
        return getLedgerStorage(ledgerId).addEntry(entry);
    }
    @Override
    public ByteBuf getEntry(long ledgerId, long entryId) throws IOException, BookieException {
        return getLedgerStorage(ledgerId).getEntry(ledgerId, entryId);
    }
    @Override
    public long getLastAddConfirmed(long ledgerId) throws IOException, BookieException {
        return getLedgerStorage(ledgerId).getLastAddConfirmed(ledgerId);
    }
    @Override
    public boolean waitForLastAddConfirmedUpdate(long ledgerId, long previousLAC,
                                                 Watcher<LastAddConfirmedUpdateNotification> watcher) throws IOException {
        return getLedgerStorage(ledgerId).waitForLastAddConfirmedUpdate(ledgerId, previousLAC, watcher);
    }
    @Override
    public void cancelWaitForLastAddConfirmedUpdate(long ledgerId,
                                                    Watcher<LastAddConfirmedUpdateNotification> watcher)
            throws IOException {
        getLedgerStorage(ledgerId).cancelWaitForLastAddConfirmedUpdate(ledgerId, watcher);
    }
    // flush/checkpoint fan out to every shard.
    @Override
    public void flush() throws IOException {
        for (LedgerStorage ls : ledgerStorageList) {
            ls.flush();
        }
    }
    @Override
    public void checkpoint(Checkpoint checkpoint) throws IOException {
        for (LedgerStorage ls : ledgerStorageList) {
            ls.checkpoint(checkpoint);
        }
    }
    @Override
    public void deleteLedger(long ledgerId) throws IOException {
        getLedgerStorage(ledgerId).deleteLedger(ledgerId);
    }
    @Override
    public void registerLedgerDeletionListener(LedgerDeletionListener listener) {
        ledgerStorageList.forEach(ls -> ls.registerLedgerDeletionListener(listener));
    }
    @Override
    public void setExplicitLac(long ledgerId, ByteBuf lac) throws IOException {
        getLedgerStorage(ledgerId).setExplicitLac(ledgerId, lac);
    }
    @Override
    public ByteBuf getExplicitLac(long ledgerId) throws IOException, BookieException {
        return getLedgerStorage(ledgerId).getExplicitLac(ledgerId);
    }
    // Migration helper: bulk-loads an existing ledger's index pages into the shard.
    public long addLedgerToIndex(long ledgerId, boolean isFenced, byte[] masterKey,
            LedgerCache.PageEntriesIterable pages) throws Exception {
        return getLedgerStorage(ledgerId).addLedgerToIndex(ledgerId, isFenced, masterKey, pages);
    }
    public long getLastEntryInLedger(long ledgerId) throws IOException {
        return getLedgerStorage(ledgerId).getEntryLocationIndex().getLastEntryInLedger(ledgerId);
    }
    public long getLocation(long ledgerId, long entryId) throws IOException {
        return getLedgerStorage(ledgerId).getEntryLocationIndex().getLocation(ledgerId, entryId);
    }
    // Shard selection: ledgerId mod numberOfDirs, sign-safe so negative ids map
    // into range too. Must stay stable across restarts for data to be found.
    private SingleDirectoryDbLedgerStorage getLedgerStorage(long ledgerId) {
        return ledgerStorageList.get(MathUtils.signSafeMod(ledgerId, numberOfDirs));
    }
    // Union of the active-ledger ranges from every shard.
    public Iterable<Long> getActiveLedgersInRange(long firstLedgerId, long lastLedgerId) throws IOException {
        List<Iterable<Long>> listIt = new ArrayList<>(numberOfDirs);
        for (SingleDirectoryDbLedgerStorage ls : ledgerStorageList) {
            listIt.add(ls.getActiveLedgersInRange(firstLedgerId, lastLedgerId));
        }
        return Iterables.concat(listIt);
    }
    public ByteBuf getLastEntry(long ledgerId) throws IOException, BookieException {
        return getLedgerStorage(ledgerId).getLastEntry(ledgerId);
    }
    // Test-only: true only when every shard reports a pending flush.
    @VisibleForTesting
    boolean isFlushRequired() {
        return ledgerStorageList.stream().allMatch(SingleDirectoryDbLedgerStorage::isFlushRequired);
    }
    @VisibleForTesting
    List<SingleDirectoryDbLedgerStorage> getLedgerStorageList() {
        return ledgerStorageList;
    }
    /**
     * Reads ledger index entries to get list of entry-logger that contains given ledgerId.
     *
     * <p>Opens the entry-location RocksDB for the directory that owns the ledger
     * (read-only), walks entry ids 0..lastEntry, and invokes the processor for
     * each entry found on this bookie.
     *
     * @param ledgerId
     * @param serverConf
     * @param processor
     * @throws IOException
     */
    public static void readLedgerIndexEntries(long ledgerId, ServerConfiguration serverConf,
                                              LedgerLoggerProcessor processor) throws IOException {
        checkNotNull(serverConf, "ServerConfiguration can't be null");
        checkNotNull(processor, "LedgerLoggger info processor can't null");
        DiskChecker diskChecker = new DiskChecker(serverConf.getDiskUsageThreshold(),
                serverConf.getDiskUsageWarnThreshold());
        LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(serverConf,
                serverConf.getLedgerDirs(), diskChecker);
        LedgerDirsManager indexDirsManager = ledgerDirsManager;
        File[] idxDirs = serverConf.getIndexDirs();
        if (null != idxDirs) {
            indexDirsManager = new LedgerDirsManager(serverConf, idxDirs, diskChecker);
        }
        List<File> ledgerDirs = ledgerDirsManager.getAllLedgerDirs();
        List<File> indexDirs = indexDirsManager.getAllLedgerDirs();
        if (ledgerDirs.size() != indexDirs.size()) {
            throw new IOException("ledger and index dirs size not matched");
        }
        // Same sign-safe modulo used by getLedgerStorage(), so we open the index
        // of the directory that actually owns this ledger.
        int dirIndex = MathUtils.signSafeMod(ledgerId, ledgerDirs.size());
        String indexBasePath = indexDirs.get(dirIndex).toString();
        EntryLocationIndex entryLocationIndex = new EntryLocationIndex(serverConf,
                (basePath, subPath, dbConfigType, conf1) ->
                        new KeyValueStorageRocksDB(basePath, subPath, DbConfigType.Default, conf1, true),
                indexBasePath, NullStatsLogger.INSTANCE);
        try {
            long lastEntryId = entryLocationIndex.getLastEntryInLedger(ledgerId);
            for (long currentEntry = 0; currentEntry <= lastEntryId; currentEntry++) {
                long offset = entryLocationIndex.getLocation(ledgerId, currentEntry);
                if (offset <= 0) {
                    // entry not found in this bookie
                    continue;
                }
                // A location packs (entryLogId << 32) | positionWithinLog.
                long entryLogId = offset >> 32L;
                long position = offset & 0xffffffffL;
                processor.process(currentEntry, entryLogId, position);
            }
        } finally {
            entryLocationIndex.close();
        }
    }
    // GC / compaction controls fan out to every shard; "is*" predicates use
    // anyMatch when one busy shard suffices and allMatch when all must agree.
    @Override
    public void forceGC() {
        ledgerStorageList.stream().forEach(SingleDirectoryDbLedgerStorage::forceGC);
    }
    @Override
    public void forceGC(boolean forceMajor, boolean forceMinor) {
        ledgerStorageList.stream().forEach(s -> s.forceGC(forceMajor, forceMinor));
    }
    @Override
    public boolean isInForceGC() {
        return ledgerStorageList.stream().anyMatch(SingleDirectoryDbLedgerStorage::isInForceGC);
    }
    @Override
    public void suspendMinorGC() {
        ledgerStorageList.stream().forEach(SingleDirectoryDbLedgerStorage::suspendMinorGC);
    }
    @Override
    public void suspendMajorGC() {
        ledgerStorageList.stream().forEach(SingleDirectoryDbLedgerStorage::suspendMajorGC);
    }
    @Override
    public void resumeMinorGC() {
        ledgerStorageList.stream().forEach(SingleDirectoryDbLedgerStorage::resumeMinorGC);
    }
    @Override
    public void resumeMajorGC() {
        ledgerStorageList.stream().forEach(SingleDirectoryDbLedgerStorage::resumeMajorGC);
    }
    @Override
    public boolean isMajorGcSuspended() {
        return ledgerStorageList.stream().allMatch(SingleDirectoryDbLedgerStorage::isMajorGcSuspended);
    }
    @Override
    public boolean isMinorGcSuspended() {
        return ledgerStorageList.stream().allMatch(SingleDirectoryDbLedgerStorage::isMinorGcSuspended);
    }
    @Override
    public void entryLocationCompact() {
        ledgerStorageList.forEach(SingleDirectoryDbLedgerStorage::entryLocationCompact);
    }
    // Compacts only shards whose entry-location DB path is in the given list.
    @Override
    public void entryLocationCompact(List<String> locations) {
        for (SingleDirectoryDbLedgerStorage ledgerStorage : ledgerStorageList) {
            String entryLocation = ledgerStorage.getEntryLocationDBPath().get(0);
            if (locations.contains(entryLocation)) {
                ledgerStorage.entryLocationCompact();
            }
        }
    }
    @Override
    public boolean isEntryLocationCompacting() {
        return ledgerStorageList.stream().anyMatch(SingleDirectoryDbLedgerStorage::isEntryLocationCompacting);
    }
    // Per-path compaction status, restricted to the requested locations.
    @Override
    public Map<String, Boolean> isEntryLocationCompacting(List<String> locations) {
        HashMap<String, Boolean> isCompacting = Maps.newHashMap();
        for (SingleDirectoryDbLedgerStorage ledgerStorage : ledgerStorageList) {
            String entryLocation = ledgerStorage.getEntryLocationDBPath().get(0);
            if (locations.contains(entryLocation)) {
                isCompacting.put(entryLocation, ledgerStorage.isEntryLocationCompacting());
            }
        }
        return isCompacting;
    }
    // All entry-location DB paths across the shards.
    @Override
    public List<String> getEntryLocationDBPath() {
        List<String> allEntryLocationDBPath = Lists.newArrayList();
        for (SingleDirectoryDbLedgerStorage ledgerStorage : ledgerStorageList) {
            allEntryLocationDBPath.addAll(ledgerStorage.getEntryLocationDBPath());
        }
        return allEntryLocationDBPath;
    }
    // One status entry per shard (each shard reports a singleton list).
    @Override
    public List<GarbageCollectionStatus> getGarbageCollectionStatus() {
        return ledgerStorageList.stream()
                .map(single -> single.getGarbageCollectionStatus().get(0)).collect(Collectors.toList());
    }
    /**
     * Reads a long config value, tolerating values stored either as numbers or
     * as strings; missing or empty values fall back to {@code defaultValue}.
     */
    static long getLongVariableOrDefault(ServerConfiguration conf, String keyName, long defaultValue) {
        Object obj = conf.getProperty(keyName);
        if (obj instanceof Number) {
            return ((Number) obj).longValue();
        } else if (obj == null) {
            return defaultValue;
        } else if (StringUtils.isEmpty(conf.getString(keyName))) {
            return defaultValue;
        } else {
            return conf.getLong(keyName);
        }
    }
    /**
     * Boolean counterpart of {@link #getLongVariableOrDefault}: accepts native
     * booleans or string values, defaulting when unset or empty.
     */
    static boolean getBooleanVariableOrDefault(ServerConfiguration conf, String keyName, boolean defaultValue) {
        Object obj = conf.getProperty(keyName);
        if (obj instanceof Boolean) {
            return (Boolean) obj;
        } else if (obj == null) {
            return defaultValue;
        } else if (StringUtils.isEmpty(conf.getString(keyName))) {
            return defaultValue;
        } else {
            return conf.getBoolean(keyName);
        }
    }
    // Not implemented by this backend; see the referenced issue.
    @Override
    public OfLong getListOfEntriesOfLedger(long ledgerId) throws IOException {
        // check Issue #2078
        throw new UnsupportedOperationException(
                "getListOfEntriesOfLedger method is currently unsupported for DbLedgerStorage");
    }
    // Limbo state is delegated to the owning shard.
    @Override
    public void setLimboState(long ledgerId) throws IOException {
        getLedgerStorage(ledgerId).setLimboState(ledgerId);
    }
    @Override
    public boolean hasLimboState(long ledgerId) throws IOException {
        return getLedgerStorage(ledgerId).hasLimboState(ledgerId);
    }
    @Override
    public void clearLimboState(long ledgerId) throws IOException {
        getLedgerStorage(ledgerId).clearLimboState(ledgerId);
    }
    // Storage-state flags are persisted on the shard owning STORAGE_FLAGS_KEY
    // (ledger 0), so all flag reads/writes go through a single shard.
    @Override
    public EnumSet<StorageState> getStorageStateFlags() throws IOException {
        return getLedgerStorage(STORAGE_FLAGS_KEY).getStorageStateFlags();
    }
    @Override
    public void setStorageStateFlag(StorageState flag) throws IOException {
        getLedgerStorage(STORAGE_FLAGS_KEY).setStorageStateFlag(flag);
    }
    @Override
    public void clearStorageStateFlag(StorageState flag) throws IOException {
        getLedgerStorage(STORAGE_FLAGS_KEY).clearStorageStateFlag(flag);
    }
}
| 549 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/ArrayUtil.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import java.nio.ByteOrder;
/**
 * Utility to serialize/deserialize longs into big-endian byte arrays, using
 * unaligned unsafe access when the platform supports it and a portable
 * shift-based fallback otherwise.
 */
final class ArrayUtil {

    private static final boolean UNALIGNED = io.netty.util.internal.PlatformDependent.isUnaligned();
    private static final boolean HAS_UNSAFE = io.netty.util.internal.PlatformDependent.hasUnsafe();
    private static final boolean BIG_ENDIAN_NATIVE_ORDER = ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN;

    /** Reads 8 bytes starting at {@code index} as a big-endian long. */
    public static long getLong(byte[] array, int index) {
        if (HAS_UNSAFE && UNALIGNED) {
            long v = io.netty.util.internal.PlatformDependent.getLong(array, index);
            // The unsafe read is in native byte order; normalize to big-endian.
            return BIG_ENDIAN_NATIVE_ORDER ? v : Long.reverseBytes(v);
        }
        return ((long) array[index] & 0xff) << 56
                | ((long) array[index + 1] & 0xff) << 48
                | ((long) array[index + 2] & 0xff) << 40
                | ((long) array[index + 3] & 0xff) << 32
                | ((long) array[index + 4] & 0xff) << 24
                | ((long) array[index + 5] & 0xff) << 16
                | ((long) array[index + 6] & 0xff) << 8
                | (long) array[index + 7] & 0xff;
    }

    /** Writes {@code value} as 8 big-endian bytes starting at {@code index}. */
    public static void setLong(byte[] array, int index, long value) {
        if (HAS_UNSAFE && UNALIGNED) {
            io.netty.util.internal.PlatformDependent
                    .putLong(array, index, BIG_ENDIAN_NATIVE_ORDER ? value : Long.reverseBytes(value));
        } else {
            array[index] = (byte) (value >>> 56);
            array[index + 1] = (byte) (value >>> 48);
            array[index + 2] = (byte) (value >>> 40);
            array[index + 3] = (byte) (value >>> 32);
            array[index + 4] = (byte) (value >>> 24);
            array[index + 5] = (byte) (value >>> 16);
            array[index + 6] = (byte) (value >>> 8);
            array[index + 7] = (byte) value;
        }
    }

    /** Returns true iff every byte of the array is zero. */
    public static boolean isArrayAllZeros(final byte[] array) {
        return io.netty.util.internal.PlatformDependent.isZero(array, 0, array.length);
    }

    // Static-only utility class: prevent instantiation.
    private ArrayUtil() {
    }
}
| 550 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/LocationsIndexRebuildOp.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import com.google.common.collect.Sets;
import io.netty.buffer.ByteBuf;
import java.io.File;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.bookkeeper.bookie.BookieImpl;
import org.apache.bookkeeper.bookie.DefaultEntryLogger;
import org.apache.bookkeeper.bookie.LedgerDirsManager;
import org.apache.bookkeeper.bookie.storage.EntryLogScanner;
import org.apache.bookkeeper.bookie.storage.ldb.KeyValueStorageFactory.DbConfigType;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.util.DiskChecker;
import org.apache.commons.lang.time.DurationFormatUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Scan all entries in the entry log and rebuild the locations index.
*/
/**
 * Scan all entries in the entry logs and rebuild the "locations" index
 * ((ledgerId, entryId) -> entry-log location) used by DbLedgerStorage.
 *
 * <p>For each ledger/index dir pair: the existing locations index is moved
 * to a timestamped backup, a fresh RocksDB index is created in its place,
 * and every entry of every entry log belonging to a still-active ledger is
 * re-inserted in batches of {@link #BATCH_COMMIT_SIZE}.
 */
public class LocationsIndexRebuildOp {
    private static final Logger LOG = LoggerFactory.getLogger(LocationsIndexRebuildOp.class);

    // Rotate the RocksDB write batch after roughly this many puts to bound memory.
    private static final int BATCH_COMMIT_SIZE = 10_000;

    private final ServerConfiguration conf;

    public LocationsIndexRebuildOp(ServerConfiguration conf) {
        this.conf = conf;
    }

    /**
     * Run the rebuild across all configured dirs.
     *
     * @throws IOException if dirs are misconfigured or any scan/DB operation fails
     */
    public void initiate() throws IOException {
        LOG.info("Starting locations index rebuilding");
        File[] indexDirs = conf.getIndexDirs();
        if (indexDirs == null) {
            // No dedicated index dirs configured: indices live in the ledger dirs.
            indexDirs = conf.getLedgerDirs();
        }
        if (indexDirs.length != conf.getLedgerDirs().length) {
            throw new IOException("ledger and index dirs size not matched");
        }
        long startTime = System.nanoTime();

        // Move locations index to a backup directory, then rebuild in place.
        for (int i = 0; i < conf.getLedgerDirs().length; i++) {
            File ledgerDir = conf.getLedgerDirs()[i];
            File indexDir = indexDirs[i];
            String iBasePath = BookieImpl.getCurrentDirectory(indexDir).toString();
            Path indexCurrentPath = FileSystems.getDefault().getPath(iBasePath, "locations");
            String timestamp = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ").format(new Date());
            Path backupPath = FileSystems.getDefault().getPath(iBasePath, "locations.BACKUP-" + timestamp);
            Files.move(indexCurrentPath, backupPath);
            LOG.info("Created locations index backup at {}", backupPath);

            File[] lDirs = new File[1];
            lDirs[0] = ledgerDir;
            DefaultEntryLogger entryLogger = new DefaultEntryLogger(conf, new LedgerDirsManager(conf, lDirs,
                    new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold())));
            Set<Long> entryLogs = entryLogger.getEntryLogsSet();

            // Only re-index entries of ledgers still present in the ledger metadata index.
            Set<Long> activeLedgers = getActiveLedgers(conf, KeyValueStorageRocksDB.factory, iBasePath);
            LOG.info("Found {} active ledgers in ledger manager", activeLedgers.size());

            KeyValueStorage newIndex = KeyValueStorageRocksDB.factory.newKeyValueStorage(iBasePath, "locations",
                    DbConfigType.Default, conf);

            int totalEntryLogs = entryLogs.size();
            int completedEntryLogs = 0;
            LOG.info("Scanning {} entry logs", totalEntryLogs);

            // Mutable holders so the scanner callback below can rotate batches.
            AtomicReference<KeyValueStorage.Batch> batch = new AtomicReference<>(newIndex.newBatch());
            AtomicInteger count = new AtomicInteger();
            for (long entryLogId : entryLogs) {
                entryLogger.scanEntryLog(entryLogId, new EntryLogScanner() {
                    @Override
                    public void process(long ledgerId, long offset, ByteBuf entry) throws IOException {
                        long entryId = entry.getLong(8);

                        // Actual location indexed is pointing past the entry size:
                        // high 32 bits = entry log id, low 32 bits = offset + 4.
                        long location = (entryLogId << 32L) | (offset + 4);

                        if (LOG.isDebugEnabled()) {
                            // Bug fix: mask with 0xFFFFFFFFL to display the full 32-bit
                            // offset; the old (Integer.MAX_VALUE - 1) mask (0x7FFFFFFE)
                            // silently dropped bit 0 and bit 31 of the offset.
                            LOG.debug("Rebuilding {}:{} at location {} / {}", ledgerId, entryId, location >> 32,
                                    location & 0xFFFFFFFFL);
                        }

                        // Update the ledger index page.
                        LongPairWrapper key = LongPairWrapper.get(ledgerId, entryId);
                        LongWrapper value = LongWrapper.get(location);
                        try {
                            batch.get().put(key.array, value.array);
                        } finally {
                            key.recycle();
                            value.recycle();
                        }

                        // Flush and start a new batch once the current one is full.
                        if (count.incrementAndGet() > BATCH_COMMIT_SIZE) {
                            batch.get().flush();
                            batch.get().close();
                            batch.set(newIndex.newBatch());
                            count.set(0);
                        }
                    }

                    @Override
                    public boolean accept(long ledgerId) {
                        return activeLedgers.contains(ledgerId);
                    }
                });

                ++completedEntryLogs;
                LOG.info("Completed scanning of log {}.log -- {} / {}", Long.toHexString(entryLogId),
                        completedEntryLogs, totalEntryLogs);
            }

            // Commit the final (possibly partial) batch and make the index durable.
            batch.get().flush();
            batch.get().close();
            newIndex.sync();
            newIndex.close();
        }

        LOG.info("Rebuilding index is done. Total time: {}",
                DurationFormatUtils.formatDurationHMS(
                        TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)));
    }

    /** Read the set of ledger ids recorded in the on-disk ledger metadata index. */
    private Set<Long> getActiveLedgers(ServerConfiguration conf, KeyValueStorageFactory storageFactory, String basePath)
            throws IOException {
        LedgerMetadataIndex ledgers = new LedgerMetadataIndex(conf, storageFactory, basePath, NullStatsLogger.INSTANCE);
        Set<Long> activeLedgers = Sets.newHashSet();
        for (Long ledger : ledgers.getActiveLedgersInRange(0, Long.MAX_VALUE)) {
            activeLedgers.add(ledger);
        }
        ledgers.close();
        return activeLedgers;
    }
}
| 551 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/package-info.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
/**
* Classes related to DB based ledger storage.
*/
package org.apache.bookkeeper.bookie.storage.ldb; | 552 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/LedgersIndexRebuildOp.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie.storage.ldb;
import com.google.common.collect.Lists;
import com.google.protobuf.ByteString;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.bookkeeper.bookie.BookieImpl;
import org.apache.bookkeeper.bookie.DefaultEntryLogger;
import org.apache.bookkeeper.bookie.Journal;
import org.apache.bookkeeper.bookie.LedgerDirsManager;
import org.apache.bookkeeper.bookie.storage.EntryLogScanner;
import org.apache.bookkeeper.bookie.storage.ldb.KeyValueStorageFactory.DbConfigType;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.util.BookKeeperConstants;
import org.apache.bookkeeper.util.DiskChecker;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Scan all entries in the journal and entry log files then rebuilds the ledgers index.
* Notable stuff:
* - Fences every ledger as even if we check the metadata, we cannot guarantee that
* a fence request was served while the rebuild was taking place (even if the bookie
* is running in read-only mode).
* Losing the fenced status of a ledger is UNSAFE.
* - Sets the master key as an empty byte array. This is correct as empty master keys
* are overwritten and we cannot use the password from metadata, and cannot know 100%
* for sure how a digest for the password was generated.
*/
public class LedgersIndexRebuildOp {
    private static final Logger LOG = LoggerFactory.getLogger(LedgersIndexRebuildOp.class);
    private final ServerConfiguration conf;
    // When true, log every ledger id discovered during the scans.
    private final boolean verbose;
    private static final String LedgersSubPath = "ledgers";

    public LedgersIndexRebuildOp(ServerConfiguration conf, boolean verbose) {
        this.conf = conf;
        this.verbose = verbose;
    }

    /**
     * Rebuild the ledgers index for each configured index dir: scan journals
     * and entry logs for ledger ids, build a replacement index in a temp
     * subdir (every ledger fenced, empty master key — see class javadoc),
     * then swap it in, keeping the old index as a timestamped ".PREV" backup.
     *
     * @return true if every index dir was rebuilt and swapped in, false on any failure
     */
    @SuppressFBWarnings("RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE")
    public boolean initiate() {
        LOG.info("Starting ledger index rebuilding");
        File[] indexDirs = conf.getIndexDirs();
        if (indexDirs == null) {
            // No dedicated index dirs: indices live alongside the ledger dirs.
            indexDirs = conf.getLedgerDirs();
        }
        if (indexDirs.length != conf.getLedgerDirs().length) {
            LOG.error("ledger and index dirs size not matched");
            return false;
        }
        for (int i = 0; i < indexDirs.length; i++) {
            File indexDir = indexDirs[i];
            File ledgerDir = conf.getLedgerDirs()[i];
            String timestamp = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ").format(new Date());
            String indexBasePath = BookieImpl.getCurrentDirectory(indexDir).toString();
            String tempLedgersSubPath = LedgersSubPath + ".TEMP-" + timestamp;
            Path indexTempPath = FileSystems.getDefault().getPath(indexBasePath, tempLedgersSubPath);
            Path indexCurrentPath = FileSystems.getDefault().getPath(indexBasePath, LedgersSubPath);
            LOG.info("Starting scan phase (scans journal and entry log files)");
            try {
                Set<Long> ledgers = new HashSet<>();
                // NOTE(review): all journals are re-scanned on every index-dir
                // iteration; redundant work, but harmless for correctness.
                scanJournals(ledgers);
                File[] lDirs = new File[1];
                lDirs[0] = ledgerDir;
                scanEntryLogFiles(ledgers, lDirs);
                LOG.info("Scan complete, found {} ledgers. "
                        + "Starting to build a new ledgers index", ledgers.size());
                // try-with-resources closes the RocksDB handle even on failure.
                try (KeyValueStorage newIndex = KeyValueStorageRocksDB.factory.newKeyValueStorage(
                        indexBasePath, tempLedgersSubPath, DbConfigType.Default, conf)) {
                    LOG.info("Created ledgers index at temp location {}", indexTempPath);
                    for (Long ledgerId : ledgers) {
                        // Mark fenced with an empty master key: losing fenced
                        // status is unsafe, and the original key is unknowable here.
                        DbLedgerStorageDataFormats.LedgerData ledgerData =
                                DbLedgerStorageDataFormats.LedgerData.newBuilder()
                                        .setExists(true)
                                        .setFenced(true)
                                        .setMasterKey(ByteString.EMPTY).build();
                        // Key is the ledger id as a big-endian long (16-byte buffer).
                        byte[] ledgerArray = new byte[16];
                        ArrayUtil.setLong(ledgerArray, 0, ledgerId);
                        newIndex.put(ledgerArray, ledgerData.toByteArray());
                    }
                    newIndex.sync();
                }
            } catch (Throwable t) {
                LOG.error("Error during rebuild, the original index remains unchanged", t);
                delete(indexTempPath);
                return false;
            }
            // replace the existing index
            try {
                Path prevPath = FileSystems.getDefault().getPath(indexBasePath,
                        LedgersSubPath + ".PREV-" + timestamp);
                LOG.info("Moving original index from original location: {} up to back-up location: {}",
                        indexCurrentPath, prevPath);
                Files.move(indexCurrentPath, prevPath);
                LOG.info("Moving rebuilt index from: {} to: {}", indexTempPath, indexCurrentPath);
                Files.move(indexTempPath, indexCurrentPath);
                LOG.info("Original index has been replaced with the new index. "
                        + "The original index has been moved to {}", prevPath);
            } catch (IOException e) {
                LOG.error("Could not replace original index with rebuilt index. "
                        + "To return to the original state, ensure the original index is in its original location", e);
                return false;
            }
        }
        return true;
    }

    /** Scan every entry log under the given dirs, adding each seen ledger id to {@code ledgers}. */
    private void scanEntryLogFiles(Set<Long> ledgers, File[] lDirs) throws IOException {
        DefaultEntryLogger entryLogger = new DefaultEntryLogger(conf, new LedgerDirsManager(conf, lDirs,
                new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold())));
        Set<Long> entryLogs = entryLogger.getEntryLogsSet();
        int totalEntryLogs = entryLogs.size();
        int completedEntryLogs = 0;
        LOG.info("Scanning {} entry logs", totalEntryLogs);
        for (long entryLogId : entryLogs) {
            entryLogger.scanEntryLog(entryLogId, new EntryLogScanner() {
                @Override
                public void process(long ledgerId, long offset, ByteBuf entry) throws IOException {
                    if (ledgers.add(ledgerId)) {
                        if (verbose) {
                            LOG.info("Found ledger {} in entry log", ledgerId);
                        }
                    }
                }

                @Override
                public boolean accept(long ledgerId) {
                    // Accept everything: we want every ledger id present on disk.
                    return true;
                }
            });
            ++completedEntryLogs;
            LOG.info("Completed scanning of log {}.log -- {} / {}", Long.toHexString(entryLogId), completedEntryLogs,
                    totalEntryLogs);
        }
    }

    /** Scan every journal file of every journal dir, adding seen ledger ids to {@code ledgers}. */
    private void scanJournals(Set<Long> ledgers) throws IOException {
        for (Journal journal : getJournals(conf)) {
            List<Long> journalIds = Journal.listJournalIds(journal.getJournalDirectory(),
                    new Journal.JournalIdFilter() {
                        @Override
                        public boolean accept(long journalId) {
                            return true;
                        }
                    });
            for (Long journalId : journalIds) {
                scanJournal(journal, journalId, ledgers);
            }
        }
    }

    /** Build one Journal handle per configured journal dir. */
    private List<Journal> getJournals(ServerConfiguration conf) throws IOException {
        List<Journal> journals = Lists.newArrayListWithCapacity(conf.getJournalDirs().length);
        int idx = 0;
        for (File journalDir : conf.getJournalDirs()) {
            journals.add(new Journal(idx++, new File(journalDir, BookKeeperConstants.CURRENT_DIR), conf,
                    new LedgerDirsManager(conf, conf.getLedgerDirs(),
                            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()))));
        }
        return journals;
    }

    /** Scan a single journal file; the ledger id is the first long of each record. */
    private void scanJournal(Journal journal, long journalId, Set<Long> ledgers) throws IOException {
        LOG.info("Scanning journal " + journalId + " (" + Long.toHexString(journalId) + ".txn)");
        journal.scanJournal(journalId, 0L, new Journal.JournalScanner() {
            @Override
            public void process(int journalVersion, long offset, ByteBuffer entry) {
                ByteBuf buf = Unpooled.wrappedBuffer(entry);
                long ledgerId = buf.readLong();
                if (ledgers.add(ledgerId) && verbose) {
                    LOG.info("Found ledger {} in journal", ledgerId);
                }
            }
        }, false);
    }

    /** Best-effort delete of a temp path; failure is only logged. */
    private void delete(Path path) {
        try {
            Files.delete(path);
        } catch (IOException e) {
            LOG.warn("Unable to delete {}", path.toAbsolutePath(), e);
        }
    }
}
| 553 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/datainteg/DataIntegrityCheckImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.datainteg;
import com.google.common.collect.ImmutableSortedMap;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Maybe;
import io.reactivex.rxjava3.core.Scheduler;
import io.reactivex.rxjava3.core.Single;
import io.reactivex.rxjava3.disposables.Disposable;
import java.io.IOException;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.BookieException;
import org.apache.bookkeeper.bookie.LedgerStorage;
import org.apache.bookkeeper.bookie.LedgerStorage.StorageState;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.BookKeeperAdmin;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.net.BookieId;
/**
* An implementation of the DataIntegrityCheck interface.
*/
@Slf4j
public class DataIntegrityCheckImpl implements DataIntegrityCheck {
// Max concurrent per-ledger operations in flight (metadata reads, ledger checks).
private static final int MAX_INFLIGHT = 300;
// Max concurrent entry copies in flight while checking a single ledger.
private static final int MAX_ENTRIES_INFLIGHT = 3000;
// Timeout (seconds) handed to MetadataAsyncIterator for metadata reads.
private static final int ZK_TIMEOUT_S = 30;
private final BookieId bookieId;
private final LedgerManager ledgerManager;
private final LedgerStorage ledgerStorage;
private final EntryCopier entryCopier;
private final BookKeeperAdmin admin;
private final Scheduler scheduler;
// Cache of ledgerId -> metadata for ledgers whose ensembles contain this
// bookie; installed by the preboot sequence or the first full check.
private final AtomicReference<Map<Long, LedgerMetadata>> ledgersCacheRef =
        new AtomicReference<>(null);
// Lazily initialized under 'synchronized' in runPreBootCheck; never reset,
// so the preboot sequence runs at most once per process.
private CompletableFuture<Void> preBootFuture;

public DataIntegrityCheckImpl(BookieId bookieId,
                              LedgerManager ledgerManager,
                              LedgerStorage ledgerStorage,
                              EntryCopier entryCopier,
                              BookKeeperAdmin admin,
                              Scheduler scheduler) {
    this.bookieId = bookieId;
    this.ledgerManager = ledgerManager;
    this.ledgerStorage = ledgerStorage;
    this.entryCopier = entryCopier;
    this.admin = admin;
    this.scheduler = scheduler;
}
// Idempotent entry point: several callers (with possibly different reasons)
// may trigger the preboot check, but the sequence itself runs only once and
// all callers share the same future.
@Override
public synchronized CompletableFuture<Void> runPreBootCheck(String reason) {
    // we only run this once, it could be kicked off by different checks
    if (preBootFuture == null) {
        preBootFuture = runPreBootSequence(reason);
    }
    return preBootFuture;
}
/**
 * The preboot sequence: persist the NEEDS_INTEGRITY_CHECK flag, then iterate
 * all ledger metadata. For each ledger whose ensembles contain this bookie,
 * ensure the ledger exists locally, cache its metadata and (via processPreBoot)
 * mark unclosed last-ensemble ledgers in-limbo and fenced. On success, flushes
 * storage and installs the metadata cache for the later full check.
 */
private CompletableFuture<Void> runPreBootSequence(String reason) {
    String runId = UUID.randomUUID().toString();
    log.info("Event: {}, RunId: {}, Reason: {}", Events.PREBOOT_START, runId, reason);
    try {
        // Set the flag first so a crash mid-sequence still forces a full check later.
        this.ledgerStorage.setStorageStateFlag(StorageState.NEEDS_INTEGRITY_CHECK);
    } catch (IOException ioe) {
        log.error("Event: {}, RunId: {}", Events.PREBOOT_ERROR, runId, ioe);
        return FutureUtils.exception(ioe);
    }
    MetadataAsyncIterator iter = new MetadataAsyncIterator(scheduler,
            ledgerManager, MAX_INFLIGHT, ZK_TIMEOUT_S, TimeUnit.SECONDS);
    CompletableFuture<Void> promise = new CompletableFuture<>();
    // Iterated in descending ledger-id order; must be concurrency-safe as the
    // iterator invokes the callback from multiple in-flight operations.
    Map<Long, LedgerMetadata> ledgersCache =
            new ConcurrentSkipListMap<>(Comparator.<Long>naturalOrder().reversed());
    iter.forEach((ledgerId, metadata) -> {
        if (ensemblesContainBookie(metadata, bookieId)) {
            ledgersCache.put(ledgerId, metadata);
            try {
                // Create the ledger locally (empty master key) if absent, so
                // later storage operations on it cannot fail with "no ledger".
                if (!ledgerStorage.ledgerExists(ledgerId)) {
                    ledgerStorage.setMasterKey(ledgerId, new byte[0]);
                }
            } catch (IOException ioe) {
                log.error("Event: {}, RunId: {}, LedgerId: {}",
                        Events.ENSURE_LEDGER_ERROR, runId, ledgerId, ioe);
                return FutureUtils.exception(ioe);
            }
        }
        return processPreBoot(ledgerId, metadata, runId);
    })
            .whenComplete((ignore, exception) -> {
                if (exception != null) {
                    log.error("Event: {}, runId: {}", Events.PREBOOT_ERROR, runId, exception);
                    promise.completeExceptionally(exception);
                } else {
                    try {
                        // Make limbo/fence markers durable before declaring success.
                        this.ledgerStorage.flush();
                        updateMetadataCache(ledgersCache);
                        log.info("Event: {}, runId: {}, processed: {}",
                                Events.PREBOOT_END, runId, ledgersCache.size());
                        promise.complete(null);
                    } catch (Throwable t) {
                        log.error("Event: {}, runId: {}", Events.PREBOOT_ERROR, runId, t);
                        promise.completeExceptionally(t);
                    }
                }
            });
    return promise;
}
// A full check is pending while the NEEDS_INTEGRITY_CHECK storage flag is set;
// runFullCheck clears the flag only once no ledgers remain to retry.
@Override
public boolean needsFullCheck() throws IOException {
    return this.ledgerStorage.getStorageStateFlags()
            .contains(StorageState.NEEDS_INTEGRITY_CHECK);
}
/**
 * Run one pass of the full check: check/recover every cached ledger, drop
 * those resolved as OK or missing, keep errored ones (with refreshed
 * metadata) for the next pass, and clear the NEEDS_INTEGRITY_CHECK flag
 * only when nothing is left to retry.
 */
@Override
public CompletableFuture<Void> runFullCheck() {
    String runId = UUID.randomUUID().toString();
    log.info("Event: {}, runId: {}", Events.FULL_CHECK_INIT, runId);
    return getCachedOrReadMetadata(runId)
            .thenCompose(
                    (ledgers) -> {
                        log.info("Event: {}, runId: {}, ledgerCount: {}",
                                Events.FULL_CHECK_START, runId, ledgers.size());
                        return checkAndRecoverLedgers(ledgers, runId).thenApply((resolved) -> {
                            // Prune the working map in place: only errored
                            // ledgers remain for the next iteration.
                            for (LedgerResult r : resolved) {
                                if (r.isMissing() || r.isOK()) {
                                    ledgers.remove(r.getLedgerId());
                                } else if (r.isError()) {
                                    // if there was an error, make sure we have the latest
                                    // metadata for the next iteration
                                    ledgers.put(r.getLedgerId(), r.getMetadata());
                                }
                            }
                            Optional<Throwable> firstError = resolved.stream().filter(r -> r.isError())
                                    .map(r -> r.getThrowable()).findFirst();
                            if (firstError.isPresent()) {
                                log.error("Event: {}, runId: {}, ok: {}"
                                        + ", error: {}, missing: {}, ledgersToRetry: {}",
                                        Events.FULL_CHECK_END, runId,
                                        resolved.stream().filter(r -> r.isOK()).count(),
                                        resolved.stream().filter(r -> r.isError()).count(),
                                        resolved.stream().filter(r -> r.isMissing()).count(),
                                        ledgers.size(), firstError.get());
                            } else {
                                log.info("Event: {}, runId: {}, ok: {}, error: 0, missing: {}, ledgersToRetry: {}",
                                        Events.FULL_CHECK_END, runId,
                                        resolved.stream().filter(r -> r.isOK()).count(),
                                        resolved.stream().filter(r -> r.isMissing()).count(),
                                        ledgers.size());
                            }
                            return ledgers;
                        });
                    })
            .thenCompose(
                    (ledgers) -> {
                        CompletableFuture<Void> promise = new CompletableFuture<>();
                        try {
                            // Persist copied entries before clearing the flag.
                            this.ledgerStorage.flush();
                            if (ledgers.isEmpty()) {
                                log.info("Event: {}, runId: {}", Events.CLEAR_INTEGCHECK_FLAG, runId);
                                this.ledgerStorage.clearStorageStateFlag(
                                        StorageState.NEEDS_INTEGRITY_CHECK);
                            }
                            // not really needed as we are modifying the map in place
                            updateMetadataCache(ledgers);
                            log.info("Event: {}, runId: {}", Events.FULL_CHECK_COMPLETE, runId);
                            promise.complete(null);
                        } catch (IOException ioe) {
                            log.error("Event: {}, runId: {}", Events.FULL_CHECK_ERROR, runId, ioe);
                            promise.completeExceptionally(ioe);
                        }
                        return promise;
                    });
}
// Atomically install a new ledgerId -> metadata cache (package-private;
// also used as a test seam).
void updateMetadataCache(Map<Long, LedgerMetadata> ledgers) {
    ledgersCacheRef.set(ledgers);
}
/**
 * Return the cached ledger metadata map if the preboot sequence (or a prior
 * call) built one; otherwise read all metadata from the ledger manager,
 * keeping only ledgers whose ensembles contain this bookie, and cache it.
 */
CompletableFuture<Map<Long, LedgerMetadata>> getCachedOrReadMetadata(String runId) {
    Map<Long, LedgerMetadata> map = ledgersCacheRef.get();
    if (map != null) {
        log.info("Event: {}, runId: {}, ledgerCount: {}", Events.USE_CACHED_METADATA, runId,
                map.size());
        return CompletableFuture.completedFuture(map);
    } else {
        log.info("Event: {}, runId: {}", Events.REFRESH_METADATA, runId);
        MetadataAsyncIterator iter = new MetadataAsyncIterator(scheduler,
                ledgerManager, MAX_INFLIGHT, ZK_TIMEOUT_S, TimeUnit.SECONDS);
        // Concurrency-safe, descending ledger-id order (matches preboot).
        Map<Long, LedgerMetadata> ledgersCache =
                new ConcurrentSkipListMap<>(Comparator.<Long>naturalOrder().reversed());
        return iter.forEach((ledgerId, metadata) -> {
            if (ensemblesContainBookie(metadata, bookieId)) {
                ledgersCache.put(ledgerId, metadata);
            }
            return CompletableFuture.completedFuture(null);
        })
                .thenApply(ignore -> {
                    updateMetadataCache(ledgersCache);
                    return ledgersCache;
                });
    }
}
/**
* Check whether the current bookie exists in the last ensemble of the bookie.
* If it does, and the ledger is not closed, then this bookie may have accepted a fencing
* request or an entry which it no longer contains. The only way to resolve this is to
* open/recover the ledger. This bookie should not take part in the recovery, so the bookie
* must be marked as in limbo. This will stop the bookie from responding to read requests for
* that ledger, so clients will not be able to take into account the response of the bookie
* during recovery. Effectively we are telling the client that we don't know whether we had
* certain entries or not, so go look elsewhere.
* We also fence all ledgers with this bookie in the last segment, to prevent any new writes,
* so that after the limbo state is cleared, we won't accept any new writes.
* We only need to consider final ensembles in non-closed ledgers at the moment of time that
* the preboot check commences. If this bookie is added to a new ensemble after that point in
* time, we know that we haven't received any entries for that segment, nor have we received
* a fencing request, because we are still in the preboot sequence.
*/
// See the javadoc above: marks a ledger limbo+fenced when this bookie is in
// the final ensemble of a non-closed ledger, since a fence request or entry
// may have been lost.
private CompletableFuture<Void> processPreBoot(long ledgerId, LedgerMetadata metadata,
                                               String runId) {
    Map.Entry<Long, ? extends List<BookieId>> lastEnsemble = metadata.getAllEnsembles().lastEntry();
    CompletableFuture<Void> promise = new CompletableFuture<>();
    if (lastEnsemble == null) {
        // Metadata with no ensembles is invalid; fail the preboot run loudly.
        log.error("Event: {}, runId: {}, metadata: {}, ledger: {}",
                Events.INVALID_METADATA, runId, metadata, ledgerId);
        promise.completeExceptionally(
                new IllegalStateException(
                        String.format("All metadata must have at least one ensemble, %d does not", ledgerId)));
        return promise;
    }
    if (!metadata.isClosed() && lastEnsemble.getValue().contains(bookieId)) {
        try {
            log.info("Event: {}, runId: {}, metadata: {}, ledger: {}",
                    Events.MARK_LIMBO, runId, metadata, ledgerId);
            // Limbo: don't answer reads for this ledger until it is recovered.
            ledgerStorage.setLimboState(ledgerId);
            // Fenced: reject new writes so clearing limbo later is safe.
            ledgerStorage.setFenced(ledgerId);
            promise.complete(null);
        } catch (IOException ioe) {
            log.info("Event: {}, runId: {}, metadata: {}, ledger: {}",
                    Events.LIMBO_OR_FENCE_ERROR, runId, metadata, ledgerId, ioe);
            promise.completeExceptionally(ioe);
        }
    } else {
        promise.complete(null);
    }
    return promise;
}
/**
 * Immutable outcome of checking one ledger: OK (checked/recovered and can be
 * dropped from the retry set), MISSING (deleted from the metadata store), or
 * ERROR (check failed; carries the throwable and metadata for a retry).
 */
static class LedgerResult {
    enum State {
        MISSING, ERROR, OK
    };

    private final State state;
    private final long ledgerId;
    private final LedgerMetadata metadata;
    private final Throwable throwable;

    private LedgerResult(State state, long ledgerId,
                         LedgerMetadata metadata, Throwable throwable) {
        this.state = state;
        this.ledgerId = ledgerId;
        this.metadata = metadata;
        this.throwable = throwable;
    }

    /** Ledger no longer exists in the metadata store. */
    static LedgerResult missing(long ledgerId) {
        return new LedgerResult(State.MISSING, ledgerId, null, null);
    }

    /** Ledger fully checked; {@code metadata} is the freshest copy seen. */
    static LedgerResult ok(long ledgerId, LedgerMetadata metadata) {
        return new LedgerResult(State.OK, ledgerId, metadata, null);
    }

    /** Check failed with {@code t}; the ledger should be retried. */
    static LedgerResult error(long ledgerId, LedgerMetadata metadata, Throwable t) {
        return new LedgerResult(State.ERROR, ledgerId, metadata, t);
    }

    boolean isMissing() {
        return state == State.MISSING;
    }

    boolean isOK() {
        return state == State.OK;
    }

    boolean isError() {
        return state == State.ERROR;
    }

    long getLedgerId() {
        return ledgerId;
    }

    LedgerMetadata getMetadata() {
        return metadata;
    }

    Throwable getThrowable() {
        return throwable;
    }
}
/**
* Check each ledger passed.
* If the ledger is in limbo, recover it.
* Check that the bookie has all entries that it is expected to have.
* Copy any entries that are missing.
* @return The set of results for all ledgers passed. A result can be OK, Missing or Error.
* OK and missing ledgers do not need to be looked at again. Error should be retried.
*/
// Stage 1: recover limbo ledgers (up to MAX_INFLIGHT concurrently) and clear
// limbo on success. Stage 2: verify/copy entries one ledger at a time so
// copied entries stay together in the entry log. delayErrors=true keeps the
// pipeline running so every ledger gets a LedgerResult even if some fail.
CompletableFuture<Set<LedgerResult>> checkAndRecoverLedgers(Map<Long, LedgerMetadata> ledgers,
                                                            String runId) {
    CompletableFuture<Set<LedgerResult>> promise = new CompletableFuture<>();
    final Disposable disposable = Flowable.fromIterable(ledgers.entrySet())
            .subscribeOn(scheduler, false)
            .flatMapSingle((mapEntry) -> {
                long ledgerId = mapEntry.getKey();
                LedgerMetadata originalMetadata = mapEntry.getValue();
                return recoverLedgerIfInLimbo(ledgerId, mapEntry.getValue(), runId)
                        .map(newMetadata -> LedgerResult.ok(ledgerId, newMetadata))
                        .onErrorReturn(t -> LedgerResult.error(ledgerId, originalMetadata, t))
                        // Empty Maybe means the ledger was deleted from metadata.
                        .defaultIfEmpty(LedgerResult.missing(ledgerId))
                        .flatMap((res) -> {
                            try {
                                // Recovery succeeded, so it is now safe to serve reads.
                                if (res.isOK()) {
                                    this.ledgerStorage.clearLimboState(ledgerId);
                                }
                                return Single.just(res);
                            } catch (IOException ioe) {
                                return Single.just(LedgerResult.error(res.getLedgerId(),
                                        res.getMetadata(), ioe));
                            }
                        });
            },
                    true /* delayErrors */,
                    MAX_INFLIGHT)
            .flatMapSingle((res) -> {
                if (res.isOK()) {
                    return checkAndRecoverLedgerEntries(res.getLedgerId(),
                            res.getMetadata(), runId)
                            .map(ignore -> LedgerResult.ok(res.getLedgerId(),
                                    res.getMetadata()))
                            .onErrorReturn(t -> LedgerResult.error(res.getLedgerId(),
                                    res.getMetadata(), t));
                } else {
                    return Single.just(res);
                }
            },
                    true /* delayErrors */,
                    1 /* copy 1 ledger at a time to keep entries together in entrylog */)
            .collect(Collectors.toSet())
            .subscribe(resolved -> promise.complete(resolved),
                    throwable -> promise.completeExceptionally(throwable));
    // Tear down the rx chain whichever way the promise completes.
    promise.whenComplete((result, ex) -> disposable.dispose());
    return promise;
}
/**
* Run ledger recovery on all a ledger if it has been marked as in limbo.
* @return a maybe with the most up to date metadata we have for he ledger.
* If the ledger has been deleted, returns empty.
*/
// See javadoc above. Returns: recovered (or unchanged) metadata; empty if
// the ledger was deleted from the metadata store; error on any failure.
Maybe<LedgerMetadata> recoverLedgerIfInLimbo(long ledgerId, LedgerMetadata origMetadata,
                                             String runId) {
    try {
        // Ensure the ledger exists locally before touching its limbo state.
        if (!this.ledgerStorage.ledgerExists(ledgerId)) {
            this.ledgerStorage.setMasterKey(ledgerId, new byte[0]);
        }
        if (this.ledgerStorage.hasLimboState(ledgerId)) {
            log.info("Event: {}, runId: {}, metadata: {}, ledger: {}",
                    Events.RECOVER_LIMBO_LEDGER, runId, origMetadata, ledgerId);
            return recoverLedger(ledgerId, runId)
                    .toMaybe()
                    .onErrorResumeNext(t -> {
                        if (t instanceof BKException.BKNoSuchLedgerExistsOnMetadataServerException) {
                            // Deleted ledger: report as missing, not as an error.
                            log.info("Event: {}, runId: {}, metadata: {}, ledger: {}",
                                    Events.RECOVER_LIMBO_LEDGER_MISSING, runId, origMetadata, ledgerId);
                            return Maybe.empty();
                        } else {
                            log.info("Event: {}, runId: {}, metadata: {}, ledger: {}",
                                    Events.RECOVER_LIMBO_LEDGER_ERROR, runId, origMetadata, ledgerId);
                            return Maybe.error(t);
                        }
                    });
        } else {
            // Not in limbo: nothing to recover, keep the metadata we had.
            return Maybe.just(origMetadata);
        }
    } catch (IOException ioe) {
        return Maybe.error(ioe);
    }
}
// Run client-side ledger recovery by opening the ledger via the admin client
// (asyncOpenLedger fences and recovers), yielding the post-recovery metadata.
Single<LedgerMetadata> recoverLedger(long ledgerId, String runId) {
    return Single.create((emitter) ->
            admin.asyncOpenLedger(ledgerId, (rc, handle, ctx) -> {
                if (rc != BKException.Code.OK) {
                    emitter.onError(BKException.create(rc));
                } else {
                    LedgerMetadata metadata = handle.getLedgerMetadata();
                    // Best-effort close: a close failure is logged but does not
                    // fail the recovery, since the metadata is already in hand.
                    handle.closeAsync().whenComplete((ignore, exception) -> {
                        if (exception != null) {
                            log.warn("Event: {}, runId: {}, ledger: {}",
                                    Events.RECOVER_LIMBO_LEDGER_CLOSE_ERROR, runId, ledgerId, exception);
                        }
                    });
                    emitter.onSuccess(metadata);
                }
            }, null));
}
/**
* Check whether the local storage has all the entries as specified in the metadata.
* If not, copy them from other available nodes.
* Returns a single value which is the ledgerId or an error if any entry failed to copy
* should throw error if any entry failed to copy.
*/
// See javadoc above. Walks entries [0, lastKnownEntry], copying any entry this
// bookie should hold but doesn't (up to MAX_ENTRIES_INFLIGHT concurrently).
Single<Long> checkAndRecoverLedgerEntries(long ledgerId, LedgerMetadata metadata,
                                          String runId) {
    WriteSets writeSets = new WriteSets(metadata.getEnsembleSize(),
            metadata.getWriteQuorumSize());
    // For each ensemble (keyed by first entry id), this bookie's index within
    // it, or -1 when the bookie is not a member of that ensemble.
    NavigableMap<Long, Integer> bookieIndices = metadata.getAllEnsembles()
            .entrySet().stream()
            .collect(ImmutableSortedMap.toImmutableSortedMap(Comparator.naturalOrder(),
                    e -> e.getKey(),
                    e -> e.getValue().indexOf(bookieId)));
    long lastKnownEntry;
    if (metadata.isClosed()) {
        lastKnownEntry = metadata.getLastEntryId();
    } else {
        // if ledger is not closed, last known entry is the last entry of
        // the penultimate ensemble
        lastKnownEntry = metadata.getAllEnsembles().lastEntry().getKey() - 1;
    }
    if (lastKnownEntry < 0) {
        // No entries could exist yet; nothing to check.
        return Single.just(ledgerId);
    }
    EntryCopier.Batch batch;
    try {
        batch = entryCopier.newBatch(ledgerId, metadata);
    } catch (IOException ioe) {
        return Single.error(ioe);
    }
    // Progress/diagnostic counters shared by the rx callbacks below.
    AtomicLong byteCount = new AtomicLong(0);
    AtomicInteger count = new AtomicInteger(0);
    AtomicInteger errorCount = new AtomicInteger(0);
    AtomicReference<Throwable> firstError = new AtomicReference<>(null);
    log.info("Event: {}, runId: {}, metadata: {}, ledger: {}",
            Events.LEDGER_CHECK_AND_COPY_START, runId, metadata, ledgerId);
    return Flowable.rangeLong(0, lastKnownEntry + 1)
            .subscribeOn(scheduler, false)
            .flatMapMaybe((entryId) -> {
                return maybeCopyEntry(writeSets, bookieIndices, ledgerId, entryId, batch)
                        .doOnError((t) -> {
                            firstError.compareAndSet(null, t);
                            errorCount.incrementAndGet();
                        });
            }, true /* delayErrors */, MAX_ENTRIES_INFLIGHT)
            .doOnNext((bytes) -> {
                byteCount.addAndGet(bytes);
                count.incrementAndGet();
            })
            .count() // do nothing with result, but gives a single even if empty
            .doOnTerminate(() -> {
                if (firstError.get() != null) {
                    log.warn("Event: {}, runId: {}, metadata: {}, ledger: {}, entries: {}, bytes: {}, errors: {}",
                            Events.LEDGER_CHECK_AND_COPY_END, runId,
                            metadata, ledgerId, count.get(), byteCount.get(), firstError.get());
                } else {
                    log.info("Event: {}, runId: {}, metadata: {}, ledger: {}, entries: {}, bytes: {}, errors: 0",
                            Events.LEDGER_CHECK_AND_COPY_END, runId,
                            metadata, ledgerId, count.get(), byteCount.get());
                }
            })
            .map(ignore -> ledgerId);
}
/**
* @return the number of bytes copied.
*/
Maybe<Long> maybeCopyEntry(WriteSets writeSets, NavigableMap<Long, Integer> bookieIndices,
long ledgerId, long entryId, EntryCopier.Batch batch) {
try {
if (isEntryMissing(writeSets, bookieIndices, ledgerId, entryId)) {
return Maybe.fromCompletionStage(batch.copyFromAvailable(entryId));
} else {
return Maybe.empty();
}
} catch (BookieException | IOException ioe) {
return Maybe.error(ioe);
}
}
boolean isEntryMissing(WriteSets writeSets, NavigableMap<Long, Integer> bookieIndices,
long ledgerId, long entryId) throws IOException, BookieException {
int bookieIndexForEntry = bookieIndices.floorEntry(entryId).getValue();
if (bookieIndexForEntry < 0) {
return false;
}
return writeSets.getForEntry(entryId).contains(bookieIndexForEntry)
&& !ledgerStorage.entryExists(ledgerId, entryId);
}
static boolean ensemblesContainBookie(LedgerMetadata metadata, BookieId bookieId) {
return metadata.getAllEnsembles().values().stream()
.anyMatch(ensemble -> ensemble.contains(bookieId));
}
}
| 554 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/datainteg/EntryCopier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.datainteg;
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import org.apache.bookkeeper.client.api.LedgerMetadata;
/**
 * Interface for copying entries from other bookies.
 * The implementation should take care of selecting the order of the replicas
 * from which we try to read, taking into account stickiness and errors.
 * The implementation should take care of rate limiting.
 */
public interface EntryCopier {
    /**
     * Start copying a new batch. In general, there should be a batch per ledger.
     *
     * @param ledgerId the id of the ledger whose entries will be copied
     * @param metadata the metadata of that ledger, used to select source bookies
     * @return a batch handle through which individual entries can be copied
     * @throws IOException if local storage cannot be prepared for the ledger
     */
    Batch newBatch(long ledgerId, LedgerMetadata metadata) throws IOException;
    /**
     * An interface for a batch to be copied.
     */
    interface Batch {
        /**
         * Copy an entry from a remote bookie and store it locally.
         *
         * @param entryId the id of the entry to fetch and store
         * @return a future completing with the number of bytes copied.
         */
        CompletableFuture<Long> copyFromAvailable(long entryId);
    }
}
| 555 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/datainteg/MetadataAsyncIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.datainteg;
import io.reactivex.rxjava3.core.Completable;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Scheduler;
import io.reactivex.rxjava3.disposables.Disposable;
import java.io.IOException;
import java.util.Iterator;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.versioning.Versioned;
/**
 * An rxjava ledger metadata iterator.
 * Iterates over every ledger id known to the ledger manager, reads each
 * ledger's metadata, and hands the (id, metadata) pairs to a consumer with
 * bounded concurrency.
 */
@Slf4j
public class MetadataAsyncIterator {
    private final Scheduler scheduler;
    private final LedgerManager ledgerManager;
    private final long zkTimeoutMs;
    private final int maxInFlight;

    MetadataAsyncIterator(Scheduler scheduler,
                          LedgerManager ledgerManager, int maxInFlight,
                          int zkTimeout, TimeUnit zkTimeoutUnit) {
        this.scheduler = scheduler;
        this.ledgerManager = ledgerManager;
        this.maxInFlight = maxInFlight;
        this.zkTimeoutMs = zkTimeoutUnit.toMillis(zkTimeout);
    }

    /**
     * Flattens the two-level LedgerRangeIterator (ranges of ledger ids) into a
     * simple iterator of ledger ids.
     */
    private static class FlatIterator {
        final LedgerManager.LedgerRangeIterator ranges;
        Iterator<Long> range = null;

        FlatIterator(LedgerManager.LedgerRangeIterator ranges) {
            this.ranges = ranges;
        }

        boolean hasNext() throws IOException {
            // Keep advancing until we find a non-empty range or run out of
            // ranges. A single-step advance here would terminate iteration
            // early if one range happened to contain no ledgers, silently
            // skipping every subsequent range.
            while ((range == null || !range.hasNext()) && ranges.hasNext()) {
                range = ranges.next().getLedgers().iterator();
            }
            return range != null && range.hasNext();
        }

        Long next() throws IOException {
            return range.next();
        }
    }

    /**
     * Apply the consumer to each (ledgerId, metadata) pair, with at most
     * maxInFlight invocations outstanding at a time.
     *
     * @return a future that completes when all ledgers have been processed,
     *         or completes exceptionally on the first failure
     */
    public CompletableFuture<Void> forEach(BiFunction<Long, LedgerMetadata, CompletableFuture<Void>> consumer) {
        CompletableFuture<Void> promise = new CompletableFuture<>();
        final Disposable disposable = Flowable.<Long, FlatIterator>generate(
                () -> new FlatIterator(ledgerManager.getLedgerRanges(zkTimeoutMs)),
                (iter, emitter) -> {
                    try {
                        if (iter.hasNext()) {
                            emitter.onNext(iter.next());
                        } else {
                            emitter.onComplete();
                        }
                    } catch (Exception e) {
                        emitter.onError(e);
                    }
                })
            .subscribeOn(scheduler)
            .flatMapCompletable((ledgerId) -> Completable.fromCompletionStage(processOne(ledgerId, consumer)),
                                false /* delayErrors */,
                                maxInFlight)
            .subscribe(() -> promise.complete(null),
                       t -> promise.completeExceptionally(unwrap(t)));
        // tear down the subscription however the promise completes (including
        // external cancellation)
        promise.whenComplete((result, ex) -> disposable.dispose());
        return promise;
    }

    /**
     * Read one ledger's metadata and feed it to the consumer. A ledger deleted
     * between listing and reading is treated as success.
     */
    private CompletableFuture<Void> processOne(long ledgerId,
                                               BiFunction<Long, LedgerMetadata, CompletableFuture<Void>> consumer) {
        return ledgerManager.readLedgerMetadata(ledgerId)
            .thenApply(Versioned::getValue)
            .thenCompose((metadata) -> consumer.apply(ledgerId, metadata))
            .exceptionally((e) -> {
                    Throwable realException = unwrap(e);
                    log.warn("Got exception processing ledger {}", ledgerId, realException);
                    if (realException instanceof BKException.BKNoSuchLedgerExistsOnMetadataServerException) {
                        // ledger was deleted since we listed it; nothing to do
                        return null;
                    } else {
                        throw new CompletionException(realException);
                    }
                });
    }

    /** Strip nested Completion/Execution exception wrappers to the root cause. */
    static Throwable unwrap(Throwable e) {
        if (e instanceof CompletionException || e instanceof ExecutionException) {
            return unwrap(e.getCause());
        }
        return e;
    }
}
| 556 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/datainteg/DataIntegrityCookieValidation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.datainteg;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.UnknownHostException;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import org.apache.bookkeeper.bookie.BookieException;
import org.apache.bookkeeper.bookie.BookieImpl;
import org.apache.bookkeeper.bookie.Cookie;
import org.apache.bookkeeper.bookie.CookieValidation;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.discover.RegistrationManager;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Versioned;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An implementation of the CookieValidation interface that allows for auto-stamping
 * cookies when configured and used in conjunction with the data integrity service.
 * Because the data integrity service can heal a bookie with lost data due to a disk
 * failure, a bookie can auto stamp new cookies as part of the healing process.
 */
public class DataIntegrityCookieValidation implements CookieValidation {
    private static final Logger log = LoggerFactory.getLogger(DataIntegrityCookieValidation.class);
    private final ServerConfiguration conf;
    // identity of this bookie, derived from the server configuration
    private final BookieId bookieId;
    private final RegistrationManager registrationManager;
    // used to heal local storage before stamping fresh cookies
    private final DataIntegrityCheck dataIntegCheck;
    public DataIntegrityCookieValidation(ServerConfiguration conf,
                                         RegistrationManager registrationManager,
                                         DataIntegrityCheck dataIntegCheck)
            throws UnknownHostException {
        this.conf = conf;
        this.registrationManager = registrationManager;
        this.bookieId = BookieImpl.getBookieId(conf);
        this.dataIntegCheck = dataIntegCheck;
    }
    /**
     * Read this bookie's cookie from the registration manager (e.g. ZK).
     *
     * @return the versioned cookie, or empty if none has ever been stamped
     */
    private Optional<Versioned<Cookie>> getRegManagerCookie() throws BookieException {
        try {
            return Optional.of(Cookie.readFromRegistrationManager(registrationManager, bookieId));
        } catch (BookieException.CookieNotFoundException noCookieException) {
            // expected for a fresh install
            return Optional.empty();
        }
    }
    /**
     * Read the cookie stored in each data directory.
     *
     * @return one element per directory; empty where the directory has no cookie
     * @throws BookieException if a cookie exists but cannot be read
     */
    private List<Optional<Cookie>> collectDirectoryCookies(List<File> directories) throws BookieException {
        List<Optional<Cookie>> cookies = new ArrayList<>();
        for (File d : directories) {
            try {
                cookies.add(Optional.of(Cookie.readFromDirectory(d)));
            } catch (FileNotFoundException fnfe) {
                // no cookie yet in this directory (new or wiped disk)
                cookies.add(Optional.empty());
            } catch (IOException ioe) {
                throw new BookieException.InvalidCookieException(ioe);
            }
        }
        return cookies;
    }
    /**
     * Write the master cookie to the registration manager first, then to each
     * data directory.
     */
    private void stampCookie(Cookie masterCookie, Version expectedVersion, List<File> directories)
            throws BookieException {
        // stamp to ZK first as it's the authoritative cookie. If this fails part way through
        // stamping the directories, then a data integrity check will occur.
        log.info("Stamping cookie to ZK");
        masterCookie.writeToRegistrationManager(registrationManager, conf, expectedVersion);
        for (File d : directories) {
            try {
                log.info("Stamping cookie to directory {}", d);
                masterCookie.writeToDirectory(d);
            } catch (IOException ioe) {
                log.error("Exception writing cookie", ioe);
                throw new BookieException.InvalidCookieException(ioe);
            }
        }
    }
    @Override
    public void checkCookies(List<File> directories)
            throws BookieException, InterruptedException {
        String instanceId = registrationManager.getClusterInstanceId();
        if (instanceId == null) {
            throw new BookieException.InvalidCookieException("Cluster instance ID unavailable");
        }
        // the cookie this bookie *should* have, given its current configuration
        Cookie masterCookie;
        try {
            masterCookie = Cookie.generateCookie(conf).setInstanceId(instanceId).build();
        } catch (UnknownHostException uhe) {
            throw new BookieException.InvalidCookieException(uhe);
        }
        // collect existing cookies
        Optional<Versioned<Cookie>> regManagerCookie = getRegManagerCookie();
        List<Optional<Cookie>> directoryCookies = collectDirectoryCookies(directories);
        // if master is empty, everything must be empty, otherwise the cluster is messed up
        if (!regManagerCookie.isPresent()) {
            // if everything is empty, it's a new install, just stamp the cookies
            if (directoryCookies.stream().noneMatch(Optional::isPresent)) {
                log.info("New environment found. Stamping cookies");
                stampCookie(masterCookie, Version.NEW, directories);
            } else {
                String errorMsg =
                    "Cookie missing from ZK. Either it was manually deleted, "
                    + "or the bookie was started pointing to a different ZK cluster "
                    + "than the one it was originally started with. "
                    + "This requires manual intervention to fix";
                log.error(errorMsg);
                throw new BookieException.InvalidCookieException(errorMsg);
            }
        } else if (!regManagerCookie.get().getValue().equals(masterCookie)
                   || !directoryCookies.stream().allMatch(c -> c.map(masterCookie::equals).orElse(false))) {
            // some cookie (ZK or a directory) disagrees with the expected master cookie
            if (conf.isDataIntegrityStampMissingCookiesEnabled()) {
                log.warn("ZK cookie({}) or directory cookies({}) do not match master cookie ({}), running check",
                         regManagerCookie, directoryCookies, masterCookie);
                try {
                    // heal local storage before stamping; blocks until the check completes
                    dataIntegCheck.runPreBootCheck("INVALID_COOKIE").get();
                } catch (ExecutionException ee) {
                    if (ee.getCause() instanceof BookieException) {
                        throw (BookieException) ee.getCause();
                    } else {
                        throw new BookieException.InvalidCookieException(ee.getCause());
                    }
                }
                log.info("Environment should be in a sane state. Stamp new cookies");
                stampCookie(masterCookie, regManagerCookie.get().getVersion(), directories);
            } else {
                String errorMsg = MessageFormat.format(
                        "ZK cookie({0}) or directory cookies({1}) do not match master cookie ({2})"
                        + " and missing cookie stamping is disabled.",
                        regManagerCookie, directoryCookies, masterCookie);
                log.error(errorMsg);
                throw new BookieException.InvalidCookieException(errorMsg);
            }
        } // else all cookies match the masterCookie, meaning nothing has changed in the configuration
    }
}
| 557 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/datainteg/WriteSets.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.datainteg;
import static com.google.common.base.Preconditions.checkState;
import com.google.common.collect.ImmutableList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Pregenerate the write sets. RoundRobinDistributionSchedule should really be doing this also.
 */
class WriteSets {
    private static final Logger log = LoggerFactory.getLogger(WriteSets.class);
    private final int ensembleSize;
    // sets.get(i) is the write set for entries with (entryId % ensembleSize) == i,
    // ordered according to the preferred order supplied at construction
    private final ImmutableList<ImmutableList<Integer>> sets;
    /**
     * @param preferredOrder ensemble indices in the order they should appear in
     *        each write set; may omit at most one index (e.g. this bookie's own
     *        slot) — see the size check in generateWriteSet
     */
    WriteSets(List<Integer> preferredOrder,
              int ensembleSize,
              int writeQuorumSize) {
        this.ensembleSize = ensembleSize;
        ImmutableList.Builder<ImmutableList<Integer>> builder =
            new ImmutableList.Builder<ImmutableList<Integer>>();
        // one write set per possible (entryId % ensembleSize) value
        for (int i = 0; i < ensembleSize; i++) {
            builder.add(generateWriteSet(preferredOrder, ensembleSize, writeQuorumSize, i));
        }
        sets = builder.build();
    }
    /** Natural-order variant: prefer ensemble indices 0..ensembleSize-1 in order. */
    WriteSets(int ensembleSize, int writeQuorumSize) {
        this(IntStream.range(0, ensembleSize).boxed().collect(Collectors.toList()),
             ensembleSize, writeQuorumSize);
    }
    /** The precomputed write set for the given entry id. */
    ImmutableList<Integer> getForEntry(long entryId) {
        return sets.get((int) (entryId % ensembleSize));
    }
    /**
     * Build the write set for entries whose (entryId % ensembleSize) == offset.
     * The write set is the contiguous window [offset, offset + writeQuorumSize - 1]
     * of ensemble indices, taken modulo ensembleSize (so it may wrap around),
     * emitted in the order the indices appear in preferredOrder.
     */
    static ImmutableList<Integer> generateWriteSet(List<Integer> preferredOrder,
                                                   int ensembleSize,
                                                   int writeQuorumSize,
                                                   int offset) {
        ImmutableList.Builder<Integer> builder =
            new ImmutableList.Builder<Integer> ();
        int firstIndex = offset;
        int lastIndex = (offset + writeQuorumSize - 1) % ensembleSize;
        for (Integer i : preferredOrder) {
            if (firstIndex <= lastIndex
                && i >= firstIndex
                && i <= lastIndex) {
                // non-wrapping window: simple containment check
                builder.add(i);
            } else if (lastIndex < firstIndex
                       && (i <= lastIndex
                           || i >= firstIndex)) {
                // wrapping window: index lies in either the head or tail segment
                builder.add(i);
            }
        }
        ImmutableList<Integer> writeSet = builder.build();
        // writeSet may be one smaller than the configured write
        // set size if we are excluding ourself
        checkState(writeSet.size() == writeQuorumSize
                   || (writeSet.size() == writeQuorumSize - 1));
        return writeSet;
    }
}
| 558 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/datainteg/Events.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.datainteg;
enum Events {
    /**
     * Data integrity service has started.
     * It runs at an interval to check if a full integrity check is needed,
     * and if so runs it.
     */
    DATA_INTEG_SERVICE_START,
    /**
     * Data integrity service has been stopped.
     */
    DATA_INTEG_SERVICE_STOP,
    /**
     * An exception was thrown on the data integrity service executor
     * and never caught. This is a programmer error and should be reported
     * as such.
     */
    DATA_INTEG_SERVICE_UNCAUGHT_ERROR,
    /**
     * Data integrity service thread interrupted.
     * This is non-fatal and indicates that the bookie is shutting down.
     * The full check will resume once the bookie is started again.
     */
    DATA_INTEG_SERVICE_INTERRUPTED,
    /**
     * An error occurred in the data integrity service loop.
     * This normally indicates that an error occurred in the full check.
     * The full check will be tried again.
     * It could also indicate an error checking the NEEDS_INTEGRITY_CHECK
     * flag, which indicates disk issues.
     */
    DATA_INTEG_SERVICE_ERROR,
    /**
     * Mark a ledger as in-limbo. In limbo ledgers are ledgers for whose
     * entries we cannot safely answer queries positively or negatively.
     * These are ledgers which have not been closed and where this bookie
     * appears in the final ensemble.
     * We may have had an entry in the past, but due to disk failures or
     * configuration changes it may not currently exist locally. However,
     * we cannot tell clients that the entry doesn't exist, because the client
     * would understand that to mean that it never existed, and this would
     * break consistency in the ledger recovery protocol.
     * For limbo ledgers, all entry level queries should throw an exception.
     * We also mark the ledger as fenced at this point, as it may have been set
     * on this ledger previously. This means no more writes for this ledger
     * can come to this bookie.
     */
    MARK_LIMBO,
    /**
     * An error occurred marking the ledger as fenced or as in-limbo.
     * The most likely cause is a bad disk.
     * This is a fatal error, as we cannot safely serve entries if we cannot
     * set limbo and fence flags.
     */
    LIMBO_OR_FENCE_ERROR,
    /**
     * Start the preboot check. The preboot check runs when some configuration
     * has changed regarding the disk configuration. This may be simply a disk
     * being added, or it could be the disks being wiped. The preboot check
     * needs to check which ledgers we are supposed to store according to
     * ledger metadata. Any unclosed ledgers which contain this bookie in its last
     * ensemble must be marked as in-limbo, as we don't know if entries from that
     * ledger have previously existed on this bookie.
     * The preboot check doesn't copy any data. That is left up to the full check
     * which can run in the background while the bookie is serving data for non-limbo
     * ledgers.
     * The preboot check has a runId associated which can be used to pull together
     * all the events from the same run.
     * The preboot check will set the NEEDS_INTEGRITY_CHECK flag on storage to
     * trigger a full check after the bookie has booted.
     */
    PREBOOT_START,
    /**
     * The preboot check has completed successfully. The event contains the number
     * of ledgers that have been processed.
     */
    PREBOOT_END,
    /**
     * An error occurred during the preboot check. This is a fatal error as we cannot
     * safely serve data if the correct ledgers have not been marked as in-limbo. The
     * error could be due to problems accessing the metadata store, or due to disk
     * issues.
     */
    PREBOOT_ERROR,
    /**
     * Preboot found an invalid ledger metadata. All ledger metadata must have at least
     * one ensemble but the process found one with none.
     */
    INVALID_METADATA,
    /**
     * Preboot must create a ledger that the bookie does not have but that metadata says
     * the bookie should have. This can happen due to things like ensemble changes and
     * when a ledger is closed. If the ledger cannot be created on the bookie then
     * this error will cause preboot to fail.
     */
    ENSURE_LEDGER_ERROR,
    /**
     * Initialized the full check. If we have cached metadata from a previous run, or
     * the preboot check, then we use that. Otherwise we read the metadata from the
     * metadata store.
     * The full check goes through each ledger for which this bookie is supposed to
     * store entries and checks that these entries exist on the bookie. If they do not
     * exist, they are copied from another bookie.
     * Each full check has a runId associated which can be used to find all events from
     * the check.
     */
    FULL_CHECK_INIT,
    /**
     * The full check has completed.
     */
    FULL_CHECK_COMPLETE,
    /**
     * Start iterating through the ledgers that should be on this bookie.
     * The event is annotated with the number of ledgers which will be checked,
     * which may be fewer than the total number of ledgers on the bookie as
     * a previous run may have verified that some ledgers are ok and don't need
     * to be checked.
     */
    FULL_CHECK_START,
    /**
     * The full check has completed. This can be an info event or an error event.
     * The event is annotated with the number of ledgers which were checked and found
     * to be ok, the number that were found to be missing and the number for which
     * errors occurred during the check. The missing ledgers have been deleted on
     * the cluster, so don't need to be processed again. If there is a non-zero number
     * of ledgers with errors, the whole event is an error.
     * An error for this event is non-fatal. Any ledgers which finished with error
     * will be processed again the next time the full check runs. The full check
     * continues retrying until there are no errors.
     */
    FULL_CHECK_END,
    /**
     * An error occurred during the full check, but not while processing ledgers.
     * This error could occur while flushing the ledger storage or clearing the
     * full check flag.
     */
    FULL_CHECK_ERROR,
    /**
     * The full check will use cached metadata.
     */
    USE_CACHED_METADATA,
    /**
     * The full check will read the metadata from the metadata store.
     */
    REFRESH_METADATA,
    /**
     * The NEEDS_INTEGRITY_CHECK will be cleared from the ledger storage.
     * This signifies that the ledger storage contains everything it should
     * and the full check does not need to be retried, even after reboot.
     */
    CLEAR_INTEGCHECK_FLAG,
    /**
     * An error occurred while clearing the limbo flag for a ledger.
     * This is generally a disk error. This error is non-fatal and the operation
     * will be tried again on the next full check.
     */
    CLEAR_LIMBO_ERROR,
    /**
     * Recover a ledger that has been marked as in limbo. This runs the ledger
     * recovery algorithm to find the last entry of the ledger and mark the ledger
     * as closed. As the ledger is marked as in-limbo locally, the current bookie
     * does not take part in the recovery process apart from initializing it.
     * Once recovery completes successfully, the limbo flag can be cleared for the
     * ledger.
     */
    RECOVER_LIMBO_LEDGER,
    /**
     * The ledger has been deleted from the ledger metadata store, so we don't need
     * to continue any processing on it.
     */
    RECOVER_LIMBO_LEDGER_MISSING,
    /**
     * An error occurred during recovery. This could be due to not having enough
     * bookies available to recover the ledger.
     * The error is non-fatal. The recovery will be tried again on the next run of
     * ledger recovery.
     */
    RECOVER_LIMBO_LEDGER_ERROR,
    /**
     * An error occurred when trying to close the ledger handle of a recovered ledger.
     * This shouldn't happen, as closing a recovered ledger should not involve any I/O.
     * This error is non-fatal and the event is registered for informational purposes
     * only.
     */
    RECOVER_LIMBO_LEDGER_CLOSE_ERROR,
    /**
     * Start checking whether the entries for a ledger exist locally, and copying them
     * if they do not.
     */
    LEDGER_CHECK_AND_COPY_START,
    /**
     * Checking and copying has completed for a ledger. If any entry failed to copy
     * this is a warning event. The ledger will be retried on the next run of the full
     * check.
     * This event is annotated with the number of entries copied, the number of errors
     * and the total number of bytes copied for the ledger.
     */
    LEDGER_CHECK_AND_COPY_END
}
| 559 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/datainteg/DataIntegrityCheck.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.datainteg;
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
/**
 * The interface for the data integrity check feature. This feature allows
 * a bookie to handle data loss scenarios such as when running without
 * the journal or after a disk failure has caused the loss of all data.
 */
public interface DataIntegrityCheck {
    /**
     * Run quick preboot check. This check should do enough to ensure that
     * it is safe to complete the boot sequence without compromising correctness.
     * To this end, if it finds that this bookie is part of the last ensemble of
     * an unclosed ledger, it must prevent the bookie from being able store new
     * entries for that ledger and must prevent the bookie from taking part in
     * the discovery of the last entry of that ledger.
     *
     * @param reason a tag recorded with the run's events, identifying why the
     *        check was triggered
     */
    CompletableFuture<Void> runPreBootCheck(String reason);
    /**
     * Whether we need to run a full check.
     * This condition can be set by the runPreBootCheck() call to run a full check
     * in the background once the bookie is running. This can later be used
     * to run the full check periodically, or to exponentially backoff and retry
     * when some transient condition prevents a ledger being fixed during a
     * full check.
     *
     * @throws IOException if the flag cannot be read from storage
     */
    boolean needsFullCheck() throws IOException;
    /**
     * Run full check of bookies local data. This check should ensure that
     * if the metadata service states that it should have an entry, then it
     * should have that entry. If the entry is missing, it should copy it
     * from another available source.
     */
    CompletableFuture<Void> runFullCheck();
}
| 560 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/datainteg/EntryCopierImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.datainteg;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Ticker;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSortedMap;
import io.netty.buffer.ByteBuf;
import io.netty.util.ReferenceCountUtil;
import java.io.IOException;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.LedgerStorage;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookieClient;
import org.apache.bookkeeper.proto.BookieProtocol;
/**
 * Implementation for the EntryCopier interface. Handles the reading of entries
 * from peer bookies.
 *
 * <p>A copy batch fetches entries from the peers of an ensemble in a preferred
 * order (computed per ensemble, seeded by the ledger id) and writes them into
 * local {@link LedgerStorage}. Bookies that return errors are placed in a
 * "sin bin" and demoted to the end of the preferred order for a fixed period.
 */
@Slf4j
public class EntryCopierImpl implements EntryCopier {
    // How long a bookie stays in the sin bin after its most recent read error.
    private static final long SINBIN_DURATION_MS = TimeUnit.MINUTES.toMillis(1);
    // Identity of the local bookie; excluded from the preferred read order
    // (we never read from ourselves).
    private final BookieId bookieId;
    // Client used to read entries from peer bookies.
    private final BookieClient bookieClient;
    // Local storage the copied entries are written into.
    private final LedgerStorage storage;
    // Time source; injectable for tests.
    private final Ticker ticker;
    // Shared error tracker for all batches created by this copier.
    private final SinBin sinBin;
    public EntryCopierImpl(BookieId bookieId,
                           BookieClient bookieClient,
                           LedgerStorage storage,
                           Ticker ticker) {
        this.bookieId = bookieId;
        this.bookieClient = bookieClient;
        this.storage = storage;
        this.ticker = ticker;
        this.sinBin = new SinBin(ticker);
    }
    /**
     * Create a new copy batch for {@code ledgerId}. If the ledger does not yet
     * exist locally, it is initialized with the password from the metadata as
     * its master key before any entries are copied.
     *
     * @throws IOException if the storage existence check or master-key write fails
     */
    @Override
    public Batch newBatch(long ledgerId, LedgerMetadata metadata) throws IOException {
        if (!storage.ledgerExists(ledgerId)) {
            storage.setMasterKey(ledgerId, metadata.getPassword());
        }
        return new BatchImpl(bookieId, ledgerId, metadata, sinBin);
    }
    /**
     * A copy batch for a single ledger. Caches the per-ensemble preferred
     * read order ({@code writeSets}) and refreshes it when new bookie errors
     * are observed.
     */
    @VisibleForTesting
    class BatchImpl implements Batch {
        private final long ledgerId;
        private final LedgerMetadata metadata;
        private final SinBin sinBin;
        // Preferred read order per ensemble, keyed by the first entry id of
        // each ensemble. Replaced wholesale (hence volatile) whenever the
        // error-bookie set changes.
        private volatile ImmutableSortedMap<Long, WriteSets> writeSets;
        // NOTE(review): the bookieId parameter is never assigned; the methods
        // below reference the enclosing EntryCopierImpl.bookieId instead
        // (same value for all current callers) — confirm this is intentional.
        BatchImpl(BookieId bookieId,
                  long ledgerId, LedgerMetadata metadata,
                  SinBin sinBin) {
            this.ledgerId = ledgerId;
            this.metadata = metadata;
            this.sinBin = sinBin;
            updateWriteSets();
        }
        private void updateWriteSets() {
            // clear non-erroring bookies
            // in theory we should be able to have a single set of writesets per ledger,
            // however, if there are multiple ensembles, bookies will move around, and we
            // still want to avoid erroring bookies
            this.writeSets = preferredBookieIndices(bookieId, metadata,
                                                    sinBin.getErrorBookies(), ledgerId)
                .entrySet().stream().collect(
                        ImmutableSortedMap.toImmutableSortedMap(
                                Comparator.naturalOrder(),
                                e -> e.getKey(),
                                e -> new WriteSets(e.getValue(),
                                                   metadata.getEnsembleSize(),
                                                   metadata.getWriteQuorumSize())));
        }
        // Record a read error against a bookie. The preferred order is only
        // recomputed on the FIRST error for that bookie (addFailed returns
        // true), so repeated failures don't thrash the write sets.
        @VisibleForTesting
        void notifyBookieError(BookieId bookie) {
            if (sinBin.addFailed(bookie)) {
                updateWriteSets();
            }
        }
        /**
         * Copy {@code entryId} from an available peer into local storage.
         *
         * @return a future completed with the entry's length in bytes, or
         *         completed exceptionally if the entry could not be fetched
         *         or stored
         * @throws IllegalArgumentException (synchronously) if entryId is
         *         negative, or beyond the last entry of a closed ledger
         */
        @Override
        public CompletableFuture<Long> copyFromAvailable(long entryId) {
            if (entryId < 0) {
                throw new IllegalArgumentException(
                        String.format("Entry ID (%d) can't be less than 0", entryId));
            }
            if (metadata.isClosed() && entryId > metadata.getLastEntryId()) {
                throw new IllegalArgumentException(
                        String.format("Invalid entry id (%d), last entry for ledger %d is %d",
                                      entryId, ledgerId, metadata.getLastEntryId()));
            }
            CompletableFuture<Long> promise = new CompletableFuture<>();
            fetchEntry(entryId).whenComplete((buffer, exception) -> {
                if (exception != null) {
                    promise.completeExceptionally(exception);
                } else {
                    try {
                        // capture the length before addEntry, which may
                        // consume the buffer's reader index
                        long length = buffer.readableBytes();
                        storage.addEntry(buffer);
                        promise.complete(length);
                    } catch (Throwable t) {
                        promise.completeExceptionally(t);
                    } finally {
                        // balances the retain() taken in readEntry()
                        ReferenceCountUtil.release(buffer);
                    }
                }
            });
            return promise;
        }
        // Fetch the entry from peers in preferred order. The returned buffer
        // carries a reference the caller must release.
        // NOTE(review): failure to find a write set throws IllegalStateException
        // synchronously rather than returning a failed future — callers must
        // handle both failure modes.
        @VisibleForTesting
        CompletableFuture<ByteBuf> fetchEntry(long entryId) {
            List<BookieId> ensemble = metadata.getEnsembleAt(entryId);
            final Map.Entry<Long, WriteSets> writeSetsForEntryId = this.writeSets
                .floorEntry(entryId);
            if (writeSetsForEntryId == null) {
                log.error("writeSets for entryId {} not found, writeSets {}", entryId, writeSets);
                throw new IllegalStateException("writeSets for entryId: " + entryId + " not found");
            }
            ImmutableList<Integer> writeSet = writeSetsForEntryId
                .getValue()
                .getForEntry(entryId);
            int attempt = 0;
            CompletableFuture<ByteBuf> promise = new CompletableFuture<>();
            fetchRetryLoop(entryId, attempt,
                           ensemble, writeSet,
                           promise, Optional.empty());
            return promise;
        }
        // Try each bookie of the write set in turn until one returns the
        // entry. If every attempt fails, complete the promise with the FIRST
        // exception observed (or a generic BKReadException if none was kept).
        private void fetchRetryLoop(long entryId, int attempt,
                                    List<BookieId> ensemble,
                                    ImmutableList<Integer> writeSet,
                                    CompletableFuture<ByteBuf> promise,
                                    Optional<Throwable> firstException) {
            if (attempt >= writeSet.size()) {
                promise.completeExceptionally(
                        firstException.orElse(new BKException.BKReadException()));
                return;
            }
            BookieId bookie = ensemble.get(writeSet.get(attempt));
            readEntry(bookie, ledgerId, entryId)
                .whenComplete((buffer, exception) -> {
                    if (exception != null) {
                        // sin-bin the bookie and fall through to the next one
                        notifyBookieError(bookie);
                        Optional<Throwable> firstException1 =
                            firstException.isPresent() ? firstException : Optional.of(exception);
                        fetchRetryLoop(entryId, attempt + 1,
                                       ensemble, writeSet, promise, firstException1);
                    } else {
                        promise.complete(buffer);
                    }
                });
        }
    }
    // convert callback api to future api
    private CompletableFuture<ByteBuf> readEntry(BookieId bookieId,
                                                 long ledgerId, long entryId) {
        CompletableFuture<ByteBuf> promise = new CompletableFuture<>();
        bookieClient.readEntry(bookieId, ledgerId, entryId,
                               (rc, ledgerId1, entryId1, buffer, ctx1) -> {
                                   if (rc != BKException.Code.OK) {
                                       promise.completeExceptionally(BKException.create(rc));
                                   } else {
                                       // keep the buffer alive past the callback;
                                       // released by copyFromAvailable's consumer
                                       buffer.retain();
                                       promise.complete(buffer);
                                   }
                               }, null, BookieProtocol.FLAG_NONE);
        return promise;
    }
    /**
     * Generate a map of preferred bookie indices. For each ensemble, generate the order
     * in which bookies should be tried for entries, notwithstanding errors.
     * For example, if a e5,w2,a2 ensemble has the bookies:
     * [bookie1, bookie2, bookie3, bookie4, bookie5]
     * and the current bookie is bookie2, then we should return something like:
     * [4, 2, 0, 3]
     * Then when retrieving an entry, even though it is only written to 2, we try the bookie
     * in the order from this list. This will cause more requests to go to the same bookie,
     * which should give us the benefit of read locality.
     * We don't want to simply sort by bookie id, as that would cause the same bookies to be
     * loaded for all ensembles.
     * Bookies which have presented errors are always tried last.
     */
    @VisibleForTesting
    static ImmutableSortedMap<Long, ImmutableList<Integer>> preferredBookieIndices(
            BookieId bookieId,
            LedgerMetadata metadata,
            Set<BookieId> errorBookies,
            long seed) {
        return metadata.getAllEnsembles().entrySet().stream()
            .collect(ImmutableSortedMap.toImmutableSortedMap(
                    Comparator.naturalOrder(),
                    e -> e.getKey(),
                    e -> {
                        List<BookieId> ensemble = e.getValue();
                        // get indices of the interesting bookies
                        int myIndex = ensemble.indexOf(bookieId);
                        Set<Integer> errorIndices = errorBookies.stream()
                            .map(b -> ensemble.indexOf(b)).collect(Collectors.toSet());
                        // turn bookies into positions and filter out my own
                        // bookie id (we're not going to try to read from outself)
                        List<Integer> indices = IntStream.range(0, ensemble.size())
                            .filter(i -> i != myIndex).boxed().collect(Collectors.toList());
                        // shuffle the indices based seed (normally ledgerId)
                        Collections.shuffle(indices, new Random(seed));
                        // Move the error bookies to the end
                        // Collections#sort is stable, so everything else remains the same
                        Collections.sort(indices, (a, b) -> {
                            boolean aErr = errorIndices.contains(a);
                            boolean bErr = errorIndices.contains(b);
                            if (aErr && !bErr) {
                                return 1;
                            } else if (!aErr && bErr) {
                                return -1;
                            } else {
                                return 0;
                            }
                        });
                        return ImmutableList.copyOf(indices);
                    }));
    }
    /**
     * Tracks bookies that have recently returned errors. A failed bookie is
     * binned for SINBIN_DURATION_MS from its most recent failure; expired
     * entries are purged lazily each time the set is read.
     */
    @VisibleForTesting
    static class SinBin {
        private final Ticker ticker;
        // bookie -> deadline (ms) after which the bookie is forgiven
        private final ConcurrentMap<BookieId, Long> errorBookies = new ConcurrentHashMap<>();
        SinBin(Ticker ticker) {
            this.ticker = ticker;
        }
        /**
         * Returns true if this is the first error for this bookie.
         */
        boolean addFailed(BookieId bookie) {
            long newDeadline = TimeUnit.NANOSECONDS.toMillis(ticker.read()) + SINBIN_DURATION_MS;
            Long oldDeadline = errorBookies.put(bookie, newDeadline);
            return oldDeadline == null;
        }
        // Purge expired entries, then return the currently binned bookies.
        // NOTE(review): this returns a live view of the map's keys, not a
        // snapshot — it can change under the caller; confirm callers tolerate
        // that (preferredBookieIndices only iterates it once).
        Set<BookieId> getErrorBookies() {
            long now = TimeUnit.NANOSECONDS.toMillis(ticker.read());
            Iterator<Map.Entry<BookieId, Long>> iterator = errorBookies.entrySet().iterator();
            while (iterator.hasNext()) {
                if (iterator.next().getValue() < now) {
                    iterator.remove();
                }
            }
            return errorBookies.keySet();
        }
    }
}
| 561 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/datainteg/DataIntegrityService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.datainteg;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.io.IOException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.common.component.AbstractLifecycleComponent;
import org.apache.bookkeeper.server.conf.BookieConfiguration;
import org.apache.bookkeeper.stats.StatsLogger;
/**
 * A lifecycle component that periodically checks whether a full data
 * integrity check is needed and, if so, runs one on a dedicated
 * single-threaded scheduler.
 */
@Slf4j
public class DataIntegrityService extends AbstractLifecycleComponent<BookieConfiguration> {
    private final DataIntegrityCheck check;
    private final ScheduledExecutorService scheduler;
    // Guarded by synchronized(this); non-null only while the service is started.
    private ScheduledFuture<?> scheduledFuture;
    public DataIntegrityService(BookieConfiguration conf,
                                StatsLogger statsLogger,
                                DataIntegrityCheck check) {
        super("data-integ", conf, statsLogger);
        this.check = check;
        scheduler = Executors.newSingleThreadScheduledExecutor(
                new ThreadFactoryBuilder()
                        .setNameFormat("bookie-data-integ-%d")
                        .setUncaughtExceptionHandler(
                                (t, ex) -> log.error("Event: {}, thread: {}",
                                        Events.DATA_INTEG_SERVICE_UNCAUGHT_ERROR,
                                        t, ex))
                        .build());
        scheduledFuture = null;
    }
    // allow tests to reduce interval
    protected int interval() {
        return 3;
    }
    protected TimeUnit intervalUnit() {
        return TimeUnit.SECONDS;
    }
    @Override
    protected void doStart() {
        log.info("Event: {}, interval: {}, intervalUnit: {}",
                 Events.DATA_INTEG_SERVICE_START, interval(), intervalUnit());
        synchronized (this) {
            scheduledFuture = scheduler.scheduleAtFixedRate(() -> {
                try {
                    // blocking get() keeps the single scheduler thread busy,
                    // so full checks never overlap
                    if (check.needsFullCheck()) {
                        check.runFullCheck().get();
                    }
                } catch (InterruptedException ie) {
                    log.warn("Event: {}", Events.DATA_INTEG_SERVICE_INTERRUPTED, ie);
                    // restore the interrupt flag for the scheduler
                    Thread.currentThread().interrupt();
                } catch (Throwable t) {
                    // catch everything: an escaping Throwable would cancel
                    // the periodic task permanently
                    log.error("Event: {}", Events.DATA_INTEG_SERVICE_ERROR, t);
                }
            }, 0, interval(), intervalUnit());
        }
    }
    @Override
    protected void doStop() {
        log.info("Event: {}", Events.DATA_INTEG_SERVICE_STOP);
        synchronized (this) {
            if (scheduledFuture != null) {
                scheduledFuture.cancel(true);
                scheduledFuture = null;
            }
        }
    }
    @Override
    protected void doClose() throws IOException {
        synchronized (this) {
            // just in case stop didn't get called, the scheduledfuture
            // would stop the scheduler from shutting down
            if (scheduledFuture != null) {
                scheduledFuture.cancel(true);
                scheduledFuture = null;
            }
        }
        scheduler.shutdown();
    }
}
| 562 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/datainteg/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Package of the classes implementing bookie data integrity checking
 * (detecting and repairing entries missing from local storage).
 */
package org.apache.bookkeeper.bookie.datainteg; | 563 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/stats/JournalStats.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.stats;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.ADD_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.CATEGORY_SERVER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.FORCE_LEDGER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_ADD_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_CREATION_LATENCY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_FLUSH_LATENCY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_FORCE_LEDGER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_FORCE_WRITE_BATCH_BYTES;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_FORCE_WRITE_BATCH_ENTRIES;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_FORCE_WRITE_ENQUEUE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_FORCE_WRITE_GROUPING_COUNT;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_FORCE_WRITE_QUEUE_SIZE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_MEMORY_MAX;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_MEMORY_USED;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_NUM_FLUSH_EMPTY_QUEUE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_NUM_FLUSH_MAX_OUTSTANDING_BYTES;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_NUM_FLUSH_MAX_WAIT;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_PROCESS_TIME_LATENCY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_QUEUE_LATENCY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_QUEUE_SIZE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_SYNC;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_WRITE_BYTES;
import java.util.function.Supplier;
import lombok.Getter;
import org.apache.bookkeeper.bookie.BookKeeperServerStats;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
/**
 * An umbrella class for journal related stats: per-operation latencies,
 * force-write batching distributions, queue-size counters, and journal
 * memory gauges.
 */
@StatsDoc(
    name = JOURNAL_SCOPE,
    category = CATEGORY_SERVER,
    help = "Journal related stats"
)
@Getter
public class JournalStats {
    @StatsDoc(
        name = JOURNAL_ADD_ENTRY,
        help = "operation stats of recording addEntry requests in the journal",
        parent = ADD_ENTRY
    )
    private final OpStatsLogger journalAddEntryStats;
    @StatsDoc(
        name = JOURNAL_FORCE_LEDGER,
        help = "operation stats of recording forceLedger requests in the journal",
        parent = FORCE_LEDGER
    )
    private final OpStatsLogger journalForceLedgerStats;
    @StatsDoc(
        name = JOURNAL_SYNC,
        help = "operation stats of syncing data to journal disks",
        parent = JOURNAL_ADD_ENTRY,
        happensAfter = JOURNAL_FORCE_WRITE_ENQUEUE
    )
    private final OpStatsLogger journalSyncStats;
    @StatsDoc(
        name = JOURNAL_FORCE_WRITE_ENQUEUE,
        help = "operation stats of enqueueing force write requests to force write queue",
        parent = JOURNAL_ADD_ENTRY,
        happensAfter = JOURNAL_PROCESS_TIME_LATENCY
    )
    private final OpStatsLogger fwEnqueueTimeStats;
    @StatsDoc(
        name = JOURNAL_CREATION_LATENCY,
        help = "operation stats of creating journal files",
        parent = JOURNAL_PROCESS_TIME_LATENCY
    )
    private final OpStatsLogger journalCreationStats;
    @StatsDoc(
        name = JOURNAL_FLUSH_LATENCY,
        help = "operation stats of flushing data from memory to filesystem (but not yet fsyncing to disks)",
        parent = JOURNAL_PROCESS_TIME_LATENCY,
        happensAfter = JOURNAL_CREATION_LATENCY
    )
    private final OpStatsLogger journalFlushStats;
    @StatsDoc(
        name = JOURNAL_PROCESS_TIME_LATENCY,
        help = "operation stats of processing requests in a journal (from dequeue an item to finish processing it)",
        parent = JOURNAL_ADD_ENTRY,
        happensAfter = JOURNAL_QUEUE_LATENCY
    )
    private final OpStatsLogger journalProcessTimeStats;
    @StatsDoc(
        name = JOURNAL_QUEUE_LATENCY,
        help = "operation stats of enqueuing requests to a journal",
        parent = JOURNAL_ADD_ENTRY
    )
    private final OpStatsLogger journalQueueStats;
    @StatsDoc(
        name = JOURNAL_FORCE_WRITE_GROUPING_COUNT,
        help = "The distribution of number of force write requests grouped in a force write"
    )
    private final OpStatsLogger forceWriteGroupingCountStats;
    @StatsDoc(
        name = JOURNAL_FORCE_WRITE_BATCH_ENTRIES,
        help = "The distribution of number of entries grouped together into a force write request"
    )
    private final OpStatsLogger forceWriteBatchEntriesStats;
    @StatsDoc(
        name = JOURNAL_FORCE_WRITE_BATCH_BYTES,
        help = "The distribution of number of bytes grouped together into a force write request"
    )
    private final OpStatsLogger forceWriteBatchBytesStats;
    @StatsDoc(
        name = JOURNAL_QUEUE_SIZE,
        help = "The journal queue size"
    )
    private final Counter journalQueueSize;
    @StatsDoc(
        name = JOURNAL_FORCE_WRITE_QUEUE_SIZE,
        help = "The force write queue size"
    )
    private final Counter forceWriteQueueSize;
    @StatsDoc(
        name = JOURNAL_NUM_FLUSH_MAX_WAIT,
        help = "The number of journal flushes triggered by MAX_WAIT time"
    )
    private final Counter flushMaxWaitCounter;
    @StatsDoc(
        name = JOURNAL_NUM_FLUSH_MAX_OUTSTANDING_BYTES,
        help = "The number of journal flushes triggered by MAX_OUTSTANDING_BYTES"
    )
    private final Counter flushMaxOutstandingBytesCounter;
    @StatsDoc(
        name = JOURNAL_NUM_FLUSH_EMPTY_QUEUE,
        help = "The number of journal flushes triggered when journal queue becomes empty"
    )
    private final Counter flushEmptyQueueCounter;
    @StatsDoc(
        name = JOURNAL_WRITE_BYTES,
        help = "The number of bytes appended to the journal"
    )
    private final Counter journalWriteBytes;
    @StatsDoc(
        name = JOURNAL_MEMORY_MAX,
        help = "The max amount of memory in bytes that can be used by the bookie journal"
    )
    private final Gauge<Long> journalMemoryMaxStats;
    @StatsDoc(
        name = JOURNAL_MEMORY_USED,
        help = "The actual amount of memory in bytes currently used by the bookie journal"
    )
    private final Gauge<Long> journalMemoryUsedStats;
    /**
     * Create the journal stats holder and register its gauges.
     *
     * @param statsLogger              logger scoped to the journal
     * @param maxJournalMemoryBytes    configured maximum journal memory, reported
     *                                 as a constant gauge
     * @param currentJournalMemoryBytes supplier sampled on demand for the
     *                                 memory-used gauge
     */
    public JournalStats(StatsLogger statsLogger, final long maxJournalMemoryBytes,
                        Supplier<Long> currentJournalMemoryBytes) {
        // Use the statically imported stat-name constants (consistent with the
        // @StatsDoc annotations above) rather than re-qualifying each one.
        journalAddEntryStats = statsLogger.getOpStatsLogger(JOURNAL_ADD_ENTRY);
        journalForceLedgerStats = statsLogger.getOpStatsLogger(JOURNAL_FORCE_LEDGER);
        journalSyncStats = statsLogger.getOpStatsLogger(JOURNAL_SYNC);
        fwEnqueueTimeStats = statsLogger.getOpStatsLogger(JOURNAL_FORCE_WRITE_ENQUEUE);
        journalCreationStats = statsLogger.getOpStatsLogger(JOURNAL_CREATION_LATENCY);
        journalFlushStats = statsLogger.getOpStatsLogger(JOURNAL_FLUSH_LATENCY);
        journalQueueStats = statsLogger.getOpStatsLogger(JOURNAL_QUEUE_LATENCY);
        journalProcessTimeStats = statsLogger.getOpStatsLogger(JOURNAL_PROCESS_TIME_LATENCY);
        forceWriteGroupingCountStats =
            statsLogger.getOpStatsLogger(JOURNAL_FORCE_WRITE_GROUPING_COUNT);
        forceWriteBatchEntriesStats =
            statsLogger.getOpStatsLogger(JOURNAL_FORCE_WRITE_BATCH_ENTRIES);
        forceWriteBatchBytesStats = statsLogger.getOpStatsLogger(JOURNAL_FORCE_WRITE_BATCH_BYTES);
        journalQueueSize = statsLogger.getCounter(JOURNAL_QUEUE_SIZE);
        forceWriteQueueSize = statsLogger.getCounter(JOURNAL_FORCE_WRITE_QUEUE_SIZE);
        flushMaxWaitCounter = statsLogger.getCounter(JOURNAL_NUM_FLUSH_MAX_WAIT);
        flushMaxOutstandingBytesCounter =
            statsLogger.getCounter(JOURNAL_NUM_FLUSH_MAX_OUTSTANDING_BYTES);
        flushEmptyQueueCounter = statsLogger.getCounter(JOURNAL_NUM_FLUSH_EMPTY_QUEUE);
        journalWriteBytes = statsLogger.getCounter(JOURNAL_WRITE_BYTES);
        // Constant gauge: the configured maximum never changes at runtime.
        journalMemoryMaxStats = new Gauge<Long>() {
            @Override
            public Long getDefaultValue() {
                return maxJournalMemoryBytes;
            }
            @Override
            public Long getSample() {
                return maxJournalMemoryBytes;
            }
        };
        statsLogger.registerGauge(JOURNAL_MEMORY_MAX, journalMemoryMaxStats);
        // Sampled gauge: queries the journal's current memory usage on demand.
        journalMemoryUsedStats = new Gauge<Long>() {
            @Override
            public Long getDefaultValue() {
                return -1L;
            }
            @Override
            public Long getSample() {
                return currentJournalMemoryBytes.get();
            }
        };
        statsLogger.registerGauge(JOURNAL_MEMORY_USED, journalMemoryUsedStats);
    }
}
| 564 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/stats/BookieStats.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.stats;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.ADD_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_ADD_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_ADD_ENTRY_BYTES;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_FORCE_LEDGER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_GET_LIST_OF_ENTRIES_OF_LEDGER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_READ_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_READ_ENTRY_BYTES;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_RECOVERY_ADD_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.CATEGORY_SERVER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.GET_LIST_OF_ENTRIES_OF_LEDGER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_DIRS;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_QUEUE_MAX_SIZE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.READ_BYTES;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.READ_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.WRITE_BYTES;
import lombok.Getter;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
/**
 * A umbrella class for bookie related stats: operation latency and byte-size
 * distributions for add/read, plus journal-configuration gauges.
 */
@StatsDoc(
    name = BOOKIE_SCOPE,
    category = CATEGORY_SERVER,
    help = "Bookie related stats"
)
@Getter
public class BookieStats {
    // Expose Stats
    final StatsLogger statsLogger;
    @StatsDoc(name = WRITE_BYTES, help = "total bytes written to a bookie")
    private final Counter writeBytes;
    @StatsDoc(name = READ_BYTES, help = "total bytes read from a bookie")
    private final Counter readBytes;
    @StatsDoc(name = BOOKIE_FORCE_LEDGER, help = "total force operations occurred on a bookie")
    private final Counter forceLedgerOps;
    // Bookie Operation Latency Stats
    @StatsDoc(
        name = BOOKIE_ADD_ENTRY,
        help = "operations stats of AddEntry on a bookie",
        parent = ADD_ENTRY
    )
    private final OpStatsLogger addEntryStats;
    @StatsDoc(name = BOOKIE_RECOVERY_ADD_ENTRY, help = "operation stats of RecoveryAddEntry on a bookie")
    private final OpStatsLogger recoveryAddEntryStats;
    @StatsDoc(
        name = BOOKIE_READ_ENTRY,
        help = "operation stats of ReadEntry on a bookie",
        parent = READ_ENTRY
    )
    private final OpStatsLogger readEntryStats;
    @StatsDoc(
        name = BOOKIE_GET_LIST_OF_ENTRIES_OF_LEDGER,
        help = "operation stats of GetListOfEntriesOfLedger on a bookie",
        parent = GET_LIST_OF_ENTRIES_OF_LEDGER
    )
    private final OpStatsLogger getListOfEntriesOfLedgerStats;
    // Bookie Operation Bytes Stats
    @StatsDoc(name = BOOKIE_ADD_ENTRY_BYTES, help = "bytes stats of AddEntry on a bookie")
    private final OpStatsLogger addBytesStats;
    @StatsDoc(name = BOOKIE_READ_ENTRY_BYTES, help = "bytes stats of ReadEntry on a bookie")
    private final OpStatsLogger readBytesStats;
    @StatsDoc(name = JOURNAL_DIRS, help = "number of configured journal directories")
    private final Gauge<Integer> journalDirsGauge;
    @StatsDoc(name = JOURNAL_QUEUE_MAX_SIZE, help = "maximum length of a journal queue")
    private final Gauge<Integer> journalQueueMaxQueueSizeGauge;
    /**
     * Create the bookie stats holder and register the two constant
     * configuration gauges (journal dir count and max queue size).
     */
    public BookieStats(StatsLogger statsLogger, int numJournalDirs, int maxJournalQueueSize) {
        this.statsLogger = statsLogger;
        writeBytes = statsLogger.getCounter(WRITE_BYTES);
        readBytes = statsLogger.getCounter(READ_BYTES);
        forceLedgerOps = statsLogger.getCounter(BOOKIE_FORCE_LEDGER);
        addEntryStats = statsLogger.getOpStatsLogger(BOOKIE_ADD_ENTRY);
        recoveryAddEntryStats = statsLogger.getOpStatsLogger(BOOKIE_RECOVERY_ADD_ENTRY);
        readEntryStats = statsLogger.getOpStatsLogger(BOOKIE_READ_ENTRY);
        getListOfEntriesOfLedgerStats = statsLogger.getOpStatsLogger(BOOKIE_GET_LIST_OF_ENTRIES_OF_LEDGER);
        addBytesStats = statsLogger.getOpStatsLogger(BOOKIE_ADD_ENTRY_BYTES);
        readBytesStats = statsLogger.getOpStatsLogger(BOOKIE_READ_ENTRY_BYTES);
        // Both gauges report fixed configuration values, so sample == default.
        journalDirsGauge = new Gauge<Integer>() {
            @Override
            public Integer getDefaultValue() {
                return numJournalDirs;
            }
            @Override
            public Integer getSample() {
                return numJournalDirs;
            }
        };
        statsLogger.registerGauge(JOURNAL_DIRS, journalDirsGauge);
        journalQueueMaxQueueSizeGauge = new Gauge<Integer>() {
            @Override
            public Integer getDefaultValue() {
                return maxJournalQueueSize;
            }
            @Override
            public Integer getSample() {
                return maxJournalQueueSize;
            }
        };
        statsLogger.registerGauge(JOURNAL_QUEUE_MAX_SIZE, journalQueueMaxQueueSizeGauge);
    }
}
| 565 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/stats/EntryMemTableStats.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.stats;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_ADD_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_READ_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.CATEGORY_SERVER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.SKIP_LIST_FLUSH_BYTES;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.SKIP_LIST_GET_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.SKIP_LIST_PUT_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.SKIP_LIST_SNAPSHOT;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.SKIP_LIST_THROTTLING;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.SKIP_LIST_THROTTLING_LATENCY;
import lombok.Getter;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
/**
 * An umbrella class grouping the stat instruments used by the entry memtable
 * (skip list): operation stats for snapshot/put/get, plus counters for
 * flushed bytes and write throttling.
 */
@StatsDoc(
    name = BOOKIE_SCOPE,
    category = CATEGORY_SERVER,
    help = "EntryMemTable related stats"
)
@Getter
public class EntryMemTableStats {
    @StatsDoc(
        name = SKIP_LIST_SNAPSHOT,
        help = "operation stats of taking memtable snapshots"
    )
    private final OpStatsLogger snapshotStats;
    @StatsDoc(
        name = SKIP_LIST_PUT_ENTRY,
        help = "operation stats of putting entries to memtable",
        parent = BOOKIE_ADD_ENTRY
    )
    private final OpStatsLogger putEntryStats;
    @StatsDoc(
        name = SKIP_LIST_GET_ENTRY,
        help = "operation stats of getting entries from memtable",
        parent = BOOKIE_READ_ENTRY
    )
    private final OpStatsLogger getEntryStats;
    @StatsDoc(
        name = SKIP_LIST_FLUSH_BYTES,
        help = "The number of bytes flushed from memtable to entry log files"
    )
    private final Counter flushBytesCounter;
    @StatsDoc(
        name = SKIP_LIST_THROTTLING,
        help = "The number of requests throttled due to memtables are full"
    )
    private final Counter throttlingCounter;
    @StatsDoc(
        name = SKIP_LIST_THROTTLING_LATENCY,
        help = "The distribution of request throttled duration"
    )
    private final OpStatsLogger throttlingStats;
    /**
     * Resolve every instrument once up front from the given stats logger;
     * the instances are reused for the lifetime of the memtable.
     */
    public EntryMemTableStats(StatsLogger statsLogger) {
        snapshotStats = statsLogger.getOpStatsLogger(SKIP_LIST_SNAPSHOT);
        putEntryStats = statsLogger.getOpStatsLogger(SKIP_LIST_PUT_ENTRY);
        getEntryStats = statsLogger.getOpStatsLogger(SKIP_LIST_GET_ENTRY);
        flushBytesCounter = statsLogger.getCounter(SKIP_LIST_FLUSH_BYTES);
        throttlingCounter = statsLogger.getCounter(SKIP_LIST_THROTTLING);
        throttlingStats = statsLogger.getOpStatsLogger(SKIP_LIST_THROTTLING_LATENCY);
    }
}
| 566 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/stats/IndexInMemPageMgrStats.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.stats;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.CATEGORY_SERVER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.INDEX_INMEM_ILLEGAL_STATE_DELETE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.INDEX_INMEM_ILLEGAL_STATE_RESET;
import lombok.Getter;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
/**
* A umbrella class for {@link org.apache.bookkeeper.bookie.IndexInMemPageMgr} stats.
*/
@StatsDoc(
    name = BOOKIE_SCOPE,
    category = CATEGORY_SERVER,
    help = "Index InMemPage Manager related stats"
)
@Getter
public class IndexInMemPageMgrStats {

    // Counter bumped when a reset finds an index page in an illegal state.
    @StatsDoc(
        name = INDEX_INMEM_ILLEGAL_STATE_RESET,
        help = "The number of index pages detected as in illegal state when resetting"
    )
    private final Counter illegalStateResetCounter;

    // Counter bumped when a delete finds an index page in an illegal state.
    @StatsDoc(
        name = INDEX_INMEM_ILLEGAL_STATE_DELETE,
        help = "The number of index pages detected as in illegal state when deleting"
    )
    private final Counter illegalStateDeleteCounter;

    /**
     * Resolves both illegal-state counters from the supplied stats logger.
     *
     * @param statsLogger stats logger scoped to the in-memory index page manager
     */
    public IndexInMemPageMgrStats(StatsLogger statsLogger) {
        this.illegalStateResetCounter = statsLogger.getCounter(INDEX_INMEM_ILLEGAL_STATE_RESET);
        this.illegalStateDeleteCounter = statsLogger.getCounter(INDEX_INMEM_ILLEGAL_STATE_DELETE);
    }
}
| 567 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/stats/GarbageCollectorStats.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.stats;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.ACTIVE_ENTRY_LOG_COUNT;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.ACTIVE_ENTRY_LOG_SPACE_BYTES;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.ACTIVE_LEDGER_COUNT;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.CATEGORY_SERVER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.DELETED_LEDGER_COUNT;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.MAJOR_COMPACTION_COUNT;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.MINOR_COMPACTION_COUNT;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.RECLAIMED_COMPACTION_SPACE_BYTES;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.RECLAIMED_DELETION_SPACE_BYTES;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.RECLAIM_FAILED_TO_DELETE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.THREAD_RUNTIME;
import java.util.function.Supplier;
import lombok.Getter;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
/**
* A umbrella class for gc stats.
*/
@StatsDoc(
    name = BOOKIE_SCOPE,
    category = CATEGORY_SERVER,
    help = "Garbage Collector related stats"
)
@Getter
public class GarbageCollectorStats {

    final StatsLogger statsLogger;

    @StatsDoc(
        name = MINOR_COMPACTION_COUNT,
        help = "Number of minor compactions"
    )
    private final Counter minorCompactionCounter;

    @StatsDoc(
        name = MAJOR_COMPACTION_COUNT,
        help = "Number of major compactions"
    )
    private final Counter majorCompactionCounter;

    @StatsDoc(
        name = RECLAIMED_DELETION_SPACE_BYTES,
        help = "Number of disk space bytes reclaimed via deleting entry log files"
    )
    private final Counter reclaimedSpaceViaDeletes;

    @StatsDoc(
        name = RECLAIMED_COMPACTION_SPACE_BYTES,
        help = "Number of disk space bytes reclaimed via compacting entry log files"
    )
    private final Counter reclaimedSpaceViaCompaction;

    @StatsDoc(
        name = RECLAIM_FAILED_TO_DELETE,
        help = "Number of reclaim failed counts when deleting entry log files"
    )
    private final Counter reclaimFailedToDelete;

    @StatsDoc(
        name = DELETED_LEDGER_COUNT,
        help = "Number of ledgers deleted by garbage collection"
    )
    private final Counter deletedLedgerCounter;

    @StatsDoc(
        name = THREAD_RUNTIME,
        help = "Operation stats of garbage collections"
    )
    private final OpStatsLogger gcThreadRuntime;

    @StatsDoc(
        name = ACTIVE_ENTRY_LOG_COUNT,
        help = "Current number of active entry log files"
    )
    private final Gauge<Integer> activeEntryLogCountGauge;

    @StatsDoc(
        name = ACTIVE_ENTRY_LOG_SPACE_BYTES,
        help = "Current number of active entry log space bytes"
    )
    private final Gauge<Long> activeEntryLogSpaceBytesGauge;

    @StatsDoc(
        name = ACTIVE_LEDGER_COUNT,
        help = "Current number of active ledgers"
    )
    private final Gauge<Integer> activeLedgerCountGauge;

    /**
     * Creates the GC stats holder, resolving all counters and registering the
     * three activity gauges against the supplied stats logger.
     *
     * @param statsLogger stats logger scoped to garbage collection
     * @param activeEntryLogCountSupplier supplies the current number of active entry log files
     * @param activeEntryLogSpaceBytesSupplier supplies the current active entry log space in bytes
     * @param activeLedgerCountSupplier supplies the current number of active ledgers
     */
    public GarbageCollectorStats(StatsLogger statsLogger,
                                 Supplier<Integer> activeEntryLogCountSupplier,
                                 Supplier<Long> activeEntryLogSpaceBytesSupplier,
                                 Supplier<Integer> activeLedgerCountSupplier) {
        this.statsLogger = statsLogger;
        this.minorCompactionCounter = statsLogger.getCounter(MINOR_COMPACTION_COUNT);
        this.majorCompactionCounter = statsLogger.getCounter(MAJOR_COMPACTION_COUNT);
        this.reclaimedSpaceViaCompaction = statsLogger.getCounter(RECLAIMED_COMPACTION_SPACE_BYTES);
        this.reclaimedSpaceViaDeletes = statsLogger.getCounter(RECLAIMED_DELETION_SPACE_BYTES);
        this.reclaimFailedToDelete = statsLogger.getCounter(RECLAIM_FAILED_TO_DELETE);
        this.gcThreadRuntime = statsLogger.getOpStatsLogger(THREAD_RUNTIME);
        this.deletedLedgerCounter = statsLogger.getCounter(DELETED_LEDGER_COUNT);

        // The three gauges differed only in default value and sample supplier;
        // build them through one helper instead of three copies of the anonymous class.
        this.activeEntryLogCountGauge = newGauge(0, activeEntryLogCountSupplier);
        statsLogger.registerGauge(ACTIVE_ENTRY_LOG_COUNT, activeEntryLogCountGauge);
        this.activeEntryLogSpaceBytesGauge = newGauge(0L, activeEntryLogSpaceBytesSupplier);
        statsLogger.registerGauge(ACTIVE_ENTRY_LOG_SPACE_BYTES, activeEntryLogSpaceBytesGauge);
        this.activeLedgerCountGauge = newGauge(0, activeLedgerCountSupplier);
        statsLogger.registerGauge(ACTIVE_LEDGER_COUNT, activeLedgerCountGauge);
    }

    /**
     * Builds a gauge whose sample is drawn from {@code supplier} and whose
     * default is {@code defaultValue}.
     */
    private static <T> Gauge<T> newGauge(T defaultValue, Supplier<T> supplier) {
        return new Gauge<T>() {
            @Override
            public T getDefaultValue() {
                return defaultValue;
            }

            @Override
            public T getSample() {
                return supplier.get();
            }
        };
    }
}
| 568 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/stats/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Package of the classes for defining bookie stats.
*/
package org.apache.bookkeeper.bookie.stats; | 569 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/stats/IndexPersistenceMgrStats.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie.stats;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.CATEGORY_SERVER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.LEDGER_CACHE_NUM_EVICTED_LEDGERS;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.PENDING_GET_FILE_INFO;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.READ_FILE_INFO_CACHE_SIZE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.WRITE_FILE_INFO_CACHE_SIZE;
import java.util.function.Supplier;
import lombok.Getter;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
/**
* A umbrella class for {@link org.apache.bookkeeper.bookie.IndexPersistenceMgr} stats.
*/
@StatsDoc(
    name = BOOKIE_SCOPE,
    category = CATEGORY_SERVER,
    help = "Index Persistence Manager related stats"
)
@Getter
public class IndexPersistenceMgrStats {

    // Stats
    @StatsDoc(
        name = LEDGER_CACHE_NUM_EVICTED_LEDGERS,
        help = "Number of ledgers evicted from ledger caches"
    )
    private final Counter evictedLedgersCounter;

    @StatsDoc(
        name = PENDING_GET_FILE_INFO,
        help = "Number of pending get-file-info requests"
    )
    private final Counter pendingGetFileInfoCounter;

    @StatsDoc(
        name = WRITE_FILE_INFO_CACHE_SIZE,
        help = "Current write file info cache size"
    )
    private final Gauge<Number> writeFileInfoCacheSizeGauge;

    @StatsDoc(
        name = READ_FILE_INFO_CACHE_SIZE,
        help = "Current read file info cache size"
    )
    private final Gauge<Number> readFileInfoCacheSizeGauge;

    /**
     * Creates the stats holder, resolving both counters and registering the two
     * file-info cache-size gauges against the supplied stats logger.
     *
     * @param statsLogger stats logger scoped to the index persistence manager
     * @param writeFileInfoCacheSizeSupplier supplies the current write file info cache size
     * @param readFileInfoCacheSizeSupplier supplies the current read file info cache size
     */
    public IndexPersistenceMgrStats(StatsLogger statsLogger,
                                    Supplier<Number> writeFileInfoCacheSizeSupplier,
                                    Supplier<Number> readFileInfoCacheSizeSupplier) {
        evictedLedgersCounter = statsLogger.getCounter(LEDGER_CACHE_NUM_EVICTED_LEDGERS);
        pendingGetFileInfoCounter = statsLogger.getCounter(PENDING_GET_FILE_INFO);

        // Both cache-size gauges had identical anonymous-class bodies; build them
        // through one helper instead of duplicating the boilerplate.
        writeFileInfoCacheSizeGauge = newCacheSizeGauge(writeFileInfoCacheSizeSupplier);
        statsLogger.registerGauge(WRITE_FILE_INFO_CACHE_SIZE, writeFileInfoCacheSizeGauge);
        readFileInfoCacheSizeGauge = newCacheSizeGauge(readFileInfoCacheSizeSupplier);
        statsLogger.registerGauge(READ_FILE_INFO_CACHE_SIZE, readFileInfoCacheSizeGauge);
    }

    /**
     * Builds a cache-size gauge backed by the given supplier, defaulting to 0.
     */
    private static Gauge<Number> newCacheSizeGauge(Supplier<Number> supplier) {
        return new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }

            @Override
            public Number getSample() {
                return supplier.get();
            }
        };
    }
}
| 570 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd/EtcdRegistrationTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import static org.apache.bookkeeper.common.concurrent.FutureUtils.result;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import com.google.common.collect.Sets;
import io.etcd.jetcd.Client;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import lombok.Cleanup;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.BookieException;
import org.apache.bookkeeper.bookie.BookieException.MetadataStoreException;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.discover.BookieServiceInfo;
import org.apache.bookkeeper.discover.RegistrationClient;
import org.apache.bookkeeper.discover.RegistrationClient.RegistrationListener;
import org.apache.bookkeeper.metadata.etcd.testing.EtcdTestBase;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.versioning.LongVersion;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Version.Occurred;
import org.apache.bookkeeper.versioning.Versioned;
import org.apache.commons.lang.RandomStringUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
/**
* Test etcd based bookie registration.
*/
@Slf4j
public class EtcdRegistrationTest extends EtcdTestBase {

    /** Builds a deterministic bookie id for the i-th test bookie. */
    static BookieId newBookie(int i) {
        return BookieId.parse("127.0.0.1:" + (3181 + i));
    }

    @Rule
    public final TestName runtime = new TestName();

    private String scope;
    private RegistrationClient regClient;

    /**
     * Returns a listener that enqueues every bookie-set notification into
     * {@code notifications} for later inspection by the test.
     */
    protected static RegistrationListener newRegistrationListener(
        LinkedBlockingQueue<Versioned<Set<BookieId>>> notifications) {
        return bookies -> {
            log.info("Received new bookies: {}", bookies);
            try {
                notifications.put(bookies);
            } catch (InterruptedException e) {
                // Restore the interrupt status instead of swallowing it.
                Thread.currentThread().interrupt();
                log.error("Interrupted at enqueuing updated key set", e);
            }
        };
    }

    @Before
    @Override
    public void setUp() throws Exception {
        super.setUp();
        this.scope = RandomStringUtils.randomAlphabetic(16);
        this.regClient = new EtcdRegistrationClient(scope, etcdClient);
    }

    @After
    @Override
    public void tearDown() throws Exception {
        this.regClient.close();
        super.tearDown();
    }

    /** Callback run while {@code numBookies} bookies are registered under {@code scope}. */
    interface MultiBookiesTester {
        void test(String scope, int numBookies, boolean readonly) throws Exception;
    }

    /**
     * Registers {@code numBookies} bookies, runs {@code tester}, and always
     * closes the registration managers afterwards.
     */
    private static void runNumBookiesTest(final String scope,
                                          final int numBookies,
                                          final boolean readonly,
                                          MultiBookiesTester tester) throws Exception {
        final List<EtcdRegistrationManager> bookies = createNumBookies(readonly, numBookies, scope);
        try {
            tester.test(scope, numBookies, readonly);
        } finally {
            bookies.forEach(EtcdRegistrationManager::close);
        }
    }

    @Test
    public void testRegisterWritableBookies() throws Exception {
        testRegisterBookie(false);
    }

    @Test
    public void testRegisterReadonlyBookies() throws Exception {
        testRegisterBookie(true);
    }

    /**
     * Registers 3 bookies and verifies they show up in exactly one of the
     * writable/readonly sets, depending on {@code readonly}.
     */
    private void testRegisterBookie(boolean readonly) throws Exception {
        runNumBookiesTest(scope, 3, readonly, (scope, numBookies, ro) -> {
            Set<BookieId> expectedBookies = Sets.newHashSet();
            for (int i = 0; i < numBookies; i++) {
                expectedBookies.add(newBookie(i));
            }
            Set<BookieId> writableBookies = result(regClient.getWritableBookies()).getValue();
            Set<BookieId> readonlyBookies = result(regClient.getReadOnlyBookies()).getValue();
            if (ro) {
                assertEquals(0, writableBookies.size());
                assertEquals(numBookies, readonlyBookies.size());
                assertEquals(expectedBookies, readonlyBookies);
            } else {
                assertEquals(0, readonlyBookies.size());
                assertEquals(numBookies, writableBookies.size());
                assertEquals(expectedBookies, writableBookies);
            }
        });
    }

    @Test
    public void testWatchWritableBookies() throws Exception {
        testWatchBookies(false);
    }

    @Test
    public void testWatchReadonlyBookies() throws Exception {
        testWatchBookies(true);
    }

    /**
     * Watches the bookie sets while bookies register (short TTL) and then
     * disappear, asserting each notification carries a strictly newer version
     * and the expected membership.
     */
    private void testWatchBookies(boolean readonly) throws Exception {
        LinkedBlockingQueue<Versioned<Set<BookieId>>> writableChanges = new LinkedBlockingQueue<>();
        LinkedBlockingQueue<Versioned<Set<BookieId>>> readonlyChanges = new LinkedBlockingQueue<>();
        result(regClient.watchReadOnlyBookies(newRegistrationListener(readonlyChanges)));
        result(regClient.watchWritableBookies(newRegistrationListener(writableChanges)));
        // Both watchers fire an initial empty snapshot.
        Versioned<Set<BookieId>> versionedBookies = writableChanges.take();
        assertTrue(versionedBookies.getValue().isEmpty());
        versionedBookies = readonlyChanges.take();
        assertTrue(versionedBookies.getValue().isEmpty());

        final int numBookies = 3;
        // TTL of 1 second so the registrations expire quickly after close.
        final List<EtcdRegistrationManager> bookies = createNumBookies(readonly, numBookies, scope, 1);

        LinkedBlockingQueue<Versioned<Set<BookieId>>> changes;
        if (readonly) {
            changes = readonlyChanges;
        } else {
            changes = writableChanges;
        }

        // One notification per joining bookie, each with a monotonically newer version.
        Version preVersion = new LongVersion(-1);
        Set<BookieId> expectedBookies = new HashSet<>();
        for (int i = 0; i < numBookies; i++) {
            BookieId address = newBookie(i);
            expectedBookies.add(address);

            versionedBookies = changes.take();
            Version curVersion = versionedBookies.getVersion();
            assertEquals(Occurred.AFTER, curVersion.compare(preVersion));
            assertEquals(expectedBookies, versionedBookies.getValue());
            preVersion = curVersion;
        }

        // One notification per leaving bookie once the managers are closed.
        bookies.forEach(EtcdRegistrationManager::close);
        for (int i = 0; i < numBookies; i++) {
            versionedBookies = changes.take();
            Version curVersion = versionedBookies.getVersion();
            assertEquals(Occurred.AFTER, curVersion.compare(preVersion));
            assertEquals(numBookies - i - 1, versionedBookies.getValue().size());
            preVersion = curVersion;
        }

        // The other set must have stayed quiet after its initial snapshot.
        if (readonly) {
            assertEquals(0, writableChanges.size());
        } else {
            assertEquals(0, readonlyChanges.size());
        }
    }

    /**
     * Registers {@code numBookies} bookies, each through its own etcd client and
     * registration manager with the given lease TTL.
     */
    private static List<EtcdRegistrationManager> createNumBookies(boolean readonly,
                                                                  int numBookies,
                                                                  String scope,
                                                                  long ttlSeconds) throws BookieException {
        List<EtcdRegistrationManager> bookies = new ArrayList<>(numBookies);
        for (int i = 0; i < numBookies; i++) {
            Client client = newEtcdClient();
            EtcdRegistrationManager regMgr = new EtcdRegistrationManager(client, scope, ttlSeconds);
            bookies.add(regMgr);
            regMgr.registerBookie(newBookie(i), readonly, BookieServiceInfo.EMPTY);
        }
        return bookies;
    }

    private static List<EtcdRegistrationManager> createNumBookies(boolean readonly,
                                                                  int numBookies,
                                                                  String scope) throws BookieException {
        return createNumBookies(readonly, numBookies, scope, 60);
    }

    /**
     * A re-registration after the previous short-TTL lease expires must succeed
     * and obtain a fresh lease id.
     */
    @Test
    public void testRegisterBookieWaitUntilPreviousExpiredSuccess() throws Exception {
        long ttlSeconds = 1;
        long leaseId = -0xabcd;
        BookieId bookieId = BookieId.parse(runtime.getMethodName() + ":3181");
        try (EtcdRegistrationManager regManager = new EtcdRegistrationManager(
            newEtcdClient(), scope, ttlSeconds)
        ) {
            regManager.registerBookie(bookieId, false, BookieServiceInfo.EMPTY);
            leaseId = regManager.getBkRegister().getLeaseId();
            log.info("Registered bookie under scope '{}' with lease = {}", scope, leaseId);
        }
        assertNotEquals(-0xabcd, leaseId);
        final long prevLeaseId = leaseId;
        try (EtcdRegistrationManager regManager = new EtcdRegistrationManager(
            newEtcdClient(), scope, 100000 * ttlSeconds)
        ) {
            regManager.registerBookie(bookieId, false, BookieServiceInfo.EMPTY);
            leaseId = regManager.getBkRegister().getLeaseId();
            log.info("Registered bookie under scope '{}' with new lease = {}", scope, leaseId);
        }
        assertNotEquals(prevLeaseId, leaseId);
    }

    /**
     * A re-registration while the previous long-TTL lease is still alive must
     * fail with a {@link MetadataStoreException}.
     */
    @Test
    public void testRegisterBookieWaitUntilPreviousExpiredFailure() throws Exception {
        long ttlSeconds = 1;
        long leaseId = -0xabcd;
        BookieId bookieId = BookieId.parse(runtime.getMethodName() + ":3181");
        try (EtcdRegistrationManager regManager = new EtcdRegistrationManager(
            newEtcdClient(), scope, 10000000 * ttlSeconds)
        ) {
            regManager.registerBookie(bookieId, false, BookieServiceInfo.EMPTY);
            leaseId = regManager.getBkRegister().getLeaseId();
            log.info("Registered bookie under scope '{}' with lease = {}", scope, leaseId);
        }
        assertNotEquals(-0xabcd, leaseId);
        try (EtcdRegistrationManager regManager = new EtcdRegistrationManager(
            newEtcdClient(), scope, ttlSeconds)
        ) {
            regManager.registerBookie(bookieId, false, BookieServiceInfo.EMPTY);
            // Fixed: the original message used SLF4J-style '{}' placeholders, which
            // Assert.fail never substitutes.
            fail("Should fail to register bookie under scope '" + scope
                + "' since previous registration has not been expired yet");
        } catch (MetadataStoreException mse) {
            log.info("Encountered exception on registering bookie under scope '{}'", scope, mse);
            // expected
        }
    }

    @Test
    public void testRegisterWritableBookieWithSameLeaseId() throws Exception {
        testRegisterBookieWithSameLeaseId(false);
    }

    @Test
    public void testRegisterReadonlyBookieWithSameLeaseId() throws Exception {
        testRegisterBookieWithSameLeaseId(true);
    }

    /**
     * Registering the same bookie again through a second manager that shares the
     * first manager's lease must succeed.
     */
    private void testRegisterBookieWithSameLeaseId(boolean readonly) throws Exception {
        long ttlSeconds = 1;
        long leaseId = -0xabcd;
        BookieId bookieId = BookieId.parse(runtime.getMethodName() + ":3181");
        try (EtcdRegistrationManager regManager = new EtcdRegistrationManager(
            newEtcdClient(), scope, 10000000 * ttlSeconds)
        ) {
            regManager.registerBookie(bookieId, readonly, BookieServiceInfo.EMPTY);
            leaseId = regManager.getBkRegister().getLeaseId();
            log.info("Registered bookie under scope '{}' with lease = {}", scope, leaseId);
            log.info("Trying to register using same lease '{}'", leaseId);
            try (EtcdRegistrationManager regManager2 = new EtcdRegistrationManager(
                regManager.getClient(), scope, regManager.getBkRegister()
            )) {
                // Fixed: the original called regManager.registerBookie here, leaving
                // regManager2 (the manager sharing the lease) entirely unexercised.
                regManager2.registerBookie(bookieId, readonly, BookieServiceInfo.EMPTY);
            }
        }
    }

    /** Fetches the current readonly or writable bookie set. */
    private Set<BookieId> getBookies(boolean readonly) throws Exception {
        Set<BookieId> bookies;
        if (readonly) {
            bookies = result(regClient.getReadOnlyBookies()).getValue();
        } else {
            bookies = result(regClient.getWritableBookies()).getValue();
        }
        return bookies;
    }

    @Test
    public void testRegisterUnregisterWritableBookie() throws Exception {
        testRegisterUnregister(false);
    }

    @Test
    public void testRegisterUnregisterReadonlyBookie() throws Exception {
        testRegisterUnregister(true);
    }

    /**
     * Walks a single bookie through register/unregister and verifies the
     * visible bookie set at each step.
     */
    private void testRegisterUnregister(boolean readonly) throws Exception {
        String bookieIdStr = runtime.getMethodName();
        if (readonly) {
            bookieIdStr += "-readonly";
        }
        bookieIdStr += ":3181";
        BookieId bookieId = BookieId.parse(bookieIdStr);
        try (EtcdRegistrationManager regMgr = new EtcdRegistrationManager(
            newEtcdClient(), scope, 1000000000
        )) {
            // before registration
            Set<BookieId> bookies = getBookies(readonly);
            log.info("before registration : bookies = {}", bookies);
            assertEquals(0, bookies.size());
            // registered
            regMgr.registerBookie(bookieId, readonly, BookieServiceInfo.EMPTY);
            bookies = getBookies(readonly);
            log.info("after registered: bookies = {}", bookies);
            assertEquals(1, bookies.size());
            assertEquals(
                Sets.newHashSet(bookieId),
                bookies);
            // unregistered
            regMgr.unregisterBookie(bookieId, readonly);
            bookies = getBookies(readonly);
            log.info("after unregistered: bookies = {}", bookies);
            assertEquals(0, bookies.size());
        }
    }

    @Test
    public void testConcurrentWritableRegistration() throws Exception {
        testConcurrentRegistration(false);
    }

    @Test
    public void testConcurrentReadonlyRegistration() throws Exception {
        testConcurrentRegistration(true);
    }

    /**
     * Races 10 managers registering the same bookie id; exactly one must win
     * and the rest must fail with a {@link BookieException}.
     */
    private void testConcurrentRegistration(boolean readonly) throws Exception {
        final BookieId bookieId;
        if (readonly) {
            bookieId = BookieId.parse(runtime.getMethodName() + "-readonly:3181");
        } else {
            bookieId = BookieId.parse(runtime.getMethodName() + ":3181");
        }
        final int numBookies = 10;
        @Cleanup("shutdown")
        ExecutorService executor = Executors.newFixedThreadPool(numBookies);
        final CyclicBarrier startBarrier = new CyclicBarrier(numBookies);
        final CyclicBarrier completeBarrier = new CyclicBarrier(numBookies);
        final CompletableFuture<Void> doneFuture = new CompletableFuture<>();
        final AtomicInteger numSuccesses = new AtomicInteger(0);
        final AtomicInteger numFailures = new AtomicInteger(0);
        for (int i = 0; i < numBookies; i++) {
            executor.submit(() -> {
                try (EtcdRegistrationManager regMgr = new EtcdRegistrationManager(
                    newEtcdClient(), scope, 1
                )) {
                    try {
                        startBarrier.await();
                        regMgr.registerBookie(bookieId, readonly, BookieServiceInfo.EMPTY);
                        numSuccesses.incrementAndGet();
                    } catch (InterruptedException e) {
                        // Restore the interrupt status instead of swallowing it.
                        Thread.currentThread().interrupt();
                        log.warn("Interrupted at waiting for the other threads to start", e);
                    } catch (BrokenBarrierException e) {
                        log.warn("Start barrier is broken", e);
                    } catch (BookieException e) {
                        numFailures.incrementAndGet();
                    }
                    try {
                        completeBarrier.await();
                    } catch (InterruptedException e) {
                        // Restore the interrupt status instead of swallowing it.
                        Thread.currentThread().interrupt();
                        log.warn("Interrupted at waiting for the other threads to complete", e);
                    } catch (BrokenBarrierException e) {
                        log.warn("Complete barrier is broken", e);
                    }
                    FutureUtils.complete(doneFuture, null);
                }
            });
        }
        doneFuture.join();
        assertEquals(1, numSuccesses.get());
        assertEquals(numBookies - 1, numFailures.get());
    }
}
| 571 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd/EtcdCookieTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.BookieException.CookieNotFoundException;
import org.apache.bookkeeper.bookie.BookieException.MetadataStoreException;
import org.apache.bookkeeper.discover.RegistrationManager;
import org.apache.bookkeeper.metadata.etcd.testing.EtcdTestBase;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.versioning.LongVersion;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Version.Occurred;
import org.apache.bookkeeper.versioning.Versioned;
import org.apache.commons.lang.RandomStringUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
/**
* Test Etcd based cookie management.
*/
@Slf4j
public class EtcdCookieTest extends EtcdTestBase {

    @Rule
    public final TestName runtime = new TestName();

    private RegistrationManager regMgr;

    @Before
    @Override
    public void setUp() throws Exception {
        log.info("setup");
        super.setUp();
        // A random scope isolates this test's keys from other tests sharing etcd.
        String scope = RandomStringUtils.randomAlphabetic(16);
        this.regMgr = new EtcdRegistrationManager(
            newEtcdClient(),
            scope
        );
        log.info("done setup");
    }

    @After
    @Override
    public void tearDown() throws Exception {
        log.info("tear down");
        this.regMgr.close();
        super.tearDown();
    }

    /**
     * Asserts that two versioned cookies carry the same payload and that their
     * versions compare as concurrent (i.e. identical).
     */
    private static void assertCookieEquals(Versioned<byte[]> expected, Versioned<byte[]> actual) {
        assertEquals(Occurred.CONCURRENTLY, expected.getVersion().compare(actual.getVersion()));
        assertArrayEquals(expected.getValue(), actual.getValue());
    }

    /**
     * Exercises the full cookie lifecycle: read-miss, create, duplicate create,
     * bad-version update/delete, versioned update and delete, and delete-miss.
     */
    @Test
    public void readWriteRemoveCookie() throws Exception {
        BookieId bookieId = BookieId.parse(runtime.getMethodName() + ":3181");

        log.info("read non-existing cookie");
        // read the cookie doesn't exist
        try {
            regMgr.readCookie(bookieId);
            fail("Should fail reading cookie if cookie doesn't exist");
        } catch (CookieNotFoundException cnfe) {
            // expected
        }

        log.info("create cookie");
        // create the cookie
        String cookieData = RandomStringUtils.randomAlphanumeric(1024);
        Versioned<byte[]> cookie = new Versioned<>(
            cookieData.getBytes(UTF_8), Version.NEW
        );
        regMgr.writeCookie(bookieId, cookie);

        log.info("read cookie");
        // read the cookie
        Versioned<byte[]> readCookie = regMgr.readCookie(bookieId);
        assertEquals(cookieData, new String(readCookie.getValue(), UTF_8));

        log.info("try to create cookie again");
        // attempt to create the cookie again
        String newCookieData = RandomStringUtils.randomAlphabetic(512);
        Versioned<byte[]> newCookie = new Versioned<>(
            newCookieData.getBytes(UTF_8), Version.NEW
        );
        try {
            regMgr.writeCookie(bookieId, newCookie);
            fail("Should fail creating cookie if the cookie already exists");
        } catch (MetadataStoreException mse) {
            assertTrue(mse.getMessage().contains("Conflict on writing cookie"));
        }
        Versioned<byte[]> readCookie2 = regMgr.readCookie(bookieId);
        assertCookieEquals(readCookie, readCookie2);

        log.info("update cookie with wrong version");
        // attempt to update the cookie with a wrong version
        newCookie = new Versioned<>(
            newCookieData.getBytes(UTF_8), new LongVersion(Long.MAX_VALUE)
        );
        try {
            regMgr.writeCookie(bookieId, newCookie);
            // Fixed: every other negative path fails fast; without this the test
            // only detected a wrongly-successful write indirectly further below.
            fail("Should fail writing cookie with a bad version");
        } catch (MetadataStoreException mse) {
            assertTrue(mse.getMessage().contains("Conflict on writing cookie"));
        }
        readCookie2 = regMgr.readCookie(bookieId);
        assertCookieEquals(readCookie, readCookie2);

        log.info("delete cookie with wrong version");
        // delete the cookie with a wrong version
        LongVersion badVersion = new LongVersion(Long.MAX_VALUE);
        try {
            regMgr.removeCookie(bookieId, badVersion);
            fail("Should fail to remove cookie with bad version");
        } catch (MetadataStoreException mse) {
            assertTrue(mse.getMessage().contains(
                "bad version '" + badVersion + "'"
            ));
        }
        readCookie2 = regMgr.readCookie(bookieId);
        assertCookieEquals(readCookie, readCookie2);

        log.info("update with right version");
        // update the cookie with right version
        newCookie = new Versioned<>(
            newCookieData.getBytes(UTF_8), readCookie2.getVersion());
        regMgr.writeCookie(bookieId, newCookie);
        readCookie2 = regMgr.readCookie(bookieId);
        assertEquals(newCookieData, new String(readCookie2.getValue(), UTF_8));
        assertEquals(Occurred.AFTER, readCookie2.getVersion().compare(readCookie.getVersion()));

        log.info("delete with right version");
        // delete the cookie with right version
        regMgr.removeCookie(bookieId, readCookie2.getVersion());
        try {
            regMgr.readCookie(bookieId);
            fail("Should fail reading cookie if cookie doesn't exist");
        } catch (CookieNotFoundException cnfe) {
            // expected
        }

        log.info("remove non-existing cookie");
        // remove a cookie that doesn't exist
        try {
            regMgr.removeCookie(bookieId, readCookie2.getVersion());
            fail("Should fail removing cookie if cookie doesn't exist");
        } catch (CookieNotFoundException cnfe) {
            // expected
        }
    }
}
| 572 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd/EtcdLedgerManagerTest.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.bookkeeper.common.concurrent.FutureUtils.result;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.BKException.Code;
import org.apache.bookkeeper.client.BookKeeper.DigestType;
import org.apache.bookkeeper.client.LedgerMetadataBuilder;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.meta.LedgerManager.LedgerRange;
import org.apache.bookkeeper.meta.LedgerManager.LedgerRangeIterator;
import org.apache.bookkeeper.metadata.etcd.helpers.ValueStream;
import org.apache.bookkeeper.metadata.etcd.testing.EtcdTestBase;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.LedgerMetadataListener;
import org.apache.bookkeeper.versioning.LongVersion;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Versioned;
import org.apache.commons.lang.RandomStringUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
 * Integration test {@link EtcdLedgerManager}.
 *
 * <p>Each test case runs against a fresh, randomly named etcd scope (key prefix)
 * provided on top of the shared etcd cluster from {@link EtcdTestBase}, so the
 * tests do not interfere with one another.
 */
@Slf4j
public class EtcdLedgerManagerTest extends EtcdTestBase {

    // etcd key prefix isolating this test run's ledger metadata
    private String scope;
    // ledger manager under test; created in setUp(), closed in tearDown()
    private EtcdLedgerManager lm;

    @Override
    @Before
    public void setUp() throws Exception {
        super.setUp();
        this.scope = RandomStringUtils.randomAlphabetic(8);
        this.lm = new EtcdLedgerManager(etcdClient, scope);
    }

    @Override
    @After
    public void tearDown() throws Exception {
        if (null != lm) {
            lm.close();
        }
        super.tearDown();
    }

    /**
     * Exercises the full create/read/update/delete lifecycle of ledger metadata,
     * including the failure paths: operations on a missing ledger, duplicate
     * creation, and writes/deletes with a stale metadata version.
     */
    @Test
    public void testLedgerCRUD() throws Exception {
        long ledgerId = System.currentTimeMillis();
        List<BookieId> ensemble = Lists.newArrayList(
            BookieId.parse("192.0.2.1:1234"),
            BookieId.parse("192.0.2.2:1234"),
            BookieId.parse("192.0.2.3:1234"));
        LedgerMetadata metadata = LedgerMetadataBuilder.create().withId(ledgerId)
            .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(2)
            .withPassword("test-password".getBytes(UTF_8))
            .withDigestType(DigestType.CRC32C.toApiDigestType())
            .newEnsembleEntry(0L, ensemble)
            .build();
        // ledger doesn't exist: read
        try {
            result(lm.readLedgerMetadata(ledgerId));
            fail("Should fail on reading ledger metadata if the ledger doesn't exist");
        } catch (BKException bke) {
            assertEquals(Code.NoSuchLedgerExistsException, bke.getCode());
        }
        // ledger doesn't exist : delete
        try {
            result(lm.removeLedgerMetadata(ledgerId, new LongVersion(999L)));
            fail("Should fail on deleting ledger metadata if the ledger doesn't exist");
        } catch (BKException bke) {
            assertEquals(Code.NoSuchLedgerExistsException, bke.getCode());
        }
        // ledger doesn't exist : write
        try {
            result(lm.writeLedgerMetadata(ledgerId, metadata, new LongVersion(999L)));
            fail("Should fail on updating ledger metadata if the ledger doesn't exist");
        } catch (BKException bke) {
            assertEquals(Code.NoSuchLedgerExistsException, bke.getCode());
        }
        // ledger doesn't exist : create
        Versioned<LedgerMetadata> writtenMetadata = result(lm.createLedgerMetadata(ledgerId, metadata));
        assertSame(metadata, writtenMetadata.getValue());
        Version version = writtenMetadata.getVersion();
        assertNotNull(version);
        assertTrue(version instanceof LongVersion);
        // versions are backed by etcd revisions, hence strictly positive
        assertTrue(((LongVersion) version).getLongVersion() > 0L);
        // ledger exists : create
        // attempt to create the ledger again will result in exception `LedgerExistsException`
        try {
            result(lm.createLedgerMetadata(ledgerId, metadata));
            fail("Should fail on creating ledger metadata if the ledger already exists");
        } catch (BKException bke) {
            assertEquals(Code.LedgerExistException, bke.getCode());
        }
        // ledger exists: get
        Versioned<LedgerMetadata> readMetadata = result(lm.readLedgerMetadata(ledgerId));
        assertEquals(metadata, readMetadata.getValue());
        // ledger exists: update metadata with wrong version
        try {
            result(lm.writeLedgerMetadata(ledgerId, readMetadata.getValue(), new LongVersion(Long.MAX_VALUE)));
            fail("Should fail to write metadata using a wrong version");
        } catch (BKException bke) {
            assertEquals(Code.MetadataVersionException, bke.getCode());
        }
        // a rejected write must leave the stored metadata untouched
        readMetadata = result(lm.readLedgerMetadata(ledgerId));
        assertEquals(metadata, readMetadata.getValue());
        // ledger exists: delete metadata with wrong version
        try {
            result(lm.removeLedgerMetadata(ledgerId, new LongVersion(Long.MAX_VALUE)));
            fail("Should fail to delete metadata using a wrong version");
        } catch (BKException bke) {
            assertEquals(Code.MetadataVersionException, bke.getCode());
        }
        // a rejected delete must leave the stored metadata untouched
        readMetadata = result(lm.readLedgerMetadata(ledgerId));
        assertEquals(metadata, readMetadata.getValue());
        // ledger exists: update metadata with the right version
        LongVersion curVersion = (LongVersion) readMetadata.getVersion();
        writtenMetadata = result(lm.writeLedgerMetadata(ledgerId, readMetadata.getValue(), curVersion));
        LongVersion newVersion = (LongVersion) writtenMetadata.getVersion();
        // a successful write must advance the version
        assertTrue(curVersion.getLongVersion() < newVersion.getLongVersion());
        readMetadata = result(lm.readLedgerMetadata(ledgerId));
        assertEquals(writtenMetadata, readMetadata);
        // ledger exists: delete metadata with the right version
        result(lm.removeLedgerMetadata(ledgerId, newVersion));
        try {
            result(lm.readLedgerMetadata(ledgerId));
            fail("Should fail to read ledger if it is deleted");
        } catch (BKException bke) {
            assertEquals(Code.NoSuchLedgerExistsException, bke.getCode());
        }
    }

    /**
     * {@code asyncProcessLedgers} should visit every created ledger
     * (counted via the latch) and then fire the final callback with
     * {@code Code.OK}.
     */
    @Test
    public void testProcessLedgers() throws Exception {
        final int numLedgers = 100;
        createNumLedgers(numLedgers);
        final CountDownLatch processLatch = new CountDownLatch(numLedgers);
        final CompletableFuture<Void> doneFuture = new CompletableFuture<>();
        lm.asyncProcessLedgers(
            (l, cb) -> processLatch.countDown(),
            (rc, path, ctx) -> {
                if (Code.OK == rc) {
                    FutureUtils.complete(doneFuture, null);
                } else {
                    FutureUtils.completeExceptionally(doneFuture, BKException.create(rc));
                }
            },
            null,
            Code.OK,
            Code.MetaStoreException);
        result(doneFuture);
        processLatch.await();
    }

    /**
     * The ledger range iterator should yield all created ledger ids in
     * ascending order without gaps.
     */
    @Test
    public void testLedgerRangeIterator() throws Exception {
        final int numLedgers = 100;
        createNumLedgers(numLedgers);
        long nextLedgerId = 0L;
        LedgerRangeIterator iter = lm.getLedgerRanges(0);
        while (iter.hasNext()) {
            LedgerRange lr = iter.next();
            for (Long lid : lr.getLedgers()) {
                assertEquals(nextLedgerId, lid.longValue());
                ++nextLedgerId;
            }
        }
        assertEquals((long) numLedgers, nextLedgerId);
    }

    // Creates ledgers with ids [0, numLedgers) concurrently and waits for all
    // creations to complete before returning.
    private void createNumLedgers(int numLedgers) throws Exception {
        List<CompletableFuture<Versioned<LedgerMetadata>>> createFutures = new ArrayList<>(numLedgers);
        for (int i = 0; i < numLedgers; i++) {
            LedgerMetadata metadata = LedgerMetadataBuilder.create().withId(i)
                .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(2)
                .withPassword("test-password".getBytes(UTF_8))
                .withDigestType(DigestType.CRC32C.toApiDigestType())
                .newEnsembleEntry(0L, createNumBookies(3)).build();
            createFutures.add(lm.createLedgerMetadata(i, metadata));
        }
        FutureUtils.result(FutureUtils.collect(createFutures));
    }

    /**
     * Listener lifecycle test: a newly registered listener receives the current
     * metadata, every registered listener is notified on each update, an
     * unregistered listener receives nothing further, and once the last
     * listener is gone (or the ledger is deleted) the underlying value stream
     * is eventually torn down.
     */
    @Test
    public void testRegisterLedgerMetadataListener() throws Exception {
        long ledgerId = System.currentTimeMillis();
        // create a ledger metadata
        LedgerMetadata metadata = LedgerMetadataBuilder.create().withId(ledgerId)
            .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(2)
            .withPassword("test-password".getBytes(UTF_8))
            .withDigestType(DigestType.CRC32C.toApiDigestType())
            .newEnsembleEntry(0L, createNumBookies(3)).build();
        result(lm.createLedgerMetadata(ledgerId, metadata));
        Versioned<LedgerMetadata> readMetadata = lm.readLedgerMetadata(ledgerId).get();
        log.info("Create ledger metadata : {}", readMetadata.getValue());
        // register first listener
        LinkedBlockingQueue<Versioned<LedgerMetadata>> metadataQueue1 = new LinkedBlockingQueue<>();
        LedgerMetadataListener listener1 = (lid, m) -> {
            log.info("[listener1] Received ledger {} metadata : {}", lid, m);
            metadataQueue1.add(m);
        };
        log.info("Registered first listener for ledger {}", ledgerId);
        lm.registerLedgerMetadataListener(ledgerId, listener1);
        // we should receive a metadata notification when a ledger is created
        Versioned<LedgerMetadata> notifiedMetadata = metadataQueue1.take();
        assertEquals(readMetadata, notifiedMetadata);
        // registration must have established an etcd watch on the ledger key
        ValueStream<LedgerMetadata> lms = lm.getLedgerMetadataStream(ledgerId);
        assertNotNull(lms.waitUntilWatched());
        assertNotNull(result(lms.waitUntilWatched()));
        // register second listener
        LinkedBlockingQueue<Versioned<LedgerMetadata>> metadataQueue2 = new LinkedBlockingQueue<>();
        LedgerMetadataListener listener2 = (lid, m) -> {
            log.info("[listener2] Received ledger {} metadata : {}", lid, m);
            metadataQueue2.add(m);
        };
        log.info("Registered second listener for ledger {}", ledgerId);
        lm.registerLedgerMetadataListener(ledgerId, listener2);
        Versioned<LedgerMetadata> notifiedMetadata2 = metadataQueue2.take();
        assertEquals(readMetadata, notifiedMetadata2);
        assertNotNull(lm.getLedgerMetadataStream(ledgerId));
        // update the metadata
        lm.writeLedgerMetadata(ledgerId,
            LedgerMetadataBuilder.from(metadata).newEnsembleEntry(10L, createNumBookies(3)).build(),
            notifiedMetadata.getVersion()).get();
        readMetadata = lm.readLedgerMetadata(ledgerId).get();
        // both listeners observe the update
        assertEquals(readMetadata, metadataQueue1.take());
        assertEquals(readMetadata, metadataQueue2.take());
        lms = lm.getLedgerMetadataStream(ledgerId);
        assertNotNull(lms);
        assertEquals(2, lms.getNumConsumers());
        // remove listener2
        lm.unregisterLedgerMetadataListener(ledgerId, listener2);
        lms = lm.getLedgerMetadataStream(ledgerId);
        assertNotNull(lms);
        assertEquals(1, lms.getNumConsumers());
        // update the metadata again
        lm.writeLedgerMetadata(ledgerId,
            LedgerMetadataBuilder.from(metadata).newEnsembleEntry(20L, createNumBookies(3)).build(),
            readMetadata.getVersion()).get();
        readMetadata = lm.readLedgerMetadata(ledgerId).get();
        // only the remaining listener observes this update
        assertEquals(readMetadata, metadataQueue1.take());
        assertNull(metadataQueue2.poll());
        // remove listener1
        lm.unregisterLedgerMetadataListener(ledgerId, listener1);
        // the value stream will be removed
        while (lm.getLedgerMetadataStream(ledgerId) != null) {
            TimeUnit.MILLISECONDS.sleep(100);
        }
        assertEquals(0, lms.getNumConsumers());
        // update the metadata again
        lm.writeLedgerMetadata(ledgerId,
            LedgerMetadataBuilder.from(metadata).newEnsembleEntry(30L, createNumBookies(3)).build(),
            readMetadata.getVersion()).get();
        readMetadata = lm.readLedgerMetadata(ledgerId).get();
        // no listeners registered, so nothing is delivered
        assertNull(metadataQueue1.poll());
        assertNull(metadataQueue2.poll());
        log.info("Registered first listener for ledger {} again", ledgerId);
        lm.registerLedgerMetadataListener(ledgerId, listener1);
        // re-registration delivers the current metadata immediately
        notifiedMetadata = metadataQueue1.take();
        assertEquals(readMetadata, notifiedMetadata);
        lms = lm.getLedgerMetadataStream(ledgerId);
        assertNotNull(lms);
        assertEquals(1, lms.getNumConsumers());
        // delete the ledger
        lm.removeLedgerMetadata(ledgerId, readMetadata.getVersion()).get();
        // the listener will eventually be removed
        while (lm.getLedgerMetadataStream(ledgerId) != null) {
            TimeUnit.MILLISECONDS.sleep(100);
        }
        assertEquals(1, lms.getNumConsumers());
        assertNull(metadataQueue1.poll());
        assertNull(metadataQueue2.poll());
    }

    // Builds a list of distinct localhost bookie ids on ports 3181, 3182, ...
    static List<BookieId> createNumBookies(int numBookies) {
        return IntStream.range(0, numBookies)
            .mapToObj(idx -> BookieId.parse("127.0.0.1:" + (3181 + idx)))
            .collect(Collectors.toList());
    }
}
| 573 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd/EtcdLayoutManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import static org.apache.bookkeeper.metadata.etcd.EtcdConstants.LAYOUT_NODE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
import java.io.IOException;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.meta.LayoutManager.LedgerLayoutExistsException;
import org.apache.bookkeeper.meta.LedgerLayout;
import org.apache.bookkeeper.metadata.etcd.testing.EtcdTestBase;
import org.apache.commons.lang.RandomStringUtils;
import org.junit.Before;
import org.junit.Test;
/**
 * Integration test {@link EtcdLayoutManager}.
 *
 * <p>Runs against a fresh, randomly named etcd scope from {@link EtcdTestBase}.
 */
@Slf4j
public class EtcdLayoutManagerTest extends EtcdTestBase {

    // arbitrary manager version stored in the layout under test
    private static final int managerVersion = 0xabcd;

    // etcd key prefix isolating this test's layout node
    private String scope;
    private EtcdLayoutManager layoutManager;

    @Before
    @Override
    public void setUp() throws Exception {
        super.setUp();
        this.scope = "/" + RandomStringUtils.randomAlphabetic(8);
        this.layoutManager = new EtcdLayoutManager(etcdClient, scope);
        log.info("setup layout manager under scope {}", scope);
    }

    /**
     * Covers the full layout lifecycle: reading a missing layout returns null,
     * a stored layout reads back equal, a second store fails with
     * {@link LedgerLayoutExistsException} without clobbering the original,
     * delete removes the layout, and deleting a missing layout raises an
     * {@link IOException} naming the layout key.
     */
    @Test
    public void testReadCreateDeleteLayout() throws Exception {
        // layout doesn't exist
        assertNull(layoutManager.readLedgerLayout());
        // create the layout
        LedgerLayout layout = new LedgerLayout(
            EtcdLedgerManagerFactory.class.getName(),
            managerVersion
        );
        layoutManager.storeLedgerLayout(layout);
        // read the layout
        LedgerLayout readLayout = layoutManager.readLedgerLayout();
        assertEquals(layout, readLayout);
        // attempts to create the layout again and it should fail
        LedgerLayout newLayout = new LedgerLayout(
            "new layout",
            managerVersion + 1
        );
        try {
            layoutManager.storeLedgerLayout(newLayout);
            fail("Should fail storeLedgerLayout if layout already exists");
        } catch (LedgerLayoutExistsException e) {
            // expected
        }
        // read the layout again (layout should not be changed)
        readLayout = layoutManager.readLedgerLayout();
        assertEquals(layout, readLayout);
        // delete the layout
        layoutManager.deleteLedgerLayout();
        // the layout should be gone now
        assertNull(layoutManager.readLedgerLayout());
        // delete the layout again. it should fail since layout doesn't exist
        try {
            layoutManager.deleteLedgerLayout();
            // fixed typo in the failure message: "is" -> "if"
            fail("Should fail deleteLedgerLayout if layout not found");
        } catch (IOException ioe) {
            assertEquals(
                "No ledger layout is found under '" + scope + "/" + LAYOUT_NODE + "'",
                ioe.getMessage());
        }
    }
}
| 574 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd/Etcd64bitIdGeneratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import com.google.common.util.concurrent.RateLimiter;
import io.etcd.jetcd.Client;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import lombok.Cleanup;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.metadata.etcd.testing.EtcdTestBase;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallbackFuture;
import org.apache.commons.lang.RandomStringUtils;
import org.junit.Before;
import org.junit.Test;
/**
 * Integration test {@link Etcd64bitIdGenerator}.
 *
 * <p>Runs against a fresh, randomly named etcd scope from {@link EtcdTestBase}.
 */
@Slf4j
public class Etcd64bitIdGeneratorTest extends EtcdTestBase {

    // etcd key prefix isolating this test's id-generation buckets
    private String scope;
    private Etcd64bitIdGenerator generator;

    @Before
    @Override
    public void setUp() throws Exception {
        super.setUp();
        this.scope = "/" + RandomStringUtils.randomAlphabetic(8);
        this.generator = new Etcd64bitIdGenerator(etcdClient.getKVClient(), scope);
        log.info("Setup id generator under scope {}", scope);
    }

    /**
     * Sequentially generated ids must rotate over all buckets, and the id
     * within each bucket must increase by exactly one on each visit,
     * starting from 1.
     */
    @Test
    public void testGenerateIdSequence() throws Exception {
        Map<Integer, Long> buckets = new HashMap<>();
        int numIterations = 10;
        for (int i = 0; i < numIterations; i++) {
            log.info("Id generation iteration : {}", i);
            for (int j = 0; j < Etcd64bitIdGenerator.NUM_BUCKETS; j++) {
                GenericCallbackFuture<Long> future = new GenericCallbackFuture<>();
                generator.generateLedgerId(future);
                long lid = future.get();
                int bucketId = Etcd64bitIdGenerator.getBucketId(lid);
                long idInBucket = Etcd64bitIdGenerator.getIdInBucket(lid);
                Long prevIdInBucket = buckets.put(bucketId, idInBucket);
                if (null == prevIdInBucket) {
                    // first visit of this bucket: in-bucket ids start at 1
                    assertEquals(1, idInBucket);
                } else {
                    // every revisit bumps the in-bucket id by exactly one
                    assertEquals(prevIdInBucket + 1, idInBucket);
                }
            }
        }
        // every bucket must have been visited exactly `numIterations` times
        assertEquals(Etcd64bitIdGenerator.NUM_BUCKETS, buckets.size());
        for (Map.Entry<Integer, Long> bucketEntry : buckets.entrySet()) {
            assertEquals(numIterations, bucketEntry.getValue().intValue());
        }
    }

    /**
     * Test generating id in parallel and ensure there is no duplicated id.
     *
     * <p>Each worker thread uses its own etcd client and generator instance,
     * generating ids under a shared rate limiter until at least {@code numIds}
     * distinct ids have been collected or a duplicate/error is observed.
     */
    @Test
    public void testGenerateIdParallel() throws Exception {
        final int numThreads = 10;
        @Cleanup("shutdown")
        ExecutorService executor = Executors.newFixedThreadPool(numThreads);
        final int numIds = 10000;
        final AtomicLong totalIds = new AtomicLong(numIds);
        final Set<Long> ids = Collections.newSetFromMap(new ConcurrentHashMap<>());
        final RateLimiter limiter = RateLimiter.create(1000);
        final CompletableFuture<Void> doneFuture = new CompletableFuture<>();
        for (int i = 0; i < numThreads; i++) {
            executor.submit(() -> {
                // each worker creates its own client; close it when the worker is
                // done (fix: the client was previously leaked — its grpc channel
                // was never released)
                Client client = Client.builder()
                    .endpoints(etcdContainer.getClientEndpoint())
                    .build();
                try {
                    Etcd64bitIdGenerator gen = new Etcd64bitIdGenerator(
                        client.getKVClient(),
                        scope
                    );
                    // flipped from a generation callback to stop this worker's loop
                    AtomicBoolean running = new AtomicBoolean(true);
                    while (running.get()) {
                        limiter.acquire();
                        GenericCallbackFuture<Long> genFuture = new GenericCallbackFuture<>();
                        gen.generateLedgerId(genFuture);
                        genFuture
                            .thenAccept(lid -> {
                                boolean duplicatedFound = !(ids.add(lid));
                                if (duplicatedFound) {
                                    running.set(false);
                                    doneFuture.completeExceptionally(
                                        new IllegalStateException("Duplicated id " + lid + " generated : " + ids));
                                    return;
                                } else {
                                    if (totalIds.decrementAndGet() <= 0) {
                                        // enough ids collected across all workers
                                        running.set(false);
                                        doneFuture.complete(null);
                                    }
                                }
                            })
                            .exceptionally(cause -> {
                                running.set(false);
                                doneFuture.completeExceptionally(cause);
                                return null;
                            });
                    }
                } finally {
                    // the loop only exits after a callback completed doneFuture,
                    // so closing here cannot lose a required completion
                    client.close();
                }
            });
        }
        FutureUtils.result(doneFuture);
        assertTrue(totalIds.get() <= 0);
        assertTrue(ids.size() >= numIds);
    }
}
| 575 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd/EtcdClusterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getBookiesPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getBucketsPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getClusterInstanceIdPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getLayoutKey;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getLedgersPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getReadonlyBookiesPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getScopeEndKey;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getUnderreplicationPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getWritableBookiesPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.msResult;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import io.etcd.jetcd.ByteSequence;
import io.etcd.jetcd.Client;
import io.etcd.jetcd.kv.GetResponse;
import io.etcd.jetcd.options.GetOption;
import java.util.UUID;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.BookieException.MetadataStoreException;
import org.apache.bookkeeper.discover.RegistrationManager;
import org.apache.bookkeeper.meta.LedgerLayout;
import org.apache.bookkeeper.metadata.etcd.testing.EtcdTestBase;
import org.apache.bookkeeper.net.BookieId;
import org.apache.commons.lang.RandomStringUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Test cluster related operation on Etcd based registration manager.
*/
@Slf4j
public class EtcdClusterTest extends EtcdTestBase {
private String scope;
private RegistrationManager regMgr;
@Before
@Override
public void setUp() throws Exception {
super.setUp();
this.scope = RandomStringUtils.randomAlphabetic(32);
this.regMgr = new EtcdRegistrationManager(
newEtcdClient(), scope
);
}
@After
@Override
public void tearDown() throws Exception {
this.regMgr.close();
super.tearDown();
}
@Test
public void testGetClusterInstanceIdIfClusterNotInitialized() throws Exception {
try {
regMgr.getClusterInstanceId();
fail("Should fail getting cluster instance id if cluster not initialized");
} catch (MetadataStoreException e) {
assertTrue(e.getMessage().contains("BookKeeper is not initialized"));
}
}
@Test
public void testGetClusterInstanceId() throws Exception {
assertClusterNotExists(etcdClient, scope);
regMgr.initNewCluster();
String instanceId = regMgr.getClusterInstanceId();
UUID uuid = UUID.fromString(instanceId);
log.info("Cluster instance id : {}", uuid);
}
@Test
public void testNukeNonExistingCluster() throws Exception {
assertClusterNotExists(etcdClient, scope);
assertTrue(regMgr.nukeExistingCluster());
assertClusterNotExists(etcdClient, scope);
}
@Test
public void testNukeExistingCluster() throws Exception {
assertTrue(regMgr.initNewCluster());
assertClusterExists(etcdClient, scope);
assertTrue(regMgr.nukeExistingCluster());
assertClusterNotExists(etcdClient, scope);
}
@Test
public void testInitNewClusterTwice() throws Exception {
assertTrue(regMgr.initNewCluster());
assertClusterExists(etcdClient, scope);
String instanceId = regMgr.getClusterInstanceId();
assertFalse(regMgr.initNewCluster());
assertClusterExists(etcdClient, scope);
assertEquals(instanceId, regMgr.getClusterInstanceId());
}
@Test
public void testPrepareFormatNonExistingCluster() throws Exception {
assertFalse(regMgr.prepareFormat());
}
@Test
public void testPrepareFormatExistingCluster() throws Exception {
assertTrue(regMgr.initNewCluster());
assertClusterExists(etcdClient, scope);
assertTrue(regMgr.prepareFormat());
}
@Test
public void testNukeExistingClusterWithWritableBookies() throws Exception {
testNukeExistingClusterWithBookies(false);
}
@Test
public void testNukeExistingClusterWithReadonlyBookies() throws Exception {
testNukeExistingClusterWithBookies(true);
}
private void testNukeExistingClusterWithBookies(boolean readonly) throws Exception {
assertTrue(regMgr.initNewCluster());
assertClusterExists(etcdClient, scope);
createNumBookies(etcdClient, scope, 3, readonly);
assertFalse(regMgr.nukeExistingCluster());
assertClusterExists(etcdClient, scope);
removeNumBookies(etcdClient, scope, 3, readonly);
assertTrue(regMgr.nukeExistingCluster());
assertClusterNotExists(etcdClient, scope);
}
@Test
public void testNukeExistingClusterWithAllBookies() throws Exception {
assertTrue(regMgr.initNewCluster());
assertClusterExists(etcdClient, scope);
createNumBookies(etcdClient, scope, 1, false);
createNumBookies(etcdClient, scope, 2, true);
assertFalse(regMgr.nukeExistingCluster());
assertClusterExists(etcdClient, scope);
removeNumBookies(etcdClient, scope, 1, false);
removeNumBookies(etcdClient, scope, 2, true);
assertTrue(regMgr.nukeExistingCluster());
assertClusterNotExists(etcdClient, scope);
}
@Test
public void testFormatNonExistingCluster() throws Exception {
assertClusterNotExists(etcdClient, scope);
assertTrue(regMgr.format());
assertClusterExists(etcdClient, scope);
}
@Test
public void testFormatExistingCluster() throws Exception {
assertClusterNotExists(etcdClient, scope);
assertTrue(regMgr.initNewCluster());
assertClusterExists(etcdClient, scope);
String clusterInstanceId = regMgr.getClusterInstanceId();
assertTrue(regMgr.format());
assertClusterExists(etcdClient, scope);
assertNotEquals(clusterInstanceId, regMgr.getClusterInstanceId());
}
@Test
public void testFormatExistingClusterWithBookies() throws Exception {
assertClusterNotExists(etcdClient, scope);
assertTrue(regMgr.initNewCluster());
assertClusterExists(etcdClient, scope);
String clusterInstanceId = regMgr.getClusterInstanceId();
createNumBookies(etcdClient, scope, 3, false);
assertFalse(regMgr.format());
assertClusterExists(etcdClient, scope);
assertEquals(clusterInstanceId, regMgr.getClusterInstanceId());
}
private static void createNumBookies(Client client,
String scope,
int numBookies,
boolean readonly) throws Exception {
for (int i = 0; i < numBookies; i++) {
BookieId bookieId = BookieId.parse("bookie-" + i + ":3181");
String bookiePath;
if (readonly) {
bookiePath = EtcdUtils.getReadonlyBookiePath(scope, bookieId);
} else {
bookiePath = EtcdUtils.getWritableBookiePath(scope, bookieId);
}
msResult(client.getKVClient().put(
ByteSequence.from(bookiePath, UTF_8),
EtcdConstants.EMPTY_BS
));
}
}
private static void removeNumBookies(Client client,
String scope,
int numBookies,
boolean readonly) throws Exception {
for (int i = 0; i < numBookies; i++) {
BookieId bookieId = BookieId.parse("bookie-" + i + ":3181");
String bookiePath;
if (readonly) {
bookiePath = EtcdUtils.getReadonlyBookiePath(scope, bookieId);
} else {
bookiePath = EtcdUtils.getWritableBookiePath(scope, bookieId);
}
msResult(client.getKVClient().delete(
ByteSequence.from(bookiePath, UTF_8)
));
}
}
private static void assertClusterScope(Client client,
String scope) throws Exception {
GetResponse resp = msResult(
client.getKVClient().get(
ByteSequence.from(scope, UTF_8)));
assertEquals(1, resp.getCount());
}
private static void assertClusterLayout(Client client,
String scope) throws Exception {
String layoutPath = getLayoutKey(scope);
GetResponse resp = msResult(
client.getKVClient().get(
ByteSequence.from(layoutPath, UTF_8)));
assertEquals(1, resp.getCount());
LedgerLayout layout = LedgerLayout.parseLayout(
resp.getKvs().get(0).getValue().getBytes()
);
assertEquals(
EtcdLedgerManagerFactory.class.getName(),
layout.getManagerFactoryClass()
);
assertEquals(EtcdLedgerManagerFactory.VERSION, layout.getManagerVersion());
assertEquals(LedgerLayout.LAYOUT_FORMAT_VERSION, layout.getLayoutFormatVersion());
}
private static void assertClusterInstanceId(Client client,
String scope) throws Exception {
String instanceIdPath = getClusterInstanceIdPath(scope);
GetResponse resp = msResult(
client.getKVClient().get(ByteSequence.from(instanceIdPath, UTF_8)));
assertEquals(1, resp.getCount());
String instanceId = new String(resp.getKvs().get(0).getValue().getBytes(), UTF_8);
UUID uuid = UUID.fromString(instanceId);
log.info("Cluster instance id : {}", uuid);
}
private static void assertBookiesPath(Client client,
String scope) throws Exception {
String bookiesPath = getBookiesPath(scope);
GetResponse resp = msResult(
client.getKVClient().get(ByteSequence.from(bookiesPath, UTF_8)));
assertEquals(1, resp.getCount());
}
private static void assertWritableBookiesPath(Client client,
String scope) throws Exception {
String bookiesPath = getWritableBookiesPath(scope);
GetResponse resp = msResult(
client.getKVClient().get(ByteSequence.from(bookiesPath, UTF_8)));
assertEquals(1, resp.getCount());
}
private static void assertReadonlyBookiesPath(Client client,
String scope) throws Exception {
String bookiesPath = getReadonlyBookiesPath(scope);
GetResponse resp = msResult(
client.getKVClient().get(ByteSequence.from(bookiesPath, UTF_8)));
assertEquals(1, resp.getCount());
}
private static void assertLedgersPath(Client client, String scope) throws Exception {
String ledgersPath = getLedgersPath(scope);
GetResponse resp = msResult(
client.getKVClient().get(ByteSequence.from(ledgersPath, UTF_8)));
assertEquals(1, resp.getCount());
}
private static void assertBucketsPath(Client client, String scope) throws Exception {
String bucketsPath = getBucketsPath(scope);
GetResponse resp = msResult(
client.getKVClient().get(ByteSequence.from(bucketsPath, UTF_8)));
assertEquals(1, resp.getCount());
}
/**
 * Asserts that the under-replication node exists under {@code scope}.
 */
private static void assertUnderreplicationPath(Client client, String scope) throws Exception {
    final ByteSequence key = ByteSequence.from(getUnderreplicationPath(scope), UTF_8);
    final GetResponse response = msResult(client.getKVClient().get(key));
    // Exactly one key is expected at this path.
    assertEquals(1, response.getCount());
}
/**
 * Asserts that a fully initialized cluster layout exists under {@code scope}:
 * scope root, layout, instance id, bookies (all / writable / readonly),
 * ledgers, buckets and the under-replication path.
 */
private static void assertClusterExists(Client client, String scope) throws Exception {
assertClusterScope(client, scope);
assertClusterLayout(client, scope);
assertClusterInstanceId(client, scope);
assertBookiesPath(client, scope);
assertWritableBookiesPath(client, scope);
assertReadonlyBookiesPath(client, scope);
assertLedgersPath(client, scope);
assertBucketsPath(client, scope);
assertUnderreplicationPath(client, scope);
}
/**
 * Asserts that no key at all exists in the whole {@code scope} range.
 */
private static void assertClusterNotExists(Client client, String scope) throws Exception {
    // Range-scan from the scope root to the scope's end key.
    final GetOption scanWholeScope = GetOption.newBuilder()
        .withRange(ByteSequence.from(getScopeEndKey(scope), UTF_8))
        .build();
    final GetResponse response = msResult(
        client.getKVClient().get(ByteSequence.from(scope, UTF_8), scanWholeScope));
    // The scan over the whole scope must return nothing.
    assertEquals(0, response.getCount());
}
}
| 576 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd/integration/SmokeTest.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd.integration;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.bookkeeper.common.concurrent.FutureUtils.result;
import static org.junit.Assert.assertEquals;
import java.util.concurrent.atomic.AtomicInteger;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.client.api.BookKeeper;
import org.apache.bookkeeper.client.api.DigestType;
import org.apache.bookkeeper.client.api.LedgerEntries;
import org.apache.bookkeeper.client.api.ReadHandle;
import org.apache.bookkeeper.client.api.WriteAdvHandle;
import org.apache.bookkeeper.client.api.WriteHandle;
import org.apache.bookkeeper.metadata.etcd.testing.EtcdBKClusterTestBase;
import org.junit.Test;
/**
 * Smoke testing etcd metadata drives.
 */
@Slf4j
public class SmokeTest extends EtcdBKClusterTestBase {

    // Password shared by every ledger created in this test.
    private static final byte[] PASSWD = "smoketest".getBytes(UTF_8);

    /**
     * Opens {@code ledgerId} for reading and verifies it contains exactly
     * {@code numExpectedEntries} entries whose payloads are {@code entry-%03d}.
     *
     * @param bk bookkeeper client used to open the ledger
     * @param ledgerId id of the ledger to read
     * @param numExpectedEntries number of entries the ledger must contain
     */
    private static void readEntries(BookKeeper bk,
                                    long ledgerId,
                                    int numExpectedEntries) throws Exception {
        try (ReadHandle readlh = result(bk.newOpenLedgerOp()
            .withLedgerId(ledgerId)
            .withDigestType(DigestType.CRC32C)
            .withPassword(PASSWD)
            .execute()
        )) {
            long lac = readlh.getLastAddConfirmed();
            AtomicInteger idx = new AtomicInteger(0);
            try (LedgerEntries entries = readlh.read(0, lac)) {
                entries.forEach(e -> assertEquals(
                    String.format("entry-%03d", idx.getAndIncrement()),
                    new String(e.getEntryBytes(), UTF_8)));
            }
            // JUnit convention is assertEquals(expected, actual); the arguments
            // were swapped before, which produced misleading failure messages.
            assertEquals(numExpectedEntries, idx.get());
        }
    }

    /**
     * Writes 100 entries through a plain write handle, then reads them back.
     */
    @Test
    public void testReadWrite() throws Exception {
        int numEntries = 100;
        try (BookKeeper bk = BookKeeper.newBuilder(conf).build()) {
            long ledgerId;
            try (WriteHandle wh = result(bk.newCreateLedgerOp()
                .withDigestType(DigestType.CRC32C)
                .withPassword(PASSWD)
                .execute())) {
                ledgerId = wh.getId();
                log.info("Successfully created ledger {} to append entries.", ledgerId);
                for (int i = 0; i < numEntries; i++) {
                    wh.append(String.format("entry-%03d", i).getBytes(UTF_8));
                }
            }
            log.info("Opening ledger {} to read entries ...", ledgerId);
            readEntries(bk, ledgerId, numEntries);
            log.info("Successfully read {} entries from ledger {}", numEntries, ledgerId);
        }
    }

    /**
     * Writes 100 entries at explicit entry ids through an advanced write
     * handle, then reads them back.
     */
    @Test
    public void testReadWriteAdv() throws Exception {
        final int numEntries = 100;
        try (BookKeeper bk = BookKeeper.newBuilder(conf).build()) {
            long ledgerId;
            try (WriteAdvHandle wah = result(bk.newCreateLedgerOp()
                .withDigestType(DigestType.CRC32C)
                .withPassword(PASSWD)
                .makeAdv()
                .execute())) {
                ledgerId = wah.getId();
                log.info("Successfully created adv ledger {} to append entries.", ledgerId);
                for (int i = 0; i < numEntries; i++) {
                    wah.write(i, String.format("entry-%03d", i).getBytes(UTF_8));
                }
            }
            log.info("Opening adv ledger {} to read entries ...", ledgerId);
            readEntries(bk, ledgerId, numEntries);
            log.info("Successfully read {} entries from adv ledger {}", numEntries, ledgerId);
        }
    }
}
| 577 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd/testing/EtcdTestBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd.testing;
import io.etcd.jetcd.Client;
import java.util.Set;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.Consumer;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.versioning.Versioned;
import org.apache.commons.lang.RandomStringUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.Timeout;
/**
 * A test base that setup etcd cluster for testing.
 */
@Slf4j
public abstract class EtcdTestBase {

    @Rule
    public Timeout globalTimeout = Timeout.seconds(120);

    // Shared etcd container, started once per test class.
    protected static EtcdContainer etcdContainer;

    @BeforeClass
    public static void setupCluster() throws Exception {
        etcdContainer = new EtcdContainer(RandomStringUtils.randomAlphabetic(8));
        etcdContainer.start();
        log.info("Successfully started etcd at {}", etcdContainer.getClientEndpoint());
    }

    @AfterClass
    public static void teardownCluster() throws Exception {
        if (null != etcdContainer) {
            etcdContainer.stop();
            log.info("Successfully stopped etcd.");
        }
    }

    // Per-test etcd client; created in setUp and closed in tearDown.
    protected Client etcdClient;

    /**
     * Builds a new etcd client pointed at the shared test container.
     */
    protected static Client newEtcdClient() {
        return Client.builder()
            .endpoints(etcdContainer.getClientEndpoint())
            .build();
    }

    /**
     * Returns a consumer that enqueues every received versioned key set into
     * {@code notifications}, so tests can block on watch events.
     */
    protected static <T> Consumer<Versioned<Set<T>>> consumeVersionedKeySet(
        LinkedBlockingQueue<Versioned<Set<T>>> notifications) {
        return versionedKeys -> {
            log.info("Received new keyset : {}", versionedKeys);
            try {
                notifications.put(versionedKeys);
            } catch (InterruptedException e) {
                // Restore the interrupt status so callers can observe it;
                // the original swallowed the interrupt entirely.
                Thread.currentThread().interrupt();
                log.error("Interrupted at enqueuing updated key set", e);
            }
        };
    }

    @Before
    public void setUp() throws Exception {
        etcdClient = newEtcdClient();
        log.info("Successfully build etcd client to endpoint {}", etcdContainer.getClientEndpoint());
    }

    @After
    public void tearDown() throws Exception {
        if (null != etcdClient) {
            etcdClient.close();
            log.info("Successfully close etcd client to endpoint {}", etcdContainer.getClientEndpoint());
        }
    }
}
| 578 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd/testing/EtcdContainer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd.testing;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.github.dockerjava.api.DockerClient;
import com.github.dockerjava.api.async.ResultCallback;
import com.github.dockerjava.api.command.LogContainerCmd;
import com.github.dockerjava.api.model.Frame;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import lombok.extern.slf4j.Slf4j;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.ContainerLaunchException;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.output.WaitingConsumer;
import org.testcontainers.containers.wait.strategy.WaitStrategy;
import org.testcontainers.utility.LogUtils;
/**
 * Etcd test container.
 */
@Slf4j
public class EtcdContainer extends GenericContainer<EtcdContainer> {

    /** Forwards the container's stdout/stderr frames into the test log. */
    static class LogContainerResultCb extends ResultCallback.Adapter<Frame> {
        @Override
        public void onNext(Frame frame) {
            log.info(new String(frame.getPayload(), UTF_8));
        }
    }

    public static final String NAME = "etcd";
    public static final int CLIENT_PORT = 2379;

    // Used to derive the container name and the service URIs.
    private final String clusterName;

    public EtcdContainer(String clusterName) {
        super("quay.io/coreos/etcd:v3.3");
        this.clusterName = clusterName;
    }

    /** Service URI reachable from the host (through the mapped port). */
    public String getExternalServiceUri() {
        return "etcd://" + getHost() + ":" + getEtcdClientPort() + "/clusters/" + clusterName;
    }

    /** Service URI reachable from inside the docker network. */
    public String getInternalServiceUri() {
        return "etcd://" + NAME + ":" + CLIENT_PORT + "/clusters/" + clusterName;
    }

    @Override
    protected void configure() {
        super.configure();
        String[] command = new String[] {
            "/usr/local/bin/etcd",
            "--name", NAME + "0",
            "--initial-advertise-peer-urls", "http://" + NAME + ":2380",
            "--listen-peer-urls", "http://0.0.0.0:2380",
            "--advertise-client-urls", "http://" + NAME + ":2379",
            "--listen-client-urls", "http://0.0.0.0:2379",
            "--initial-cluster", NAME + "0=http://" + NAME + ":2380"
        };
        // The original chained withNetworkAliases(NAME) twice; once is enough.
        this.withNetworkAliases(NAME)
            .withExposedPorts(CLIENT_PORT)
            .withCreateContainerCmdModifier(createContainerCmd -> {
                createContainerCmd.withHostName(NAME);
                createContainerCmd.withName(clusterName + "-" + NAME);
            })
            .withCommand(command)
            .waitingFor(waitStrategy());
        tailContainerLog();
    }

    /**
     * Follows the container log asynchronously once the container id becomes
     * available, forwarding output through {@link LogContainerResultCb}.
     */
    public void tailContainerLog() {
        CompletableFuture.runAsync(() -> {
            // The container id only exists after start(); poll until then.
            while (null == this.getContainerId()) {
                try {
                    TimeUnit.MILLISECONDS.sleep(100);
                } catch (InterruptedException e) {
                    return;
                }
            }
            LogContainerCmd logContainerCmd = this.dockerClient.logContainerCmd(this.getContainerId());
            logContainerCmd.withStdOut(true).withStdErr(true).withFollowStream(true);
            logContainerCmd.exec(new LogContainerResultCb());
        });
    }

    /** Host-mapped etcd client port. */
    public int getEtcdClientPort() {
        return getMappedPort(CLIENT_PORT);
    }

    /** HTTP endpoint for etcd clients running on the host. */
    public String getClientEndpoint() {
        return String.format("http://%s:%d", getHost(), getEtcdClientPort());
    }

    /**
     * Waits until etcd logs that it is ready to serve client requests,
     * or throws {@link ContainerLaunchException} on timeout.
     */
    private WaitStrategy waitStrategy() {
        return new org.testcontainers.containers.wait.strategy.AbstractWaitStrategy() {
            @Override
            protected void waitUntilReady() {
                final DockerClient client = DockerClientFactory.instance().client();
                final WaitingConsumer waitingConsumer = new WaitingConsumer();
                LogUtils.followOutput(client, waitStrategyTarget.getContainerId(), waitingConsumer);
                try {
                    waitingConsumer.waitUntil(
                        f -> f.getUtf8String().contains("ready to serve client requests"),
                        startupTimeout.getSeconds(),
                        TimeUnit.SECONDS,
                        1
                    );
                } catch (TimeoutException e) {
                    throw new ContainerLaunchException("Timed out");
                }
            }
        };
    }
}
| 579 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd/testing/EtcdBKClusterTestBase.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd.testing;
import static org.junit.Assert.assertTrue;
import io.netty.buffer.ByteBufAllocator;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.Bookie;
import org.apache.bookkeeper.bookie.MockUncleanShutdownDetection;
import org.apache.bookkeeper.bookie.TestBookieImpl;
import org.apache.bookkeeper.client.BookKeeperAdmin;
import org.apache.bookkeeper.client.api.BookKeeper;
import org.apache.bookkeeper.common.net.ServiceURI;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.conf.TestBKConfiguration;
import org.apache.bookkeeper.meta.MetadataDrivers;
import org.apache.bookkeeper.metadata.etcd.EtcdMetadataBookieDriver;
import org.apache.bookkeeper.metadata.etcd.EtcdMetadataClientDriver;
import org.apache.bookkeeper.proto.BookieServer;
import org.apache.bookkeeper.test.TestStatsProvider;
import org.apache.bookkeeper.util.IOUtils;
import org.apache.bookkeeper.util.PortManager;
import org.apache.commons.io.FileUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
/**
 * A test base that run an Etcd based bookkeeper cluster.
 */
@Slf4j
public abstract class EtcdBKClusterTestBase extends EtcdTestBase {
// Base configurations cloned for each bookie/client; populated in setupCluster.
protected static ClientConfiguration baseClientConf;
protected static ServerConfiguration baseServerConf;
protected static final int NUM_BOOKIES = 3;
// Guarded by synchronized(BOOKIES) for add/shutdown.
protected static final List<BookieServer> BOOKIES = new ArrayList<>(NUM_BOOKIES);
// Temp dirs created per bookie; deleted in teardownCluster.
protected static final List<File> TMP_DIRS = new ArrayList<>(NUM_BOOKIES);
/**
 * Creates a temp dir and records it for cleanup at class teardown.
 */
protected static File createTempDir(String prefix, String suffix) throws IOException {
File dir = IOUtils.createTempDir(prefix, suffix);
TMP_DIRS.add(dir);
return dir;
}
/**
 * Builds a server configuration on a free port with a fresh temp dir used
 * for both the journal and the single ledger directory.
 */
protected static ServerConfiguration newServerConfiguration() throws Exception {
File f = createTempDir("bookie", "test");
int port = PortManager.nextFreePort();
return newServerConfiguration(port, f, new File[] { f });
}
/**
 * Builds a server configuration derived from {@code baseServerConf} with the
 * given port, journal dir and ledger dirs.
 */
protected static ServerConfiguration newServerConfiguration(int port, File journalDir, File[] ledgerDirs) {
ServerConfiguration conf = new ServerConfiguration(baseServerConf);
conf.setBookiePort(port);
conf.setJournalDirName(journalDir.getPath());
String[] ledgerDirNames = new String[ledgerDirs.length];
for (int i = 0; i < ledgerDirs.length; i++) {
ledgerDirNames[i] = ledgerDirs[i].getPath();
}
conf.setLedgerDirNames(ledgerDirNames);
conf.setEnableTaskExecutionStats(true);
return conf;
}
// Hides EtcdTestBase.setupCluster(); the parent's is invoked explicitly below.
@BeforeClass
public static void setupCluster() throws Exception {
setupCluster(NUM_BOOKIES);
}
/**
 * Starts etcd, registers the etcd metadata drivers, formats the cluster and
 * starts {@code numBookies} bookies.
 */
protected static void setupCluster(int numBookies) throws Exception {
EtcdTestBase.setupCluster();
MetadataDrivers.registerBookieDriver(
"etcd", EtcdMetadataBookieDriver.class
);
MetadataDrivers.registerClientDriver(
"etcd", EtcdMetadataClientDriver.class
);
log.info("Successfully started etcd at:"
+ " internal service uri = {}, external service uri = {}",
etcdContainer.getInternalServiceUri(), etcdContainer.getExternalServiceUri());
ServiceURI uri = ServiceURI.create(etcdContainer.getExternalServiceUri());
baseClientConf = new ClientConfiguration()
.setMetadataServiceUri(uri.getUri().toString());
baseServerConf = TestBKConfiguration.newServerConfiguration()
.setMetadataServiceUri(uri.getUri().toString());
// format the cluster
assertTrue(BookKeeperAdmin.format(baseServerConf, false, true));
// start bookies
startNumBookies(numBookies);
}
private static void startNumBookies(int numBookies) throws Exception {
for (int i = 0; i < numBookies; i++) {
ServerConfiguration conf = newServerConfiguration();
log.info("Starting new bookie on port : {}", conf.getBookiePort());
BookieServer server = startBookie(conf);
synchronized (BOOKIES) {
BOOKIES.add(server);
}
}
}
/**
 * Starts a single bookie server (with auto-recovery enabled) for the given
 * configuration and returns it once running.
 */
private static BookieServer startBookie(ServerConfiguration conf) throws Exception {
conf.setAutoRecoveryDaemonEnabled(true);
TestStatsProvider provider = new TestStatsProvider();
Bookie bookie = new TestBookieImpl(conf);
BookieServer server = new BookieServer(conf, bookie, provider.getStatsLogger(""),
ByteBufAllocator.DEFAULT,
new MockUncleanShutdownDetection());
server.start();
return server;
}
@AfterClass
public static void teardownCluster() throws Exception {
// stop bookies
stopBookies();
// stop metadata store
EtcdTestBase.teardownCluster();
log.info("Stopped the metadata store.");
// clean up temp dirs
for (File f : TMP_DIRS) {
FileUtils.deleteDirectory(f);
}
log.info("Clean up all the temp directories.");
}
private static void stopBookies() {
synchronized (BOOKIES) {
BOOKIES.forEach(BookieServer::shutdown);
log.info("Stopped all the bookies.");
}
}
// Per-test client configuration and BookKeeper client.
protected ClientConfiguration conf;
protected BookKeeper bk;
// NOTE(review): this overrides EtcdTestBase.setUp(), so the parent's @Before
// never runs here and the inherited etcdClient field stays null for
// subclasses of this base — confirm that is intended.
@Before
public void setUp() throws Exception {
conf = new ClientConfiguration()
.setMetadataServiceUri(etcdContainer.getExternalServiceUri());
bk = BookKeeper.newBuilder(conf).build();
}
@After
public void tearDown() throws Exception {
if (null != bk) {
bk.close();
}
}
}
| 580 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd/helpers/KeySetReaderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd.helpers;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import io.etcd.jetcd.ByteSequence;
import io.etcd.jetcd.options.PutOption;
import io.etcd.jetcd.support.CloseableClient;
import io.etcd.jetcd.support.Observers;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.Consumer;
import java.util.function.Function;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.metadata.etcd.testing.EtcdTestBase;
import org.apache.bookkeeper.versioning.LongVersion;
import org.apache.bookkeeper.versioning.Version.Occurred;
import org.apache.bookkeeper.versioning.Versioned;
import org.apache.commons.compress.utils.Sets;
import org.junit.Test;
import org.testcontainers.shaded.org.apache.commons.lang3.RandomStringUtils;
/**
 * Integration test {@link KeySetReader}.
 */
@Slf4j
public class KeySetReaderTest extends EtcdTestBase {
// Decodes raw etcd key bytes back into UTF-8 strings for assertions.
private static final Function<ByteSequence, String> BYTE_SEQUENCE_STRING_FUNCTION =
bs -> bs.toString(StandardCharsets.UTF_8);
/**
 * Reads a single key without watching: a missing key yields an empty set
 * and a later read observes an externally written key.
 */
@Test
public void testReadSingleKey() throws Exception {
String key = RandomStringUtils.randomAlphabetic(16);
ByteSequence keyBs = ByteSequence.from(key, StandardCharsets.UTF_8);
try (KeySetReader<String> ksReader = new KeySetReader<>(
etcdClient,
BYTE_SEQUENCE_STRING_FUNCTION,
keyBs,
null
)) {
// key not exists
Versioned<Set<String>> versionedKeys = FutureUtils.result(ksReader.read());
assertTrue(
"VersionedKeys : " + versionedKeys,
((LongVersion) versionedKeys.getVersion()).getLongVersion() > 0L);
assertEquals(0, versionedKeys.getValue().size());
assertFalse(ksReader.isWatcherSet());
// keys should be cached
assertEquals(versionedKeys, ksReader.getLocalValue());
// update a value
String value = RandomStringUtils.randomAlphabetic(32);
ByteSequence valueBs = ByteSequence.from(value, StandardCharsets.UTF_8);
FutureUtils.result(etcdClient.getKVClient().put(keyBs, valueBs));
// update the value should not change local value
assertEquals(versionedKeys, ksReader.getLocalValue());
// read the key again
Versioned<Set<String>> newVersionedKey = FutureUtils.result(ksReader.read());
assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertEquals(1, newVersionedKey.getValue().size());
assertEquals(Sets.newHashSet(key), newVersionedKey.getValue());
// local value should be changed
assertEquals(newVersionedKey, ksReader.getLocalValue());
}
}
/**
 * Watches a single key: put and delete operations are delivered as watch
 * notifications and update the reader's local value.
 */
@Test
public void testWatchSingleKey() throws Exception {
String key = RandomStringUtils.randomAlphabetic(16);
ByteSequence keyBs = ByteSequence.from(key, StandardCharsets.UTF_8);
KeySetReader<String> ksReader = null;
try {
ksReader = new KeySetReader<>(
etcdClient,
BYTE_SEQUENCE_STRING_FUNCTION,
keyBs,
null
);
LinkedBlockingQueue<Versioned<Set<String>>> notifications = new LinkedBlockingQueue<>();
Consumer<Versioned<Set<String>>> keyConsumer = consumeVersionedKeySet(notifications);
// key not exists
Versioned<Set<String>> versionedKeys = FutureUtils.result(ksReader.readAndWatch(keyConsumer));
assertTrue(
"VersionedKeys : " + versionedKeys,
((LongVersion) versionedKeys.getVersion()).getLongVersion() > 0L);
assertEquals(0, versionedKeys.getValue().size());
assertTrue(ksReader.isWatcherSet());
// keys should be cached
assertEquals(versionedKeys, ksReader.getLocalValue());
// readAndWatch delivers the current (empty) key set as the first notification
Versioned<Set<String>> newVersionedKey = notifications.take();
assertEquals(Occurred.CONCURRENTLY, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertEquals(versionedKeys, newVersionedKey);
versionedKeys = newVersionedKey;
// update a value
String value = RandomStringUtils.randomAlphabetic(32);
ByteSequence valueBs = ByteSequence.from(value, StandardCharsets.UTF_8);
FutureUtils.result(etcdClient.getKVClient().put(keyBs, valueBs));
// we should get notified with updated key set
newVersionedKey = notifications.take();
assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertEquals(1, newVersionedKey.getValue().size());
assertEquals(Sets.newHashSet(key), newVersionedKey.getValue());
// local value should be changed
assertEquals(newVersionedKey, ksReader.getLocalValue());
versionedKeys = newVersionedKey;
// delete the key
FutureUtils.result(etcdClient.getKVClient().delete(keyBs));
newVersionedKey = notifications.take();
assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertEquals(0, newVersionedKey.getValue().size());
// local value should be changed
assertEquals(newVersionedKey, ksReader.getLocalValue());
} finally {
if (null != ksReader) {
ksReader.close();
}
}
assertNotNull(ksReader);
assertFalse(ksReader.isWatcherSet());
}
/**
 * Watches a single key written under a 1s lease: the key appears via a
 * watch notification and disappears again once the TTL expires.
 */
@Test
public void testWatchSingleKeyWithTTL() throws Exception {
String key = RandomStringUtils.randomAlphabetic(16);
ByteSequence keyBs = ByteSequence.from(key, StandardCharsets.UTF_8);
KeySetReader<String> ksReader = null;
try {
ksReader = new KeySetReader<>(
etcdClient,
BYTE_SEQUENCE_STRING_FUNCTION,
keyBs,
null
);
LinkedBlockingQueue<Versioned<Set<String>>> notifications = new LinkedBlockingQueue<>();
Consumer<Versioned<Set<String>>> keyConsumer = consumeVersionedKeySet(notifications);
// key not exists
Versioned<Set<String>> versionedKeys = FutureUtils.result(ksReader.readAndWatch(keyConsumer));
assertTrue(
"VersionedKeys : " + versionedKeys,
((LongVersion) versionedKeys.getVersion()).getLongVersion() > 0L);
assertEquals(0, versionedKeys.getValue().size());
assertTrue(ksReader.isWatcherSet());
// keys should be cached
assertEquals(versionedKeys, ksReader.getLocalValue());
// the initial notification carries the current (empty) key set, not a change event
Versioned<Set<String>> newVersionedKey = notifications.take();
assertEquals(Occurred.CONCURRENTLY, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertEquals(versionedKeys, newVersionedKey);
versionedKeys = newVersionedKey;
// create a key with ttl
long leaseId = FutureUtils.result(etcdClient.getLeaseClient().grant(1)).getID();
String value = RandomStringUtils.randomAlphabetic(32);
ByteSequence valueBs = ByteSequence.from(value, StandardCharsets.UTF_8);
FutureUtils.result(etcdClient.getKVClient()
.put(keyBs, valueBs, PutOption.newBuilder().withLeaseId(leaseId).build()));
// we should get notified with updated key set
newVersionedKey = notifications.take();
assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertEquals(1, newVersionedKey.getValue().size());
assertEquals(Sets.newHashSet(key), newVersionedKey.getValue());
// local value should be changed
assertEquals(newVersionedKey, ksReader.getLocalValue());
versionedKeys = newVersionedKey;
// the key will be deleted after TTL
newVersionedKey = notifications.take();
assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertEquals(0, newVersionedKey.getValue().size());
// local value should be changed
assertEquals(newVersionedKey, ksReader.getLocalValue());
} finally {
if (null != ksReader) {
ksReader.close();
}
}
assertNotNull(ksReader);
assertFalse(ksReader.isWatcherSet());
}
/**
 * Reads a key range without watching: each externally written key becomes
 * visible on the next explicit read.
 */
@Test
public void testReadKeySet() throws Exception {
String prefix = RandomStringUtils.randomAlphabetic(16);
ByteSequence beginKeyBs = ByteSequence.from(prefix + "-000", StandardCharsets.UTF_8);
ByteSequence endKeyBs = ByteSequence.from(prefix + "-999", StandardCharsets.UTF_8);
try (KeySetReader<String> ksReader = new KeySetReader<>(
etcdClient,
BYTE_SEQUENCE_STRING_FUNCTION,
beginKeyBs,
endKeyBs
)) {
// key not exists
Versioned<Set<String>> versionedKeys = FutureUtils.result(ksReader.read());
assertTrue(
"VersionedKeys : " + versionedKeys,
((LongVersion) versionedKeys.getVersion()).getLongVersion() > 0L);
assertEquals(0, versionedKeys.getValue().size());
assertFalse(ksReader.isWatcherSet());
// keys should be cached
assertEquals(versionedKeys, ksReader.getLocalValue());
Set<String> expectedKeySet = new HashSet<>();
for (int i = 0; i < 20; i++) {
// update a value
String key = String.format("%s-%03d", prefix, i);
String value = RandomStringUtils.randomAlphabetic(32);
ByteSequence keyBs = ByteSequence.from(key, StandardCharsets.UTF_8);
ByteSequence valueBs = ByteSequence.from(value, StandardCharsets.UTF_8);
expectedKeySet.add(key);
FutureUtils.result(etcdClient.getKVClient().put(keyBs, valueBs));
// update the value should not change local value
assertEquals(versionedKeys, ksReader.getLocalValue());
// read the key again
Versioned<Set<String>> newVersionedKey = FutureUtils.result(ksReader.read());
assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertEquals(expectedKeySet, newVersionedKey.getValue());
// local value should be changed
assertEquals(newVersionedKey, ksReader.getLocalValue());
versionedKeys = newVersionedKey;
}
}
}
/**
 * Watches a key range: every put and delete in the range is delivered as a
 * watch notification with the updated key set.
 */
@Test
public void testWatchKeySet() throws Exception {
String prefix = RandomStringUtils.randomAlphabetic(16);
ByteSequence beginKeyBs = ByteSequence.from(prefix + "-000", StandardCharsets.UTF_8);
ByteSequence endKeyBs = ByteSequence.from(prefix + "-999", StandardCharsets.UTF_8);
KeySetReader<String> ksReader = null;
try {
ksReader = new KeySetReader<>(
etcdClient,
BYTE_SEQUENCE_STRING_FUNCTION,
beginKeyBs,
endKeyBs
);
LinkedBlockingQueue<Versioned<Set<String>>> notifications = new LinkedBlockingQueue<>();
Consumer<Versioned<Set<String>>> keyConsumer = consumeVersionedKeySet(notifications);
// key not exists
Versioned<Set<String>> versionedKeys = FutureUtils.result(ksReader.readAndWatch(keyConsumer));
assertTrue(
"VersionedKeys : " + versionedKeys,
((LongVersion) versionedKeys.getVersion()).getLongVersion() > 0L);
assertEquals(0, versionedKeys.getValue().size());
assertTrue(ksReader.isWatcherSet());
// keys should be cached
assertEquals(versionedKeys, ksReader.getLocalValue());
Versioned<Set<String>> newVersionedKey = notifications.take();
assertEquals(Occurred.CONCURRENTLY, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertEquals(versionedKeys, newVersionedKey);
versionedKeys = newVersionedKey;
Set<String> expectedKeySet = new HashSet<>();
for (int i = 0; i < 20; i++) {
// update a value
String key = String.format("%s-%03d", prefix, i);
String value = RandomStringUtils.randomAlphabetic(32);
ByteSequence keyBs = ByteSequence.from(key, StandardCharsets.UTF_8);
ByteSequence valueBs = ByteSequence.from(value, StandardCharsets.UTF_8);
expectedKeySet.add(key);
FutureUtils.result(etcdClient.getKVClient().put(keyBs, valueBs));
// we should get notified with updated key set
newVersionedKey = notifications.take();
assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertEquals(expectedKeySet, newVersionedKey.getValue());
// local value should be changed
assertEquals(newVersionedKey, ksReader.getLocalValue());
versionedKeys = newVersionedKey;
}
for (int i = 0; i < 20; i++) {
// delete the key
String key = String.format("%s-%03d", prefix, i);
ByteSequence keyBs = ByteSequence.from(key, StandardCharsets.UTF_8);
expectedKeySet.remove(key);
FutureUtils.result(etcdClient.getKVClient().delete(keyBs));
// we should get notified with updated key set
newVersionedKey = notifications.take();
assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertEquals(expectedKeySet, newVersionedKey.getValue());
// local value should be changed
assertEquals(newVersionedKey, ksReader.getLocalValue());
versionedKeys = newVersionedKey;
}
} finally {
if (null != ksReader) {
ksReader.close();
}
}
assertNotNull(ksReader);
assertFalse(ksReader.isWatcherSet());
}
/**
 * Watches a key range populated under a kept-alive 1s lease: once keep-alive
 * stops, all keys expire together and one notification with an empty set
 * arrives.
 */
@Test
public void testWatchKeySetWithTTL() throws Exception {
String prefix = RandomStringUtils.randomAlphabetic(16);
ByteSequence beginKeyBs = ByteSequence.from(prefix + "-000", StandardCharsets.UTF_8);
ByteSequence endKeyBs = ByteSequence.from(prefix + "-999", StandardCharsets.UTF_8);
KeySetReader<String> ksReader = null;
try {
ksReader = new KeySetReader<>(
etcdClient,
BYTE_SEQUENCE_STRING_FUNCTION,
beginKeyBs,
endKeyBs
);
LinkedBlockingQueue<Versioned<Set<String>>> notifications = new LinkedBlockingQueue<>();
Consumer<Versioned<Set<String>>> keyConsumer = consumeVersionedKeySet(notifications);
// key not exists
Versioned<Set<String>> versionedKeys = FutureUtils.result(ksReader.readAndWatch(keyConsumer));
assertTrue(
"VersionedKeys : " + versionedKeys,
((LongVersion) versionedKeys.getVersion()).getLongVersion() > 0L);
assertEquals(0, versionedKeys.getValue().size());
assertTrue(ksReader.isWatcherSet());
// keys should be cached
assertEquals(versionedKeys, ksReader.getLocalValue());
// the initial notification carries the current (empty) key set, not a change event
Versioned<Set<String>> newVersionedKey = notifications.take();
assertEquals(Occurred.CONCURRENTLY, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertEquals(versionedKeys, newVersionedKey);
versionedKeys = newVersionedKey;
// create keys with ttl
long leaseId = FutureUtils.result(etcdClient.getLeaseClient().grant(1)).getID();
CloseableClient ka = etcdClient.getLeaseClient().keepAlive(leaseId, Observers.observer(response -> {
}));
Set<String> expectedKeySet = new HashSet<>();
for (int i = 0; i < 20; i++) {
String key = String.format("%s-%03d", prefix, i);
String value = RandomStringUtils.randomAlphabetic(32);
ByteSequence keyBs = ByteSequence.from(key, StandardCharsets.UTF_8);
ByteSequence valueBs = ByteSequence.from(value, StandardCharsets.UTF_8);
expectedKeySet.add(key);
FutureUtils.result(etcdClient.getKVClient()
.put(keyBs, valueBs, PutOption.newBuilder().withLeaseId(leaseId).build()));
// we should get notified with updated key set
newVersionedKey = notifications.take();
assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertEquals(expectedKeySet, newVersionedKey.getValue());
// local value should be changed
assertEquals(newVersionedKey, ksReader.getLocalValue());
versionedKeys = newVersionedKey;
}
// stop keep alive all the keys should be expired.
ka.close();
// all the keys will be deleted after TTL in same batch.
newVersionedKey = notifications.take();
// local value should be changed
assertEquals(newVersionedKey, ksReader.getLocalValue());
assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
assertTrue(newVersionedKey.getValue().isEmpty());
} finally {
if (null != ksReader) {
ksReader.close();
}
}
assertNotNull(ksReader);
assertFalse(ksReader.isWatcherSet());
}
}
| 581 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/test/java/org/apache/bookkeeper/metadata/etcd/helpers/HelpersTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd.helpers;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.bookkeeper.common.concurrent.FutureUtils.result;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import io.etcd.jetcd.ByteSequence;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.metadata.etcd.testing.EtcdTestBase;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.lang3.StringUtils;
import org.junit.Before;
import org.junit.Test;
/**
* Integration test helpers.
*/
@Slf4j
public class HelpersTest extends EtcdTestBase {

    /** Decodes an etcd key into a UTF-8 string. */
    private static final Function<ByteSequence, String> BYTE_SEQUENCE_STRING_FUNCTION =
        bs -> bs.toString(UTF_8);

    /**
     * Builds a scoped key whose numeric suffix is zero-padded to 10 digits so
     * that lexicographic (etcd range) order matches numeric order.
     */
    private static String getKey(String scope, int i) {
        return String.format("%s-key-%010d", scope, i);
    }

    // per-test key prefix, randomized so runs do not interfere with each other
    private String scope;

    @Before
    @Override
    public void setUp() throws Exception {
        super.setUp();
        scope = RandomStringUtils.randomAlphabetic(8);
    }

    /**
     * A key stream over a range containing no keys keeps returning empty batches.
     */
    @Test
    public void testEmptyKeyStream() throws Exception {
        KeyStream<String> ks = new KeyStream<>(
            etcdClient.getKVClient(),
            ByteSequence.from(getKey(scope, 0), UTF_8),
            ByteSequence.from(getKey(scope, 100), UTF_8),
            BYTE_SEQUENCE_STRING_FUNCTION
        );
        List<String> values = result(ks.readNext());
        assertTrue(values.isEmpty());
        // reading again from an exhausted stream still yields an empty batch
        values = result(ks.readNext());
        assertTrue(values.isEmpty());
    }

    @Test
    public void testKeyStreamBatch1() throws Exception {
        testKeyStream(20, 1);
    }

    @Test
    public void testKeyStreamBatch2() throws Exception {
        testKeyStream(20, 2);
    }

    @Test
    public void testKeyStreamBatch7() throws Exception {
        testKeyStream(20, 7);
    }

    @Test
    public void testKeyStreamBatch10() throws Exception {
        testKeyStream(20, 10);
    }

    @Test
    public void testKeyStreamBatch20() throws Exception {
        testKeyStream(20, 20);
    }

    @Test
    public void testKeyStreamBatch40() throws Exception {
        testKeyStream(20, 40);
    }

    @Test
    public void testKeyStreamBatchUnlimited() throws Exception {
        testKeyStream(20, 0);
    }

    /**
     * Writes {@code numKeys} keys and verifies {@link KeyStream} returns all of
     * them, in order, when read in batches of {@code batchSize} (0 = unlimited).
     */
    private void testKeyStream(int numKeys, int batchSize) throws Exception {
        for (int i = 0; i < numKeys; i++) {
            String key = getKey(scope, i);
            // consistency: use the (String, Charset) overload throughout this class
            ByteSequence keyBs = ByteSequence.from(key, UTF_8);
            result(etcdClient.getKVClient().put(keyBs, keyBs));
        }
        KeyStream<Integer> ks = openKeyStream(batchSize);
        AtomicInteger numReceived = new AtomicInteger(0);
        while (true) {
            List<Integer> values = result(ks.readNext());
            log.info("Received values : {}", values);
            if (values.isEmpty()) {
                // empty batch signals the end of the range
                break;
            }
            for (int value : values) {
                // keys must come back in strictly increasing numeric order
                assertEquals(numReceived.getAndIncrement(), value);
            }
        }
        assertEquals(numKeys, numReceived.get());
    }

    /**
     * Same as {@link #testKeyStream(int, int)} but drives the stream through a
     * {@link KeyIterator}.
     */
    private void testKeyIterator(int numKeys, int batchSize) throws Exception {
        for (int i = 0; i < numKeys; i++) {
            String key = getKey(scope, i);
            ByteSequence keyBs = ByteSequence.from(key, UTF_8);
            result(etcdClient.getKVClient().put(keyBs, keyBs));
        }
        KeyStream<Integer> ks = openKeyStream(batchSize);
        KeyIterator<Integer> ki = new KeyIterator<>(ks);
        AtomicInteger numReceived = new AtomicInteger(0);
        while (ki.hasNext()) {
            List<Integer> values = ki.next();
            log.info("Received values : {}", values);
            if (values.isEmpty()) {
                break;
            }
            for (int value : values) {
                assertEquals(numReceived.getAndIncrement(), value);
            }
        }
        assertEquals(numKeys, numReceived.get());
    }

    @Test
    public void testKeyIteratorBatch1() throws Exception {
        testKeyIterator(20, 1);
    }

    @Test
    public void testKeyIteratorBatch2() throws Exception {
        testKeyIterator(20, 2);
    }

    @Test
    public void testKeyIteratorBatch7() throws Exception {
        testKeyIterator(20, 7);
    }

    @Test
    public void testKeyIteratorBatch10() throws Exception {
        testKeyIterator(20, 10);
    }

    @Test
    public void testKeyIteratorBatch20() throws Exception {
        testKeyIterator(20, 20);
    }

    @Test
    public void testKeyIteratorBatch40() throws Exception {
        testKeyIterator(20, 40);
    }

    @Test
    public void testKeyIteratorBatchUnlimited() throws Exception {
        testKeyIterator(20, 0);
    }

    /**
     * Opens a key stream over the whole scope that parses the numeric suffix of
     * each key.
     *
     * @param batchSize max keys per {@code readNext} call; 0 means unlimited
     */
    private KeyStream<Integer> openKeyStream(int batchSize) {
        KeyStream<Integer> ks = new KeyStream<>(
            etcdClient.getKVClient(),
            ByteSequence.from(getKey(scope, 0), UTF_8),
            ByteSequence.from(getKey(scope, Integer.MAX_VALUE), UTF_8),
            bs -> {
                String[] keyParts = StringUtils.split(bs.toString(UTF_8), '-');
                try {
                    return Integer.parseInt(keyParts[2]);
                } catch (NumberFormatException nfe) {
                    log.error("Failed to parse key string '{}' : ",
                        bs.toString(UTF_8), nfe);
                    // sentinel that can never match an expected sequence number
                    return -0xabcd;
                }
            },
            batchSize
        );
        return ks;
    }
}
| 582 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/EtcdWatchClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import static io.etcd.jetcd.common.exception.EtcdExceptionFactory.newClosedWatchClientException;
import static io.etcd.jetcd.common.exception.EtcdExceptionFactory.newEtcdException;
import static io.etcd.jetcd.common.exception.EtcdExceptionFactory.toEtcdException;
import com.google.common.base.Strings;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import io.etcd.jetcd.ByteSequence;
import io.etcd.jetcd.Client;
import io.etcd.jetcd.EtcdConnectionManager;
import io.etcd.jetcd.api.WatchCancelRequest;
import io.etcd.jetcd.api.WatchCreateRequest;
import io.etcd.jetcd.api.WatchGrpc;
import io.etcd.jetcd.api.WatchRequest;
import io.etcd.jetcd.api.WatchResponse;
import io.etcd.jetcd.common.exception.ErrorCode;
import io.etcd.jetcd.common.exception.EtcdException;
import io.etcd.jetcd.common.exception.EtcdExceptionFactory;
import io.etcd.jetcd.options.WatchOption;
import io.etcd.jetcd.watch.WatchResponseWithError;
import io.grpc.Status;
import io.grpc.Status.Code;
import io.grpc.stub.StreamObserver;
import java.util.LinkedList;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.common.util.OrderedScheduler;
import org.apache.bookkeeper.util.collections.ConcurrentLongHashMap;
import org.apache.bookkeeper.util.collections.ConcurrentLongHashSet;
/**
* An async watch implementation.
*/
@Slf4j
public class EtcdWatchClient implements AutoCloseable {

    private final EtcdConnectionManager connMgr;
    private final WatchGrpc.WatchStub stub;
    // the single bidi grpc stream shared by all watchers; recreated on resume
    private volatile StreamObserver<WatchRequest> grpcWatchStreamObserver;
    // watchers stores a mapping between watchID -> EtcdWatcher.
    private final ConcurrentLongHashMap<EtcdWatcher> watchers =
        ConcurrentLongHashMap.<EtcdWatcher>newBuilder().build();
    // watchers whose create request has been (or is about to be) sent but not
    // yet acknowledged by the server; ordered, since etcd acks creates in order
    private final LinkedList<EtcdWatcher> pendingWatchers = new LinkedList<>();
    // watch ids for which a cancel request is in flight
    private final ConcurrentLongHashSet cancelSet = ConcurrentLongHashSet.newBuilder().build();
    // scheduler
    private final OrderedScheduler scheduler;
    // single thread that owns all watcher state mutations (pendingWatchers,
    // watchers, cancelSet) — this is what makes the state machine safe
    private final ScheduledExecutorService watchExecutor;
    // close state
    private CompletableFuture<Void> closeFuture = null;

    public EtcdWatchClient(Client client) {
        this.connMgr = new EtcdConnectionManager(client);
        this.stub = connMgr.newWatchStub();
        this.scheduler = OrderedScheduler.newSchedulerBuilder()
            .name("etcd-watcher-scheduler")
            .numThreads(Runtime.getRuntime().availableProcessors())
            .build();
        // pin all state transitions onto one thread of the scheduler
        this.watchExecutor = this.scheduler.chooseThread();
    }

    public synchronized boolean isClosed() {
        return closeFuture != null;
    }

    /**
     * Watches a single key with default options.
     */
    public CompletableFuture<EtcdWatcher> watch(ByteSequence key,
                                                BiConsumer<io.etcd.jetcd.watch.WatchResponse, Throwable> consumer) {
        return watch(key, WatchOption.DEFAULT, consumer);
    }

    /**
     * Registers a new watcher for {@code key}. The returned future completes
     * once the watcher is queued; the create request is only sent immediately
     * when no other create is already in flight (etcd acks creates one at a
     * time, so subsequent requests are sent as earlier ones are acknowledged).
     */
    public CompletableFuture<EtcdWatcher> watch(ByteSequence key,
                                                WatchOption watchOption,
                                                BiConsumer<io.etcd.jetcd.watch.WatchResponse, Throwable> consumer) {
        return CompletableFuture.supplyAsync(() -> {
            if (isClosed()) {
                throw EtcdExceptionFactory.newClosedWatchClientException();
            }
            EtcdWatcher watcher = new EtcdWatcher(key, watchOption, scheduler.chooseThread(), this);
            watcher.addConsumer(consumer);
            pendingWatchers.add(watcher);
            if (pendingWatchers.size() == 1) {
                WatchRequest request = toWatchCreateRequest(watcher);
                getGrpcWatchStreamObserver().onNext(request);
            }
            return watcher;
        }, watchExecutor);
    }

    // notifies all watchers about a exception. it doesn't close watchers.
    // it is the responsibility of user to close watchers.
    private void notifyWatchers(EtcdException e) {
        WatchResponseWithError wre = new WatchResponseWithError(e);
        this.pendingWatchers.forEach(watcher -> watcher.notifyWatchResponse(wre));
        this.pendingWatchers.clear();
        this.watchers.values().forEach(watcher -> watcher.notifyWatchResponse(wre));
        this.watchers.clear();
    }

    /**
     * Cancels the given watcher on the watch executor thread.
     */
    public CompletableFuture<Void> unwatch(EtcdWatcher watcher) {
        return CompletableFuture.runAsync(() -> cancelWatcher(watcher.getWatchID()), watchExecutor);
    }

    // sends a cancel request for watchID unless the client is closed or a
    // cancel for this id is already in flight
    private void cancelWatcher(long watchID) {
        if (isClosed()) {
            return;
        }
        if (cancelSet.contains(watchID)) {
            return;
        }
        watchers.remove(watchID);
        cancelSet.add(watchID);
        WatchCancelRequest watchCancelRequest = WatchCancelRequest.newBuilder()
            .setWatchId(watchID)
            .build();
        WatchRequest cancelRequest = WatchRequest.newBuilder()
            .setCancelRequest(watchCancelRequest)
            .build();
        getGrpcWatchStreamObserver().onNext(cancelRequest);
    }

    /**
     * Closes the client asynchronously: notifies every watcher with a
     * closed-client exception, completes the grpc stream and shuts down the
     * scheduler. Idempotent — subsequent calls return the same future.
     */
    public CompletableFuture<Void> closeAsync() {
        CompletableFuture<Void> future;
        synchronized (this) {
            if (null == closeFuture) {
                log.info("Closing watch client");
                closeFuture = CompletableFuture.runAsync(() -> {
                    notifyWatchers(newClosedWatchClientException());
                    closeGrpcWatchStreamObserver();
                }, watchExecutor);
            }
            future = closeFuture;
        }
        return future.whenComplete((ignored, cause) -> {
            this.scheduler.shutdown();
        });
    }

    @Override
    public void close() {
        try {
            FutureUtils.result(closeAsync());
        } catch (Exception e) {
            log.warn("Encountered exceptions on closing watch client", e);
        }
        this.scheduler.forceShutdown(10, TimeUnit.SECONDS);
    }

    // grpc callbacks hop onto the watch executor so that all state is mutated
    // from a single thread
    private StreamObserver<WatchResponse> createWatchStreamObserver() {
        return new StreamObserver<WatchResponse>() {
            @Override
            public void onNext(WatchResponse watchResponse) {
                if (isClosed()) {
                    return;
                }
                watchExecutor.submit(() -> processWatchResponse(watchResponse));
            }

            @Override
            public void onError(Throwable t) {
                if (isClosed()) {
                    return;
                }
                watchExecutor.submit(() -> processError(t));
            }

            @Override
            public void onCompleted() {
            }
        };
    }

    private void processWatchResponse(WatchResponse watchResponse) {
        // prevents grpc on sending watchResponse to a closed watch client.
        if (isClosed()) {
            return;
        }
        // a response is exactly one of: create ack, cancel ack, or events
        if (watchResponse.getCreated()) {
            processCreate(watchResponse);
        } else if (watchResponse.getCanceled()) {
            processCanceled(watchResponse);
        } else {
            processEvents(watchResponse);
        }
    }

    private void processError(Throwable t) {
        // prevents grpc on sending error to a closed watch client.
        if (this.isClosed()) {
            return;
        }
        Status status = Status.fromThrowable(t);
        if (this.isHaltError(status) || this.isNoLeaderError(status)) {
            // non-retriable: fail all watchers and tear down the stream
            this.notifyWatchers(toEtcdException(status));
            this.closeGrpcWatchStreamObserver();
            this.cancelSet.clear();
            return;
        }
        // resume with a delay; avoiding immediate retry on a long connection downtime.
        scheduler.schedule(this::resume, 500, TimeUnit.MILLISECONDS);
    }

    // recreates the grpc stream and re-registers all active watchers
    private void resume() {
        this.closeGrpcWatchStreamObserver();
        this.cancelSet.clear();
        this.resumeWatchers();
    }

    // lazily (re)creates the grpc stream observer
    private synchronized StreamObserver<WatchRequest> getGrpcWatchStreamObserver() {
        if (this.grpcWatchStreamObserver == null) {
            this.grpcWatchStreamObserver = this.stub.watch(this.createWatchStreamObserver());
        }
        return this.grpcWatchStreamObserver;
    }

    // closeGrpcWatchStreamObserver closes the underlying grpc watch stream.
    private void closeGrpcWatchStreamObserver() {
        if (this.grpcWatchStreamObserver == null) {
            return;
        }
        this.grpcWatchStreamObserver.onCompleted();
        this.grpcWatchStreamObserver = null;
    }

    // handles a watch-create ack: promotes the oldest pending watcher to
    // active and kicks off the next pending create (creates are acked in order)
    private void processCreate(WatchResponse response) {
        EtcdWatcher watcher = this.pendingWatchers.poll();
        this.sendNextWatchCreateRequest();
        if (watcher == null) {
            // shouldn't happen
            // may happen due to duplicate watch create responses.
            log.warn("Watch client receives watch create response but find no corresponding watcher");
            return;
        }
        if (watcher.isClosed()) {
            return;
        }
        if (response.getWatchId() == -1) {
            watcher.notifyWatchResponse(new WatchResponseWithError(
                newEtcdException(ErrorCode.INTERNAL, "etcd server failed to create watch id")));
            return;
        }
        if (watcher.getRevision() == 0) {
            watcher.setRevision(response.getHeader().getRevision());
        }
        watcher.setWatchID(response.getWatchId());
        this.watchers.put(watcher.getWatchID(), watcher);
    }

    /**
     * chooses the next resuming watcher to register with the grpc stream.
     */
    private Optional<WatchRequest> nextResume() {
        EtcdWatcher pendingWatcher = this.pendingWatchers.peek();
        if (pendingWatcher != null) {
            return Optional.of(this.toWatchCreateRequest(pendingWatcher));
        }
        return Optional.empty();
    }

    private void sendNextWatchCreateRequest() {
        this.nextResume().ifPresent(
            (nextWatchRequest -> this.getGrpcWatchStreamObserver().onNext(nextWatchRequest)));
    }

    // dispatches watch events to the owning watcher and advances its revision
    private void processEvents(WatchResponse response) {
        EtcdWatcher watcher = this.watchers.get(response.getWatchId());
        if (watcher == null) {
            // cancel server side watcher.
            this.cancelWatcher(response.getWatchId());
            return;
        }
        if (response.getCompactRevision() != 0) {
            // the start revision was compacted away — the watcher cannot resume
            watcher.notifyWatchResponse(new WatchResponseWithError(
                EtcdExceptionFactory
                    .newCompactedException(response.getCompactRevision())));
            return;
        }
        if (response.getEventsCount() == 0) {
            // progress notification: just remember how far we have observed
            watcher.setRevision(response.getHeader().getRevision());
            return;
        }
        watcher.notifyWatchResponse(new WatchResponseWithError(new io.etcd.jetcd.watch.WatchResponse(response)));
        // resume point is one past the last delivered modification
        watcher.setRevision(
            response
                .getEvents(response.getEventsCount() - 1)
                .getKv().getModRevision() + 1);
    }

    // moves every active watcher back to pending and starts re-registering
    // them one by one on the (new) stream
    private void resumeWatchers() {
        this.watchers.values().forEach(watcher -> {
            if (watcher.isClosed()) {
                return;
            }
            watcher.setWatchID(-1);
            this.pendingWatchers.add(watcher);
        });
        this.watchers.clear();
        this.sendNextWatchCreateRequest();
    }

    // handles a watch-cancel ack; a cancel without a reason is treated as the
    // server rejecting a future revision
    private void processCanceled(WatchResponse response) {
        EtcdWatcher watcher = this.watchers.get(response.getWatchId());
        this.cancelSet.remove(response.getWatchId());
        if (watcher == null) {
            return;
        }
        String reason = response.getCancelReason();
        if (Strings.isNullOrEmpty(reason)) {
            watcher.notifyWatchResponse(new WatchResponseWithError(newEtcdException(
                ErrorCode.OUT_OF_RANGE,
                "etcdserver: mvcc: required revision is a future revision"))
            );
        } else {
            watcher.notifyWatchResponse(
                new WatchResponseWithError(newEtcdException(ErrorCode.FAILED_PRECONDITION, reason)));
        }
    }

    private static boolean isNoLeaderError(Status status) {
        return status.getCode() == Code.UNAVAILABLE
            && "etcdserver: no leader".equals(status.getDescription());
    }

    private static boolean isHaltError(Status status) {
        // Unavailable codes mean the system will be right back.
        // (e.g., can't connect, lost leader)
        // Treat Internal codes as if something failed, leaving the
        // system in an inconsistent state, but retrying could make progress.
        // (e.g., failed in middle of send, corrupted frame)
        return status.getCode() != Code.UNAVAILABLE && status.getCode() != Code.INTERNAL;
    }

    // translates a watcher's key/options/revision into a grpc create request
    private static WatchRequest toWatchCreateRequest(EtcdWatcher watcher) {
        ByteString key = UnsafeByteOperations.unsafeWrap(watcher.getKey().getBytes());
        WatchOption option = watcher.getWatchOption();
        WatchCreateRequest.Builder builder = WatchCreateRequest.newBuilder()
            .setKey(key)
            .setPrevKv(option.isPrevKV())
            .setProgressNotify(option.isProgressNotify())
            .setStartRevision(watcher.getRevision());

        option.getEndKey()
            .ifPresent(endKey -> builder.setRangeEnd(UnsafeByteOperations.unsafeWrap(endKey.getBytes())));

        if (option.isNoDelete()) {
            builder.addFilters(WatchCreateRequest.FilterType.NODELETE);
        }

        if (option.isNoPut()) {
            builder.addFilters(WatchCreateRequest.FilterType.NOPUT);
        }

        return WatchRequest.newBuilder().setCreateRequest(builder).build();
    }
}
| 583 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/EtcdUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import static org.apache.bookkeeper.metadata.etcd.EtcdConstants.BUCKETS_NODE;
import static org.apache.bookkeeper.metadata.etcd.EtcdConstants.COOKIES_NODE;
import static org.apache.bookkeeper.metadata.etcd.EtcdConstants.END_SEP;
import static org.apache.bookkeeper.metadata.etcd.EtcdConstants.INSTANCEID_NODE;
import static org.apache.bookkeeper.metadata.etcd.EtcdConstants.LAYOUT_NODE;
import static org.apache.bookkeeper.metadata.etcd.EtcdConstants.LEDGERS_NODE;
import static org.apache.bookkeeper.metadata.etcd.EtcdConstants.MEMBERS_NODE;
import static org.apache.bookkeeper.metadata.etcd.EtcdConstants.READONLY_NODE;
import static org.apache.bookkeeper.metadata.etcd.EtcdConstants.UR_NODE;
import static org.apache.bookkeeper.metadata.etcd.EtcdConstants.WRITEABLE_NODE;
import java.io.IOException;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import lombok.AccessLevel;
import lombok.NoArgsConstructor;
import org.apache.bookkeeper.bookie.BookieException.MetadataStoreException;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.net.BookieId;
import org.apache.commons.lang3.StringUtils;
/**
* Utils for etcd based metadata store.
*/
@NoArgsConstructor(access = AccessLevel.PRIVATE)
final class EtcdUtils {

    /** Exclusive end key for scanning everything under {@code scope}. */
    static String getScopeEndKey(String scope) {
        return scope + END_SEP;
    }

    static String getBucketsPath(String scope) {
        return scope + "/" + BUCKETS_NODE;
    }

    static String getBucketPath(String scope, int bucket) {
        // bucket id is zero-padded to three digits so keys sort numerically
        return String.format("%s/%s/%03d",
            scope,
            BUCKETS_NODE,
            bucket);
    }

    static String getLayoutKey(String scope) {
        return scope + "/" + LAYOUT_NODE;
    }

    static String getLedgersPath(String scope) {
        return scope + "/" + LEDGERS_NODE;
    }

    static String getLedgerKey(String scope, long ledgerId) {
        return getLedgerKey(scope, 0, ledgerId);
    }

    /** Encodes the (scopeId, ledgerId) pair as a UUID inside the ledger key. */
    static String getLedgerKey(String scope, long scopeId, long ledgerId) {
        UUID uuid = new UUID(scopeId, ledgerId);
        return scope + "/ledgers/" + uuid;
    }

    /** Extracts the UUID component from a ledger key produced by {@link #getLedgerKey}. */
    static UUID parseLedgerKey(String ledgerKey) {
        String[] segments = StringUtils.split(ledgerKey, '/');
        return UUID.fromString(segments[segments.length - 1]);
    }

    static String getBookiesPath(String scope) {
        return scope + "/" + MEMBERS_NODE;
    }

    static String getBookiesEndPath(String scope) {
        return scope + "/" + MEMBERS_NODE + END_SEP;
    }

    static String getWritableBookiesPath(String scope) {
        return scope + "/" + MEMBERS_NODE + "/" + WRITEABLE_NODE;
    }

    static String getWritableBookiesBeginPath(String scope) {
        return scope + "/" + MEMBERS_NODE + "/" + WRITEABLE_NODE + "/";
    }

    static String getWritableBookiesEndPath(String scope) {
        return scope + "/" + MEMBERS_NODE + "/" + WRITEABLE_NODE + END_SEP;
    }

    static String getWritableBookiePath(String scope, BookieId bookieId) {
        return scope + "/" + MEMBERS_NODE + "/" + WRITEABLE_NODE + "/" + bookieId.toString();
    }

    static String getReadonlyBookiesPath(String scope) {
        return scope + "/" + MEMBERS_NODE + "/" + READONLY_NODE;
    }

    static String getReadonlyBookiesBeginPath(String scope) {
        return scope + "/" + MEMBERS_NODE + "/" + READONLY_NODE + "/";
    }

    static String getReadonlyBookiesEndPath(String scope) {
        return scope + "/" + MEMBERS_NODE + "/" + READONLY_NODE + END_SEP;
    }

    static String getReadonlyBookiePath(String scope, BookieId bookieId) {
        return scope + "/" + MEMBERS_NODE + "/" + READONLY_NODE + "/" + bookieId.toString();
    }

    static String getCookiesPath(String scope) {
        return scope + "/" + COOKIES_NODE;
    }

    static String getCookiePath(String scope, BookieId bookieId) {
        return scope + "/" + COOKIES_NODE + "/" + bookieId.toString();
    }

    static String getClusterInstanceIdPath(String scope) {
        return scope + "/" + INSTANCEID_NODE;
    }

    static String getUnderreplicationPath(String scope) {
        return scope + "/" + UR_NODE;
    }

    /** Waits for {@code future}, rethrowing any failure as an {@link IOException}. */
    static <T> T ioResult(CompletableFuture<T> future) throws IOException {
        return FutureUtils.result(future, t ->
            (t instanceof IOException) ? (IOException) t : new IOException(t));
    }

    /** Waits for {@code future}, rethrowing any failure as a {@link MetadataStoreException}. */
    static <T> T msResult(CompletableFuture<T> future) throws MetadataStoreException {
        return FutureUtils.result(future, t ->
            (t instanceof MetadataStoreException)
                ? (MetadataStoreException) t : new MetadataStoreException(t));
    }

    /** Bounded-wait variant of {@link #msResult(CompletableFuture)}. */
    static <T> T msResult(CompletableFuture<T> future,
                          long timeout,
                          TimeUnit timeUnit)
            throws MetadataStoreException, TimeoutException {
        return FutureUtils.result(future, t ->
            (t instanceof MetadataStoreException)
                ? (MetadataStoreException) t : new MetadataStoreException(t),
            timeout, timeUnit);
    }

    /** Reads 8 bytes starting at {@code index} as a big-endian long. */
    public static long toLong(byte[] memory, int index) {
        long result = 0L;
        for (int i = 0; i < 8; i++) {
            result = (result << 8) | (memory[index + i] & 0xffL);
        }
        return result;
    }

    /**
     * Convert a long number to a bytes array (big-endian).
     *
     * @param value the long number
     * @return the bytes array
     */
    public static byte[] toBytes(long value) {
        byte[] memory = new byte[8];
        toBytes(value, memory, 0);
        return memory;
    }

    /** Writes {@code value} big-endian into {@code memory} starting at {@code index}. */
    public static void toBytes(long value, byte[] memory, int index) {
        // fill from the least-significant byte backwards
        for (int i = 7; i >= 0; i--) {
            memory[index + i] = (byte) value;
            value >>>= 8;
        }
    }
}
| 584 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/EtcdRegistrationClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import com.google.common.collect.Maps;
import io.etcd.jetcd.ByteSequence;
import io.etcd.jetcd.Client;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;
import java.util.function.Function;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.discover.RegistrationClient;
import org.apache.bookkeeper.metadata.etcd.helpers.KeySetReader;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.versioning.Versioned;
/**
* Etcd based registration client.
*/
@Slf4j
class EtcdRegistrationClient implements RegistrationClient {

    /**
     * Returns a function that strips {@code prefix} from an etcd key and
     * parses the remainder as a {@link BookieId}.
     */
    private static Function<ByteSequence, BookieId> newBookieSocketAddressFunc(String prefix) {
        return bs -> {
            String addrStr = bs.toString(StandardCharsets.UTF_8);
            return BookieId.parse(addrStr.replace(prefix, ""));
        };
    }

    private final EtcdWatchClient watchClient;
    private final KeySetReader<BookieId> writableBookiesReader;
    private final KeySetReader<BookieId> readonlyBookiesReader;
    // listener -> consumer mappings; guarded by synchronizing on each map
    private Map<RegistrationListener, Consumer<Versioned<Set<BookieId>>>> writableListeners =
        Maps.newHashMap();
    private Map<RegistrationListener, Consumer<Versioned<Set<BookieId>>>> readonlyListeners =
        Maps.newHashMap();

    EtcdRegistrationClient(String scope,
                           Client client) {
        this.watchClient = new EtcdWatchClient(client);
        this.writableBookiesReader = new KeySetReader<>(
            client,
            watchClient,
            newBookieSocketAddressFunc(EtcdUtils.getWritableBookiesBeginPath(scope)),
            ByteSequence.from(EtcdUtils.getWritableBookiesBeginPath(scope), StandardCharsets.UTF_8),
            ByteSequence.from(EtcdUtils.getWritableBookiesEndPath(scope), StandardCharsets.UTF_8)
        );
        this.readonlyBookiesReader = new KeySetReader<>(
            client,
            watchClient,
            newBookieSocketAddressFunc(EtcdUtils.getReadonlyBookiesBeginPath(scope)),
            ByteSequence.from(EtcdUtils.getReadonlyBookiesBeginPath(scope), StandardCharsets.UTF_8),
            ByteSequence.from(EtcdUtils.getReadonlyBookiesEndPath(scope), StandardCharsets.UTF_8)
        );
    }

    @Override
    public void close() {
        // close readers before the shared watch client they depend on
        this.writableBookiesReader.close();
        this.readonlyBookiesReader.close();
        this.watchClient.close();
    }

    @Override
    public CompletableFuture<Versioned<Set<BookieId>>> getWritableBookies() {
        return writableBookiesReader.read();
    }

    @Override
    public CompletableFuture<Versioned<Set<BookieId>>> getAllBookies() {
        // not supported by the etcd driver
        return FutureUtils.exception(new BKException.BKIllegalOpException());
    }

    @Override
    public CompletableFuture<Versioned<Set<BookieId>>> getReadOnlyBookies() {
        return readonlyBookiesReader.read();
    }

    /**
     * Registers {@code listener} with {@code keySetReader}, wiring it up as a
     * consumer of key-set updates. Registering the same listener twice is a
     * no-op that completes immediately.
     */
    private static CompletableFuture<Void> registerListener(
        KeySetReader<BookieId> keySetReader,
        Map<RegistrationListener, Consumer<Versioned<Set<BookieId>>>> listeners,
        RegistrationListener listener
    ) {
        Consumer<Versioned<Set<BookieId>>> consumer;
        synchronized (listeners) {
            consumer = listeners.get(listener);
            if (null != consumer) {
                // already registered
                return FutureUtils.Void();
            } else {
                consumer = bookies -> listener.onBookiesChanged(bookies);
                listeners.put(listener, consumer);
            }
        }
        return keySetReader
            .readAndWatch(consumer)
            .thenApply(ignored -> null);
    }

    /**
     * Unregisters {@code listener} and detaches its consumer from the reader.
     * Unregistering a listener that was never registered is a no-op.
     */
    private static CompletableFuture<Void> unregisterListener(
        KeySetReader<BookieId> keySetReader,
        Map<RegistrationListener, Consumer<Versioned<Set<BookieId>>>> listeners,
        RegistrationListener listener
    ) {
        Consumer<Versioned<Set<BookieId>>> consumer;
        synchronized (listeners) {
            // bugfix: remove the mapping (under the same lock registerListener
            // uses) so a later re-register doesn't short-circuit on the stale
            // entry and skip re-watching
            consumer = listeners.remove(listener);
        }
        if (null == consumer) {
            return FutureUtils.Void();
        } else {
            return keySetReader.unwatch(consumer);
        }
    }

    @Override
    public CompletableFuture<Void> watchWritableBookies(RegistrationListener listener) {
        return registerListener(
            writableBookiesReader,
            writableListeners,
            listener
        );
    }

    @Override
    public void unwatchWritableBookies(RegistrationListener listener) {
        unregisterListener(
            writableBookiesReader,
            writableListeners,
            listener
        );
    }

    @Override
    public CompletableFuture<Void> watchReadOnlyBookies(RegistrationListener listener) {
        return registerListener(
            readonlyBookiesReader,
            readonlyListeners,
            listener
        );
    }

    @Override
    public void unwatchReadOnlyBookies(RegistrationListener listener) {
        unregisterListener(
            readonlyBookiesReader,
            readonlyListeners,
            listener
        );
    }
}
| 585 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/EtcdMetadataBookieDriver.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.discover.RegistrationManager;
import org.apache.bookkeeper.meta.MetadataBookieDriver;
import org.apache.bookkeeper.meta.MetadataDrivers;
import org.apache.bookkeeper.meta.exceptions.MetadataException;
import org.apache.bookkeeper.stats.StatsLogger;
/**
 * Etcd based metadata bookie driver.
 */
@Slf4j
public class EtcdMetadataBookieDriver extends EtcdMetadataDriverBase implements MetadataBookieDriver {

    // register myself so "etcd" metadata service uris resolve to this driver
    static {
        MetadataDrivers.registerBookieDriver(
            SCHEME, EtcdMetadataBookieDriver.class);
        log.info("Registered etcd metadata bookie driver");
    }

    ServerConfiguration conf;
    EtcdBookieRegister bkRegister;
    RegistrationManager regMgr;

    /**
     * Initialize the driver with the server configuration.
     *
     * @param conf server configuration
     * @param statsLogger stats logger
     * @return this driver instance, for fluent chaining
     * @throws MetadataException if the metadata service uri is missing or invalid
     */
    @Override
    public synchronized MetadataBookieDriver initialize(ServerConfiguration conf,
                                                        StatsLogger statsLogger)
            throws MetadataException {
        super.initialize(conf, statsLogger);
        this.conf = conf;
        this.statsLogger = statsLogger;
        // Fix: return the initialized driver instead of null, consistent with
        // EtcdMetadataClientDriver#initialize; returning null broke fluent callers.
        return this;
    }

    /**
     * Create (lazily, at most once) the registration manager backed by an etcd lease.
     */
    @Override
    public synchronized RegistrationManager createRegistrationManager() {
        if (null == bkRegister) {
            // lease ttl is in seconds; conf exposes the (zk-named) session timeout in millis
            bkRegister = new EtcdBookieRegister(
                client.getLeaseClient(),
                TimeUnit.MILLISECONDS.toSeconds(conf.getZkTimeout())
            ).start();
        }
        if (null == regMgr) {
            regMgr = new EtcdRegistrationManager(
                client,
                keyPrefix,
                bkRegister
            );
        }
        return regMgr;
    }

    /**
     * Close the registration manager and bookie register, then the shared etcd resources.
     */
    @Override
    public void close() {
        RegistrationManager rmToClose;
        EtcdBookieRegister bkRegisterToClose;
        // swap the fields to null under the lock, perform the blocking closes outside it
        synchronized (this) {
            rmToClose = regMgr;
            regMgr = null;
            bkRegisterToClose = bkRegister;
            bkRegister = null;
        }
        if (null != rmToClose) {
            rmToClose.close();
        }
        if (null != bkRegisterToClose) {
            bkRegisterToClose.close();
        }
        super.close();
    }
}
| 586 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/EtcdMetadataDriverBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import com.google.common.collect.Lists;
import io.etcd.jetcd.Client;
import java.io.IOException;
import java.util.List;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.common.net.ServiceURI;
import org.apache.bookkeeper.conf.AbstractConfiguration;
import org.apache.bookkeeper.meta.LayoutManager;
import org.apache.bookkeeper.meta.LedgerManagerFactory;
import org.apache.bookkeeper.meta.exceptions.Code;
import org.apache.bookkeeper.meta.exceptions.MetadataException;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.commons.configuration.ConfigurationException;
/**
 * This is a mixin class for supporting etcd based metadata drivers.
 */
@Slf4j
class EtcdMetadataDriverBase implements AutoCloseable {

    static final String SCHEME = "etcd";

    protected AbstractConfiguration<?> conf;
    protected StatsLogger statsLogger;

    // etcd client plus the key prefix derived from the metadata service uri
    protected Client client;
    protected String keyPrefix;

    // lazily created managers
    protected LayoutManager layoutManager;
    protected LedgerManagerFactory lmFactory;

    public String getScheme() {
        return SCHEME;
    }

    /**
     * Initialize metadata driver with provided configuration and <tt>statsLogger</tt>.
     *
     * @param conf configuration to initialize metadata driver
     * @param statsLogger stats logger
     * @throws MetadataException if the metadata service uri is missing or invalid
     */
    protected void initialize(AbstractConfiguration<?> conf, StatsLogger statsLogger)
            throws MetadataException {
        this.conf = conf;
        this.statsLogger = statsLogger;

        final String metadataServiceUriStr;
        try {
            metadataServiceUriStr = conf.getMetadataServiceUri();
        } catch (ConfigurationException ce) {
            log.error("Failed to retrieve metadata service uri from configuration", ce);
            throw new MetadataException(Code.INVALID_METADATA_SERVICE_URI, ce);
        }

        ServiceURI serviceURI = ServiceURI.create(metadataServiceUriStr);
        this.keyPrefix = serviceURI.getServicePath();
        // each service host becomes an http endpoint of the etcd cluster
        List<String> hosts = Lists.newArrayList(serviceURI.getServiceHosts());
        List<String> etcdEndpoints = hosts.stream()
            .map(host -> String.format("http://%s", host))
            .collect(Collectors.toList());
        log.info("Initializing etcd metadata driver : etcd endpoints = {}, key scope = {}",
            etcdEndpoints, keyPrefix);
        synchronized (this) {
            this.client = Client.builder()
                .endpoints(etcdEndpoints.toArray(new String[0]))
                .build();
        }
        this.layoutManager = new EtcdLayoutManager(client, keyPrefix);
    }

    public LayoutManager getLayoutManager() {
        return layoutManager;
    }

    /**
     * Return the ledger manager factory, creating and initializing it on first use.
     *
     * @throws MetadataException if the factory fails to initialize
     */
    public synchronized LedgerManagerFactory getLedgerManagerFactory()
            throws MetadataException {
        if (null != lmFactory) {
            return lmFactory;
        }
        try {
            lmFactory = new EtcdLedgerManagerFactory();
            lmFactory.initialize(conf, layoutManager, EtcdLedgerManagerFactory.VERSION);
        } catch (IOException ioe) {
            throw new MetadataException(
                Code.METADATA_SERVICE_ERROR, "Failed to initialize ledger manager factory", ioe);
        }
        return lmFactory;
    }

    @Override
    public synchronized void close() {
        // tear down in reverse order of creation: factory first, then the client
        if (null != lmFactory) {
            try {
                lmFactory.close();
            } catch (IOException e) {
                log.error("Failed to close ledger manager factory", e);
            }
            lmFactory = null;
        }
        if (null != client) {
            client.close();
            client = null;
        }
    }
}
| 587 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/EtcdLayoutManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.ioResult;
import io.etcd.jetcd.ByteSequence;
import io.etcd.jetcd.Client;
import io.etcd.jetcd.KV;
import io.etcd.jetcd.kv.DeleteResponse;
import io.etcd.jetcd.kv.GetResponse;
import io.etcd.jetcd.kv.TxnResponse;
import io.etcd.jetcd.op.Cmp;
import io.etcd.jetcd.op.CmpTarget;
import io.etcd.jetcd.options.GetOption;
import io.etcd.jetcd.options.PutOption;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.meta.LayoutManager;
import org.apache.bookkeeper.meta.LedgerLayout;
/**
 * Etcd based layout manager.
 */
@Slf4j
@Getter(AccessLevel.PACKAGE)
class EtcdLayoutManager implements LayoutManager {

    private final Client client;
    private final KV kvClient;
    private final String scope;
    // etcd key under which the serialized ledger layout is stored
    private final ByteSequence layoutKey;

    EtcdLayoutManager(Client client, String scope) {
        this.client = client;
        this.kvClient = client.getKVClient();
        this.scope = scope;
        this.layoutKey = ByteSequence.from(EtcdUtils.getLayoutKey(scope), StandardCharsets.UTF_8);
    }

    /**
     * Read the ledger layout from etcd.
     *
     * @return the parsed layout, or {@code null} if no layout node exists
     */
    @Override
    public LedgerLayout readLedgerLayout() throws IOException {
        GetResponse response = ioResult(kvClient.get(layoutKey, GetOption.DEFAULT));
        if (response.getCount() <= 0) {
            return null;
        }
        byte[] layoutData = response.getKvs().get(0).getValue().getBytes();
        return LedgerLayout.parseLayout(layoutData);
    }

    /**
     * Store the ledger layout, failing if one already exists.
     *
     * @throws LedgerLayoutExistsException if a layout node is already present
     * @throws IOException on any other failure
     */
    @Override
    public void storeLedgerLayout(LedgerLayout layout) throws IOException {
        ByteSequence layoutData = ByteSequence.from(layout.serialize());
        // single txn: if the key already exists read it back, otherwise create it
        TxnResponse response = ioResult(kvClient.txn()
            .If(new Cmp(layoutKey, Cmp.Op.GREATER, CmpTarget.createRevision(0)))
            .Then(io.etcd.jetcd.op.Op.get(layoutKey, GetOption.DEFAULT))
            .Else(io.etcd.jetcd.op.Op.put(layoutKey, layoutData, PutOption.DEFAULT))
            .commit());
        if (!response.isSucceeded()) {
            // the key did not exist and our put created it
            return;
        }
        // the key exists: inspect the read-back result to raise the right error
        GetResponse resp = response.getGetResponses().get(0);
        if (resp.getCount() <= 0) {
            // compare said the key exists, yet the read found nothing
            throw new IOException("Creating layout node '" + layoutKey.toString(StandardCharsets.UTF_8)
                + "' failed due to it already exists but no layout node is found");
        }
        throw new LedgerLayoutExistsException(
            "Ledger layout already exists under '" + layoutKey.toString(StandardCharsets.UTF_8) + "'");
    }

    /**
     * Delete the ledger layout node.
     *
     * @throws IOException if no layout node exists
     */
    @Override
    public void deleteLedgerLayout() throws IOException {
        DeleteResponse response = ioResult(kvClient.delete(layoutKey));
        if (response.getDeleted() <= 0) {
            throw new IOException("No ledger layout is found under '" + layoutKey.toString(StandardCharsets.UTF_8)
                + "'");
        }
        if (log.isDebugEnabled()) {
            log.debug("Successfully delete layout '{}'", layoutKey.toString(StandardCharsets.UTF_8));
        }
    }
}
| 588 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/EtcdLedgerManagerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import static com.google.common.base.Preconditions.checkArgument;
import io.etcd.jetcd.Client;
import java.io.IOException;
import org.apache.bookkeeper.common.net.ServiceURI;
import org.apache.bookkeeper.conf.AbstractConfiguration;
import org.apache.bookkeeper.meta.LayoutManager;
import org.apache.bookkeeper.meta.LedgerAuditorManager;
import org.apache.bookkeeper.meta.LedgerIdGenerator;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.meta.LedgerManagerFactory;
import org.apache.bookkeeper.meta.LedgerUnderreplicationManager;
import org.apache.bookkeeper.replication.ReplicationException;
import org.apache.bookkeeper.replication.ReplicationException.CompatibilityException;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.zookeeper.KeeperException;
/**
 * Etcd based ledger manager factory.
 */
class EtcdLedgerManagerFactory implements LedgerManagerFactory {

    static final int VERSION = 0;

    // key scope derived from the metadata service uri
    private String scope;
    private Client client;

    @Override
    public int getCurrentVersion() {
        return VERSION;
    }

    /**
     * Initialize the factory, borrowing the etcd client from the layout manager.
     *
     * @throws IOException on version mismatch or invalid metadata service uri
     */
    @Override
    public LedgerManagerFactory initialize(AbstractConfiguration conf,
                                           LayoutManager layoutManager,
                                           int factoryVersion) throws IOException {
        checkArgument(layoutManager instanceof EtcdLayoutManager);
        if (VERSION != factoryVersion) {
            throw new IOException("Incompatible layout version found : " + factoryVersion);
        }
        EtcdLayoutManager etcdLm = (EtcdLayoutManager) layoutManager;
        try {
            this.scope = ServiceURI.create(conf.getMetadataServiceUri()).getServicePath();
        } catch (ConfigurationException e) {
            throw new IOException("Invalid metadata service uri", e);
        }
        this.client = etcdLm.getClient();
        return this;
    }

    @Override
    public void close() {
        // the layout manager (and its etcd client) is owned by the caller,
        // so there is nothing for this factory to close
    }

    @Override
    public LedgerIdGenerator newLedgerIdGenerator() {
        return new Etcd64bitIdGenerator(client.getKVClient(), scope);
    }

    @Override
    public LedgerManager newLedgerManager() {
        return new EtcdLedgerManager(client, scope);
    }

    @Override
    public LedgerUnderreplicationManager newLedgerUnderreplicationManager()
        throws ReplicationException.UnavailableException, InterruptedException, CompatibilityException {
        throw new UnsupportedOperationException();
    }

    @Override
    public LedgerAuditorManager newLedgerAuditorManager() throws IOException, InterruptedException {
        throw new UnsupportedOperationException();
    }

    /**
     * Format the cluster metadata stored under this factory's scope.
     */
    @Override
    public void format(AbstractConfiguration<?> conf, LayoutManager lm)
        throws InterruptedException, KeeperException, IOException {
        try {
            EtcdRegistrationManager.format(client.getKVClient(), scope);
        } catch (Exception e) {
            throw asIOException(e);
        }
    }

    /**
     * Validate and nuke the existing cluster metadata under this factory's scope.
     */
    @Override
    public boolean validateAndNukeExistingCluster(AbstractConfiguration<?> conf, LayoutManager lm)
        throws InterruptedException, KeeperException, IOException {
        try {
            return EtcdRegistrationManager.nukeExistingCluster(client.getKVClient(), scope);
        } catch (Exception e) {
            throw asIOException(e);
        }
    }

    // rethrow helper: pass IOExceptions through untouched, wrap everything else
    private static IOException asIOException(Exception e) {
        return (e instanceof IOException) ? (IOException) e : new IOException(e);
    }
}
| 589 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/EtcdMetadataClientDriver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import java.util.Optional;
import java.util.concurrent.ScheduledExecutorService;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.discover.RegistrationClient;
import org.apache.bookkeeper.meta.MetadataClientDriver;
import org.apache.bookkeeper.meta.MetadataDrivers;
import org.apache.bookkeeper.meta.exceptions.MetadataException;
import org.apache.bookkeeper.stats.StatsLogger;
/**
 * Etcd based metadata client driver.
 */
@Slf4j
public class EtcdMetadataClientDriver extends EtcdMetadataDriverBase implements MetadataClientDriver {

    // register myself so "etcd" metadata service uris resolve to this driver
    static {
        MetadataDrivers.registerClientDriver(
            SCHEME, EtcdMetadataClientDriver.class);
        log.info("Registered etcd metadata client driver.");
    }

    ClientConfiguration conf;
    ScheduledExecutorService scheduler;
    RegistrationClient regClient;

    /**
     * Initialize the driver with the client configuration.
     *
     * @return this driver instance, for fluent chaining
     * @throws MetadataException if the metadata service uri is missing or invalid
     */
    @Override
    public MetadataClientDriver initialize(ClientConfiguration conf,
                                           ScheduledExecutorService scheduler,
                                           StatsLogger statsLogger,
                                           Optional<Object> ctx)
            throws MetadataException {
        super.initialize(conf, statsLogger);
        this.conf = conf;
        this.statsLogger = statsLogger;
        // NOTE(review): the scheduler parameter is accepted but never stored — confirm intended
        return this;
    }

    /**
     * Return the registration client, creating it on first use.
     */
    @Override
    public synchronized RegistrationClient getRegistrationClient() {
        if (regClient == null) {
            regClient = new EtcdRegistrationClient(keyPrefix, client);
        }
        return regClient;
    }

    @Override
    public synchronized void close() {
        // close the registration client before the shared etcd resources
        if (regClient != null) {
            regClient.close();
            regClient = null;
        }
        super.close();
    }

    @Override
    public void setSessionStateListener(SessionStateListener sessionStateListener) {
        // TODO: EtcdMetadataClientDriver has to implement this method.
        throw new UnsupportedOperationException();
    }
}
| 590 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/EtcdLedgerManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import com.google.common.collect.Sets;
import io.etcd.jetcd.ByteSequence;
import io.etcd.jetcd.Client;
import io.etcd.jetcd.KV;
import io.etcd.jetcd.KeyValue;
import io.etcd.jetcd.Txn;
import io.etcd.jetcd.common.exception.ClosedClientException;
import io.etcd.jetcd.kv.GetResponse;
import io.etcd.jetcd.op.Cmp;
import io.etcd.jetcd.op.CmpTarget;
import io.etcd.jetcd.options.DeleteOption;
import io.etcd.jetcd.options.GetOption;
import io.etcd.jetcd.options.PutOption;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Consumer;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.meta.LedgerMetadataSerDe;
import org.apache.bookkeeper.metadata.etcd.helpers.KeyIterator;
import org.apache.bookkeeper.metadata.etcd.helpers.KeyStream;
import org.apache.bookkeeper.metadata.etcd.helpers.ValueStream;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.LedgerMetadataListener;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.Processor;
import org.apache.bookkeeper.util.collections.ConcurrentLongHashMap;
import org.apache.bookkeeper.versioning.LongVersion;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Versioned;
import org.apache.zookeeper.AsyncCallback.VoidCallback;
/**
 * Etcd ledger manager.
 *
 * <p>Stores each ledger's metadata as a single etcd key/value under {@code scope},
 * using the key's mod revision as the metadata {@link LongVersion}. Metadata
 * listeners are backed by etcd watches: at most one {@link ValueStream} per
 * ledger, shared by all of that ledger's listeners.
 */
@Slf4j
class EtcdLedgerManager implements LedgerManager {
    // serializer/deserializer for the metadata payload stored in etcd values
    private final LedgerMetadataSerDe serDe = new LedgerMetadataSerDe();
    private final String scope;
    private final Client client;
    private final KV kvClient;
    private final EtcdWatchClient watchClient;
    // one metadata watch stream per ledger that currently has registered listeners
    private final ConcurrentLongHashMap<ValueStream<LedgerMetadata>> watchers =
        ConcurrentLongHashMap.<ValueStream<LedgerMetadata>>newBuilder().build();
    // maps each registered listener to the consumer attached to its ledger's stream
    private final ConcurrentMap<LedgerMetadataListener, LedgerMetadataConsumer> listeners =
        new ConcurrentHashMap<>();
    private volatile boolean closed = false;
    EtcdLedgerManager(Client client,
                      String scope) {
        this.client = client;
        this.kvClient = client.getKVClient();
        this.scope = scope;
        // the manager owns this watch client and closes it in close()
        this.watchClient = new EtcdWatchClient(client);
    }
    // reads the volatile closed flag set by close()
    private boolean isClosed() {
        return closed;
    }
    // package-private accessor: the active watch stream for a ledger, or null if none
    ValueStream<LedgerMetadata> getLedgerMetadataStream(long ledgerId) {
        return watchers.get(ledgerId);
    }
    /**
     * Create the metadata node for {@code ledgerId} if it does not exist yet.
     *
     * <p>Single etcd txn: if the key already has a create revision the compare
     * succeeds and the Then-branch counts the key (ledger exists); otherwise the
     * Else-branch puts the serialized metadata. The returned version is the
     * response header's revision.
     */
    @Override
    public CompletableFuture<Versioned<LedgerMetadata>> createLedgerMetadata(long ledgerId,
                                                                             LedgerMetadata metadata) {
        CompletableFuture<Versioned<LedgerMetadata>> promise = new CompletableFuture<>();
        String ledgerKey = EtcdUtils.getLedgerKey(scope, ledgerId);
        log.info("Create ledger metadata under key {}", ledgerKey);
        ByteSequence ledgerKeyBs = ByteSequence.from(ledgerKey, StandardCharsets.UTF_8);
        final ByteSequence valueBs;
        try {
            valueBs = ByteSequence.from(serDe.serialize(metadata));
        } catch (IOException ioe) {
            promise.completeExceptionally(new BKException.BKMetadataSerializationException(ioe));
            return promise;
        }
        kvClient.txn()
            .If(new Cmp(
                ledgerKeyBs,
                Cmp.Op.GREATER,
                CmpTarget.createRevision(0L)))
            .Then(io.etcd.jetcd.op.Op.get(
                ledgerKeyBs,
                GetOption.newBuilder()
                    .withCountOnly(true)
                    .build()))
            .Else(io.etcd.jetcd.op.Op.put(
                ledgerKeyBs,
                valueBs,
                PutOption.DEFAULT))
            .commit()
            .thenAccept(resp -> {
                if (resp.isSucceeded()) {
                    GetResponse getResp = resp.getGetResponses().get(0);
                    if (getResp.getCount() <= 0) {
                        // key doesn't exist but we fail to put the key
                        promise.completeExceptionally(new BKException.BKUnexpectedConditionException());
                    } else {
                        // key exists
                        promise.completeExceptionally(new BKException.BKLedgerExistException());
                    }
                } else {
                    // Else-branch ran: our put created the key
                    promise.complete(new Versioned<>(metadata,
                        new LongVersion(resp.getHeader().getRevision())));
                }
            })
            .exceptionally(cause -> {
                // NOTE(review): the original cause is dropped here — consider attaching it
                promise.completeExceptionally(new BKException.MetaStoreException());
                return null;
            });
        return promise;
    }
    /**
     * Delete the metadata node for {@code ledgerId}, optionally guarded by
     * {@code version} (the expected etcd mod revision).
     */
    @Override
    public CompletableFuture<Void> removeLedgerMetadata(long ledgerId, Version version) {
        CompletableFuture<Void> promise = new CompletableFuture<>();
        // sentinel meaning "no revision check" (Version.ANY)
        long revision = -0xabcd;
        if (Version.NEW == version) {
            log.error("Request to delete ledger {} metadata with version set to the initial one", ledgerId);
            promise.completeExceptionally(new BKException.BKMetadataVersionException());
            return promise;
        } else if (Version.ANY != version) {
            if (!(version instanceof LongVersion)) {
                log.info("Not an instance of LongVersion : {}", ledgerId);
                promise.completeExceptionally(new BKException.BKMetadataVersionException());
                return promise;
            } else {
                revision = ((LongVersion) version).getLongVersion();
            }
        }
        String ledgerKey = EtcdUtils.getLedgerKey(scope, ledgerId);
        ByteSequence ledgerKeyBs = ByteSequence.from(ledgerKey, StandardCharsets.UTF_8);
        Txn txn = kvClient.txn();
        if (revision == -0xabcd) {
            // Version.ANY: only require that the key exists
            txn = txn.If(new Cmp(
                ledgerKeyBs,
                Cmp.Op.GREATER,
                CmpTarget.createRevision(0L)
            ));
        } else {
            // guarded delete: the key's mod revision must match the expected version
            txn = txn.If(new Cmp(
                ledgerKeyBs,
                Cmp.Op.EQUAL,
                CmpTarget.modRevision(revision)
            ));
        }
        txn
            .Then(io.etcd.jetcd.op.Op.delete(
                ledgerKeyBs,
                DeleteOption.DEFAULT
            ))
            .Else(io.etcd.jetcd.op.Op.get(
                ledgerKeyBs,
                GetOption.DEFAULT
            ))
            .commit()
            .thenAccept(txnResp -> {
                if (txnResp.isSucceeded()) {
                    promise.complete(null);
                } else {
                    GetResponse getResp = txnResp.getGetResponses().get(0);
                    if (getResp.getCount() > 0) {
                        // fail to delete the ledger: key exists but revision mismatched
                        promise.completeExceptionally(new BKException.BKMetadataVersionException());
                    } else {
                        log.warn("Deleting ledger {} failed due to : ledger key {} doesn't exist", ledgerId, ledgerKey);
                        promise.completeExceptionally(new BKException.BKNoSuchLedgerExistsException());
                    }
                }
            })
            .exceptionally(cause -> {
                promise.completeExceptionally(new BKException.MetaStoreException());
                return null;
            });
        return promise;
    }
    /**
     * Read the metadata of {@code ledgerId}; the returned version is the key's
     * etcd mod revision.
     */
    @Override
    public CompletableFuture<Versioned<LedgerMetadata>> readLedgerMetadata(long ledgerId) {
        CompletableFuture<Versioned<LedgerMetadata>> promise = new CompletableFuture<>();
        String ledgerKey = EtcdUtils.getLedgerKey(scope, ledgerId);
        ByteSequence ledgerKeyBs = ByteSequence.from(ledgerKey, StandardCharsets.UTF_8);
        log.info("read ledger metadata under key {}", ledgerKey);
        kvClient.get(ledgerKeyBs)
            .thenAccept(getResp -> {
                if (getResp.getCount() > 0) {
                    KeyValue kv = getResp.getKvs().get(0);
                    byte[] data = kv.getValue().getBytes();
                    try {
                        LedgerMetadata metadata = serDe.parseConfig(data, ledgerId, Optional.empty());
                        promise.complete(new Versioned<>(metadata, new LongVersion(kv.getModRevision())));
                    } catch (IOException ioe) {
                        log.error("Could not parse ledger metadata for ledger : {}", ledgerId, ioe);
                        promise.completeExceptionally(new BKException.MetaStoreException());
                        return;
                    }
                } else {
                    promise.completeExceptionally(new BKException.BKNoSuchLedgerExistsException());
                }
            })
            .exceptionally(cause -> {
                promise.completeExceptionally(new BKException.MetaStoreException());
                return null;
            });
        return promise;
    }
    /**
     * Conditionally overwrite the metadata of {@code ledgerId}: the write only
     * succeeds if the key's mod revision still equals {@code currentVersion}.
     */
    @Override
    public CompletableFuture<Versioned<LedgerMetadata>> writeLedgerMetadata(long ledgerId, LedgerMetadata metadata,
                                                                            Version currentVersion) {
        CompletableFuture<Versioned<LedgerMetadata>> promise = new CompletableFuture<>();
        if (Version.NEW == currentVersion || !(currentVersion instanceof LongVersion)) {
            promise.completeExceptionally(new BKException.BKMetadataVersionException());
            return promise;
        }
        final LongVersion lv = (LongVersion) currentVersion;
        String ledgerKey = EtcdUtils.getLedgerKey(scope, ledgerId);
        ByteSequence ledgerKeyBs = ByteSequence.from(ledgerKey, StandardCharsets.UTF_8);
        final ByteSequence valueBs;
        try {
            valueBs = ByteSequence.from(serDe.serialize(metadata));
        } catch (IOException ioe) {
            promise.completeExceptionally(new BKException.BKMetadataSerializationException(ioe));
            return promise;
        }
        kvClient.txn()
            .If(new Cmp(
                ledgerKeyBs,
                Cmp.Op.EQUAL,
                CmpTarget.modRevision(lv.getLongVersion())))
            .Then(io.etcd.jetcd.op.Op.put(
                ledgerKeyBs,
                valueBs,
                PutOption.DEFAULT))
            .Else(io.etcd.jetcd.op.Op.get(
                ledgerKeyBs,
                GetOption.DEFAULT))
            .commit()
            .thenAccept(resp -> {
                if (resp.isSucceeded()) {
                    promise.complete(new Versioned<>(metadata, new LongVersion(resp.getHeader().getRevision())));
                } else {
                    GetResponse getResp = resp.getGetResponses().get(0);
                    if (getResp.getCount() > 0) {
                        // NOTE(review): the log arguments look swapped relative to the
                        // placeholders — the actual mod revision is printed as "expected
                        // version" and the expected version as "actual" — confirm.
                        log.warn("Conditional update ledger metadata failed :"
                            + " expected version = {}, actual version = {}",
                            getResp.getKvs().get(0).getModRevision(), lv);
                        promise.completeExceptionally(new BKException.BKMetadataVersionException());
                    } else {
                        promise.completeExceptionally(new BKException.BKNoSuchLedgerExistsException());
                    }
                }
            })
            .exceptionally(cause -> {
                promise.completeExceptionally(new BKException.MetaStoreException());
                return null;
            });
        return promise;
    }
    // wrap a listener into a consumer bound to the ledger id and deletion callback
    private LedgerMetadataConsumer listenerToConsumer(long ledgerId,
                                                      LedgerMetadataListener listener,
                                                      Consumer<Long> onDeletedConsumer) {
        return new LedgerMetadataConsumer(
            ledgerId,
            listener,
            onDeletedConsumer
        );
    }
    /**
     * Register {@code listener} for metadata updates of {@code ledgerId},
     * creating at most one etcd watch stream per ledger.
     */
    @Override
    public void registerLedgerMetadataListener(long ledgerId, LedgerMetadataListener listener) {
        if (listeners.containsKey(listener)) {
            // this listener is already registered: nothing to do
            return;
        }
        ValueStream<LedgerMetadata> lmStream = watchers.computeIfAbsent(
            ledgerId, (lid) -> new ValueStream<>(
                client,
                watchClient,
                bs -> {
                    try {
                        return serDe.parseConfig(
                            bs.getBytes(),
                            lid,
                            Optional.empty()
                        );
                    } catch (IOException ioe) {
                        log.error("Could not parse ledger metadata : {}",
                            bs.toString(StandardCharsets.UTF_8), ioe);
                        throw new RuntimeException(
                            "Could not parse ledger metadata : "
                                + bs.toString(StandardCharsets.UTF_8), ioe);
                    }
                },
                ByteSequence.from(EtcdUtils.getLedgerKey(scope, ledgerId), StandardCharsets.UTF_8))
        );
        // on ledger deletion, tear down this ledger's watch stream
        LedgerMetadataConsumer lmConsumer = listenerToConsumer(ledgerId, listener,
            (lid) -> {
                if (watchers.remove(lid, lmStream)) {
                    log.info("Closed ledger metadata watcher on ledger {} deletion.", lid);
                    lmStream.closeAsync();
                }
            });
        LedgerMetadataConsumer oldConsumer = listeners.putIfAbsent(listener, lmConsumer);
        if (null != oldConsumer) {
            // lost the race with a concurrent registration of the same listener
            return;
        } else {
            lmStream.readAndWatch(lmConsumer)
                .whenComplete((values, cause) -> {
                    if (null != cause && !(cause instanceof ClosedClientException)) {
                        // fail to register ledger metadata listener, re-attempt it
                        registerLedgerMetadataListener(ledgerId, listener);
                    }
                });
        }
    }
    @Override
    public void unregisterLedgerMetadataListener(long ledgerId, LedgerMetadataListener listener) {
        LedgerMetadataConsumer lmConsumer = listeners.remove(listener);
        unregisterLedgerMetadataListener(ledgerId, lmConsumer);
    }
    // detach the consumer from the ledger's stream; close the stream once unused
    private void unregisterLedgerMetadataListener(long ledgerId, LedgerMetadataConsumer lmConsumer) {
        ValueStream<LedgerMetadata> lmStream = watchers.get(ledgerId);
        if (null == lmStream) {
            return;
        } else {
            lmStream.unwatch(lmConsumer).thenAccept(noConsumers -> {
                if (noConsumers) {
                    if (watchers.remove(ledgerId, lmStream)) {
                        log.info("Closed ledger metadata watcher on ledger {} since there are no listeners any more.",
                            ledgerId);
                        lmStream.closeAsync();
                    }
                }
            }).exceptionally(cause -> {
                if (cause instanceof ClosedClientException) {
                    // fail to unwatch a consumer, re-attempt it
                    unregisterLedgerMetadataListener(ledgerId, lmConsumer);
                }
                return null;
            });
        }
    }
    /**
     * Iterate over all ledger ids under the scope, invoking {@code processor}
     * for each; {@code finalCb} receives {@code successRc} when the iteration
     * completes, or {@code failureRc} on read error.
     */
    @Override
    public void asyncProcessLedgers(Processor<Long> processor,
                                    VoidCallback finalCb,
                                    Object context,
                                    int successRc,
                                    int failureRc) {
        KeyStream<Long> ks = new KeyStream<>(
            kvClient,
            ByteSequence.from(EtcdUtils.getLedgerKey(scope, 0L), StandardCharsets.UTF_8),
            ByteSequence.from(EtcdUtils.getLedgerKey(scope, Long.MAX_VALUE), StandardCharsets.UTF_8),
            bs -> {
                // the ledger id is the low 64 bits of the key's uuid
                UUID uuid = EtcdUtils.parseLedgerKey(bs.toString(StandardCharsets.UTF_8));
                return uuid.getLeastSignificantBits();
            }
        );
        processLedgers(
            ks, processor, finalCb, context, successRc, failureRc);
    }
    // recursively drain the key stream batch by batch
    private void processLedgers(KeyStream<Long> ks,
                                Processor<Long> processor,
                                VoidCallback finalCb,
                                Object context,
                                int successRc,
                                int failureRc) {
        ks.readNext().whenCompleteAsync((ledgers, cause) -> {
            if (null != cause) {
                finalCb.processResult(failureRc, null, context);
            } else {
                if (ledgers.isEmpty()) {
                    // an empty batch marks the end of the stream
                    finalCb.processResult(successRc, null, context);
                } else {
                    // NOTE(review): finalCb is passed as the per-ledger callback, so it
                    // may be invoked once per ledger as well as at end-of-stream — confirm.
                    ledgers.forEach(l -> processor.process(l, finalCb));
                    processLedgers(ks, processor, finalCb, context, successRc, failureRc);
                }
            }
        });
    }
    /**
     * Return an iterator over sorted ranges of ledger ids under the scope.
     *
     * @param opTimeOutMs operation timeout; not referenced by this implementation
     */
    @Override
    public LedgerRangeIterator getLedgerRanges(long opTimeOutMs) {
        KeyStream<Long> ks = new KeyStream<>(
            kvClient,
            ByteSequence.from(EtcdUtils.getLedgerKey(scope, 0L), StandardCharsets.UTF_8),
            ByteSequence.from(EtcdUtils.getLedgerKey(scope, Long.MAX_VALUE), StandardCharsets.UTF_8),
            bs -> {
                UUID uuid = EtcdUtils.parseLedgerKey(bs.toString(StandardCharsets.UTF_8));
                return uuid.getLeastSignificantBits();
            }
        );
        KeyIterator<Long> ki = new KeyIterator<>(ks);
        return new LedgerRangeIterator() {
            @Override
            public boolean hasNext() throws IOException {
                try {
                    return ki.hasNext();
                } catch (Exception e) {
                    // surface IOExceptions as-is, wrap anything else
                    if (e instanceof IOException) {
                        throw ((IOException) e);
                    } else {
                        throw new IOException(e);
                    }
                }
            }
            @Override
            public LedgerRange next() throws IOException {
                try {
                    final List<Long> values = ki.next();
                    // TreeSet keeps the ledger ids in each range sorted
                    final Set<Long> ledgers = Sets.newTreeSet();
                    ledgers.addAll(values);
                    return new LedgerRange(ledgers);
                } catch (Exception e) {
                    if (e instanceof IOException) {
                        throw ((IOException) e);
                    } else {
                        throw new IOException(e);
                    }
                }
            }
        };
    }
    /**
     * Close the manager's watch client; idempotent.
     */
    @Override
    public void close() {
        synchronized (this) {
            if (closed) {
                return;
            }
            closed = true;
        }
        // close the watch client outside the lock
        watchClient.close();
    }
}
| 591 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/EtcdWatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import io.etcd.jetcd.ByteSequence;
import io.etcd.jetcd.options.WatchOption;
import io.etcd.jetcd.watch.WatchResponse;
import io.etcd.jetcd.watch.WatchResponseWithError;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.ScheduledExecutorService;
import java.util.function.BiConsumer;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
/**
* Watcher class holds watcher information.
*/
@Slf4j
public class EtcdWatcher implements AutoCloseable {

    // Executor on which watch responses are dispatched to consumers.
    private final ScheduledExecutorService executor;
    @Getter
    private final WatchOption watchOption;
    @Getter
    private final ByteSequence key;
    // Listeners interested in watch responses (or errors) for {@code key}.
    private final CopyOnWriteArraySet<BiConsumer<WatchResponse, Throwable>> consumers;
    @Getter
    @Setter
    private long watchID;
    // the revision to watch on.
    @Getter
    @Setter
    private long revision;
    private boolean closed = false;
    // the client that owns this watcher
    private final EtcdWatchClient owner;

    EtcdWatcher(ByteSequence key,
                WatchOption watchOption,
                ScheduledExecutorService executor,
                EtcdWatchClient owner) {
        this.key = key;
        this.watchOption = watchOption;
        this.executor = executor;
        this.owner = owner;
        this.consumers = new CopyOnWriteArraySet<>();
    }

    /**
     * Registers a consumer to be invoked for every watch response (or error)
     * delivered to this watcher.
     */
    public void addConsumer(BiConsumer<WatchResponse, Throwable> consumer) {
        consumers.add(consumer);
    }

    synchronized boolean isClosed() {
        return closed;
    }

    /**
     * Dispatches a watch response to all registered consumers on the executor
     * thread. Becomes a no-op once the watcher has been closed.
     */
    void notifyWatchResponse(WatchResponseWithError watchResponse) {
        synchronized (this) {
            if (closed) {
                return;
            }
        }
        executor.submit(() -> consumers.forEach(consumer -> {
            Throwable cause = watchResponse.getException();
            if (cause == null) {
                consumer.accept(watchResponse.getWatchResponse(), null);
            } else {
                consumer.accept(null, cause);
            }
        }));
    }

    /**
     * Asynchronously deregisters this watcher from its owning watch client.
     */
    public CompletableFuture<Void> closeAsync() {
        return owner.unwatch(this);
    }

    @Override
    public void close() {
        // First close() wins; all later calls return immediately.
        boolean alreadyClosed;
        synchronized (this) {
            alreadyClosed = closed;
            closed = true;
        }
        if (alreadyClosed) {
            return;
        }
        try {
            FutureUtils.result(closeAsync());
        } catch (Exception e) {
            log.warn("Encountered error on removing watcher '{}' from watch client : {}",
                watchID, e.getMessage());
        }
        consumers.clear();
    }
}
| 592 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/LedgerMetadataConsumer.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import java.util.Objects;
import java.util.function.Consumer;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.LedgerMetadataListener;
import org.apache.bookkeeper.versioning.Versioned;
/**
* A consumer wrapper over ledger metadata listener.
*/
class LedgerMetadataConsumer implements Consumer<Versioned<LedgerMetadata>> {

    private final long ledgerId;
    private final LedgerMetadataListener listener;
    // Invoked with the ledger id when the metadata value disappears.
    private final Consumer<Long> onDeletedConsumer;

    LedgerMetadataConsumer(long ledgerId,
                           LedgerMetadataListener listener,
                           Consumer<Long> onDeletedConsumer) {
        this.ledgerId = ledgerId;
        this.listener = listener;
        this.onDeletedConsumer = onDeletedConsumer;
    }

    // Hash only on the wrapped listener; equal consumers always share an
    // equal listener, so the equals/hashCode contract still holds.
    @Override
    public int hashCode() {
        return listener.hashCode();
    }

    @Override
    public String toString() {
        return listener.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof LedgerMetadataConsumer)) {
            return false;
        }
        LedgerMetadataConsumer other = (LedgerMetadataConsumer) obj;
        if (ledgerId != other.ledgerId) {
            return false;
        }
        return Objects.equals(listener, other.listener)
            && Objects.equals(onDeletedConsumer, other.onDeletedConsumer);
    }

    /**
     * Routes a metadata update: a null value means the ledger was deleted,
     * anything else is forwarded to the listener as a change notification.
     */
    @Override
    public void accept(Versioned<LedgerMetadata> versionedMetadata) {
        LedgerMetadata metadata = versionedMetadata.getValue();
        if (metadata == null) {
            onDeletedConsumer.accept(ledgerId);
        } else {
            listener.onChanged(ledgerId, versionedMetadata);
        }
    }
}
| 593 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/EtcdBookieRegister.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.msResult;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.etcd.jetcd.Lease;
import io.etcd.jetcd.lease.LeaseKeepAliveResponse;
import io.etcd.jetcd.support.CloseableClient;
import io.grpc.stub.StreamObserver;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.BookieException.MetadataStoreException;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.discover.RegistrationManager.RegistrationListener;
/**
* Register to register a bookie in Etcd.
*/
@Slf4j
class EtcdBookieRegister implements AutoCloseable, Runnable, Supplier<Long> {

    private final Lease leaseClient;
    // TTL used when granting the registration lease.
    private final long ttlSeconds;
    private final ScheduledExecutorService executor;
    private RegistrationListener regListener;
    // Completed with the current lease id once a lease has been granted;
    // replaced with a fresh incomplete future when the lease is lost.
    private volatile CompletableFuture<Long> leaseFuture = new CompletableFuture<>();
    // Fails (or is cancelled) when the keep-alive stream for the current lease dies.
    private volatile CompletableFuture<Void> keepAliveFuture = new CompletableFuture<>();
    @Getter(AccessLevel.PACKAGE)
    private volatile long leaseId = -0xabcd;
    private volatile CloseableClient kaListener = null;
    private volatile boolean running = true;
    // Backoff (ms) between lease-grant retries; doubles up to ttlSeconds.
    private long nextWaitTimeMs = 200;
    private Future<?> runFuture = null;

    EtcdBookieRegister(Lease leaseClient,
                       long ttlSeconds) {
        this.leaseClient = leaseClient;
        this.ttlSeconds = ttlSeconds;
        this.executor = Executors.newSingleThreadScheduledExecutor(
            new ThreadFactoryBuilder()
                .setNameFormat("bookie-etcd-keepalive-thread")
                .build());
    }

    /**
     * Sets the listener notified when the registration (lease) expires.
     * Returns {@code this} for chaining.
     */
    public EtcdBookieRegister addRegistrationListener(RegistrationListener regListener) {
        this.regListener = regListener;
        return this;
    }

    long getTtlSeconds() {
        return ttlSeconds;
    }

    /**
     * Starts the keep-alive loop on the internal executor; idempotent.
     */
    public synchronized EtcdBookieRegister start() {
        if (null == runFuture) {
            runFuture = executor.submit(this);
        }
        return this;
    }

    /**
     * Grants a new lease and wires up its keep-alive stream if the current
     * {@link #leaseFuture} is not yet completed; otherwise does nothing.
     *
     * @throws MetadataStoreException if the lease grant fails
     */
    private void newLeaseIfNeeded() throws MetadataStoreException {
        boolean newLeaseNeeded;
        synchronized (this) {
            newLeaseNeeded = !leaseFuture.isDone();
        }
        if (newLeaseNeeded) {
            long leaseId = msResult(leaseClient.grant(ttlSeconds)).getID();
            keepAliveFuture = new CompletableFuture<>();
            if (kaListener != null) {
                synchronized (this) {
                    kaListener.close();
                    kaListener = null;
                }
            }
            this.kaListener = leaseClient.keepAlive(leaseId, new StreamObserver<LeaseKeepAliveResponse>() {
                @Override
                public void onNext(LeaseKeepAliveResponse response) {
                    log.info("KeepAlive response : lease = {}, ttl = {}",
                        response.getID(), response.getTTL());
                }

                @Override
                public void onError(Throwable t) {
                    log.info("KeepAlive renewal failed, leaseId {}", leaseId, t.fillInStackTrace());
                    keepAliveFuture.completeExceptionally(t);
                }

                @Override
                public void onCompleted() {
                    log.info("lease completed! leaseId {}", leaseId);
                    keepAliveFuture.cancel(true);
                }
            });
            this.leaseId = leaseId;
            leaseFuture.complete(leaseId);
            log.info("New lease '{}' is granted.", leaseId);
        }
    }

    /**
     * Blocks until a lease has been granted (retrying with exponential
     * backoff) or until {@link #running} becomes false.
     */
    private void waitForNewLeaseId() {
        while (running) {
            try {
                newLeaseIfNeeded();
                nextWaitTimeMs = 100L;
            } catch (MetadataStoreException e) {
                log.error("Failed to grant a new lease for leaseId {}", leaseId, e);
                try {
                    TimeUnit.MILLISECONDS.sleep(nextWaitTimeMs);
                    nextWaitTimeMs *= 2;
                    nextWaitTimeMs = Math.min(nextWaitTimeMs, TimeUnit.SECONDS.toMillis(ttlSeconds));
                } catch (InterruptedException e1) {
                    Thread.currentThread().interrupt();
                    log.warn("Interrupted at backing off granting a new lease for leaseId {}", leaseId);
                }
                continue;
            }
            // BUGFIX: return once a lease is available. Without this return the
            // loop spun forever after a successful grant (newLeaseIfNeeded()
            // immediately no-ops) and run() never reached keepAliveFuture.get().
            return;
        }
    }

    @Override
    public void run() {
        while (running) {
            waitForNewLeaseId();
            // here we got a lease, block until its keep-alive stream dies
            try {
                log.info("Keeping Alive at lease = {}", get());
                keepAliveFuture.get();
                continue;
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                log.warn("Interrupted at keeping lease '{}' alive", leaseId);
                resetLease();
            } catch (ExecutionException ee) {
                log.warn("Failed to keep alive lease '{}'", leaseId, ee);
                resetLease();
            }
        }
    }

    /**
     * Marks the current lease as lost so the run loop grants a new one,
     * and notifies the registration listener, if any.
     */
    private void resetLease() {
        synchronized (this) {
            leaseFuture = new CompletableFuture<>();
        }
        if (null != regListener) {
            regListener.onRegistrationExpired();
        }
    }

    @Override
    public void close() {
        synchronized (this) {
            if (!running) {
                return;
            } else {
                running = false;
            }
            if (null != runFuture) {
                if (runFuture.cancel(true)) {
                    log.info("Successfully interrupted bookie register.");
                }
            }
            keepAliveFuture.cancel(true);
            if (kaListener != null) {
                kaListener.close();
                kaListener = null;
            }
        }
        // Drain the executor: once this marker task runs, the run loop has observed
        // running == false and exited.
        CompletableFuture<Void> closeFuture = new CompletableFuture<>();
        executor.submit(() -> {
            FutureUtils.complete(closeFuture, (Void) null);
        });
        closeFuture.join();
        // BUGFIX: shut the executor down so its non-daemon thread does not leak.
        executor.shutdown();
    }

    /**
     * Returns the current lease id, blocking until one is granted.
     * Returns -1 if interrupted while waiting.
     */
    @Override
    public Long get() {
        while (true) {
            try {
                return leaseFuture.get(100, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                log.warn("Interrupted at getting lease id", e);
                return -1L;
            } catch (ExecutionException e) {
                // leaseFuture is only ever completed normally.
                throw new IllegalArgumentException("Should never reach here");
            } catch (TimeoutException e) {
                continue;
            }
        }
    }
}
| 594 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/EtcdRegistrationManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getBookiesEndPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getBookiesPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getBucketsPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getClusterInstanceIdPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getCookiePath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getCookiesPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getLayoutKey;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getLedgersPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getReadonlyBookiePath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getReadonlyBookiesPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getScopeEndKey;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getUnderreplicationPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getWritableBookiePath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.getWritableBookiesPath;
import static org.apache.bookkeeper.metadata.etcd.EtcdUtils.msResult;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.UncheckedExecutionException;
import io.etcd.jetcd.ByteSequence;
import io.etcd.jetcd.Client;
import io.etcd.jetcd.KV;
import io.etcd.jetcd.KeyValue;
import io.etcd.jetcd.Txn;
import io.etcd.jetcd.Watch;
import io.etcd.jetcd.Watch.Watcher;
import io.etcd.jetcd.kv.DeleteResponse;
import io.etcd.jetcd.kv.GetResponse;
import io.etcd.jetcd.kv.TxnResponse;
import io.etcd.jetcd.op.Cmp;
import io.etcd.jetcd.op.Cmp.Op;
import io.etcd.jetcd.op.CmpTarget;
import io.etcd.jetcd.options.DeleteOption;
import io.etcd.jetcd.options.GetOption;
import io.etcd.jetcd.options.PutOption;
import io.etcd.jetcd.options.WatchOption;
import io.etcd.jetcd.watch.WatchEvent;
import io.etcd.jetcd.watch.WatchEvent.EventType;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.BookieException;
import org.apache.bookkeeper.bookie.BookieException.BookieIllegalOpException;
import org.apache.bookkeeper.bookie.BookieException.CookieNotFoundException;
import org.apache.bookkeeper.bookie.BookieException.MetadataStoreException;
import org.apache.bookkeeper.discover.BookieServiceInfo;
import org.apache.bookkeeper.discover.RegistrationManager;
import org.apache.bookkeeper.meta.LedgerLayout;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.versioning.LongVersion;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Versioned;
/**
* Etcd registration manager.
*/
@Slf4j
class EtcdRegistrationManager implements RegistrationManager {
    // Root key prefix under which all cluster metadata lives in etcd.
    private final String scope;
    @Getter(AccessLevel.PACKAGE)
    private final Client client;
    // True only when this instance created the client/register itself (testing
    // constructors) and is therefore responsible for closing them.
    private final boolean ownClient;
    private final KV kvClient;
    @Getter(AccessLevel.PACKAGE)
    private final EtcdBookieRegister bkRegister;
    // Testing constructor: default lease TTL of 60 seconds.
    @VisibleForTesting
    EtcdRegistrationManager(Client client,
                            String scope) {
        this(client, scope, 60);
    }
    // Testing constructor: no-op registration listener.
    @VisibleForTesting
    EtcdRegistrationManager(Client client,
                            String scope,
                            long ttlSeconds) {
        this(client, scope, ttlSeconds, () -> {});
    }
    // Testing constructor: creates and starts its own bookie register,
    // so ownClient is set and close() will tear everything down.
    @VisibleForTesting
    EtcdRegistrationManager(Client client,
                            String scope,
                            long ttlSeconds,
                            RegistrationListener listener) {
        this(
            client,
            scope,
            new EtcdBookieRegister(
                client.getLeaseClient(),
                ttlSeconds
            ).addRegistrationListener(listener).start(),
            true);
    }
    // Production constructor: caller owns the client and register.
    EtcdRegistrationManager(Client client,
                            String scope,
                            EtcdBookieRegister bkRegister) {
        this(client, scope, bkRegister, false);
    }
    private EtcdRegistrationManager(Client client,
                                    String scope,
                                    EtcdBookieRegister bkRegister,
                                    boolean ownClient) {
        this.scope = scope;
        this.client = client;
        this.kvClient = client.getKVClient();
        this.bkRegister = bkRegister;
        this.ownClient = ownClient;
    }
    /**
     * Closes the bookie register and etcd client, but only when this manager
     * created them itself (ownClient); otherwise the owner closes them.
     */
    @Override
    public void close() {
        if (ownClient) {
            log.info("Closing registration manager under scope '{}'", scope);
            bkRegister.close();
            client.close();
            log.info("Successfully closed registration manager under scope '{}'", scope);
        }
    }
    /**
     * Registers the bookie under the writable (or readonly) path, binding the
     * registration key to the current lease so it expires with the bookie.
     */
    @Override
    public void registerBookie(BookieId bookieId, boolean readOnly,
                               BookieServiceInfo bookieServiceInfo) throws BookieException {
        if (readOnly) {
            doRegisterReadonlyBookie(bookieId, bkRegister.get());
        } else {
            doRegisterBookie(getWritableBookiePath(scope, bookieId), bkRegister.get());
        }
    }
    /**
     * Returns true if a registration node already exists under {@code regPath}
     * with lease {@code leaseId}; if it exists with a different lease, waits
     * for that previous registration to expire first.
     */
    private boolean checkRegNodeAndWaitExpired(String regPath, long leaseId)
            throws MetadataStoreException {
        ByteSequence regPathBs = ByteSequence.from(regPath, UTF_8);
        GetResponse getResp = msResult(kvClient.get(regPathBs));
        if (getResp.getCount() <= 0) {
            // key doesn't exist anymore
            return false;
        } else {
            return waitUntilRegNodeExpired(regPath, leaseId);
        }
    }
    /**
     * Waits (up to 2 * lease TTL) for a previous registration under
     * {@code regPath} held by a different lease to be deleted. Returns true
     * when the node is already held by {@code leaseId} itself.
     *
     * @throws MetadataStoreException if the previous registration does not
     *         expire in time or the watch fails
     */
    private boolean waitUntilRegNodeExpired(String regPath, long leaseId)
            throws MetadataStoreException {
        ByteSequence regPathBs = ByteSequence.from(regPath, UTF_8);
        // check regPath again
        GetResponse getResp = msResult(kvClient.get(regPathBs));
        if (getResp.getCount() <= 0) {
            // key disappears after watching it
            return false;
        } else {
            KeyValue kv = getResp.getKvs().get(0);
            if (kv.getLease() != leaseId) {
                // held by another lease: watch for its DELETE event
                Watch watchClient = client.getWatchClient();
                final CompletableFuture<Void> watchFuture = new CompletableFuture<>();
                Watcher watcher = watchClient.watch(
                    regPathBs,
                    WatchOption.newBuilder()
                        .withRevision(getResp.getHeader().getRevision() + 1)
                        .build(),
                    response -> {
                        for (WatchEvent event : response.getEvents()) {
                            log.info("Received watch event on '{}' : EventType = {}, lease {}",
                                regPath, event.getEventType(), leaseId);
                            if (EventType.DELETE == event.getEventType()) {
                                watchFuture.complete(null);
                                return;
                            }
                        }
                    },
                    exception -> {
                        log.warn("Exception in keepAlive for watch event on {}, lease {}",
                            regPath, leaseId, exception);
                        watchFuture.completeExceptionally(new UncheckedExecutionException(
                            "Interrupted at waiting previous registration under "
                                + regPath + " (lease = " + kv.getLease() + ") to be expired", exception));
                    }
                );
                log.info("Previous bookie registration (lease = {}) still exists at {}, "
                        + "so new lease '{}' will be waiting previous lease for {} seconds to be expired",
                    kv.getLease(), regPath, leaseId, bkRegister.getTtlSeconds());
                try {
                    msResult(watchFuture, 2 * bkRegister.getTtlSeconds(), TimeUnit.SECONDS);
                    return false;
                } catch (TimeoutException e) {
                    watchFuture.cancel(true);
                    throw new MetadataStoreException(
                        "Previous bookie registration still exists at "
                            + regPath + " (lease = " + kv.getLease() + ") after "
                            + (2 * bkRegister.getTtlSeconds()) + " seconds elapsed");
                } catch (UncheckedExecutionException uee) {
                    throw new MetadataStoreException(uee.getMessage(), uee.getCause());
                } finally {
                    watcher.close();
                }
            } else {
                // key exists with same lease
                return true;
            }
        }
    }
    /**
     * Atomically creates the registration key bound to the current lease via an
     * etcd transaction; fails if another bookie registered concurrently.
     */
    private void doRegisterBookie(String regPath, long leaseId) throws MetadataStoreException {
        if (checkRegNodeAndWaitExpired(regPath, leaseId)) {
            // the bookie is already registered under `${regPath}` with `${leaseId}`.
            return;
        }
        ByteSequence regPathBs = ByteSequence.from(regPath, UTF_8);
        // If the key exists (create revision > 0), read it so we can report the
        // conflicting lease; otherwise put an empty value bound to our lease.
        Txn txn = kvClient.txn()
            .If(new Cmp(
                regPathBs,
                Op.GREATER,
                CmpTarget.createRevision(0)))
            .Then(io.etcd.jetcd.op.Op.get(regPathBs, GetOption.DEFAULT))
            .Else(io.etcd.jetcd.op.Op.put(
                regPathBs,
                ByteSequence.from(new byte[0]),
                PutOption.newBuilder()
                    .withLeaseId(bkRegister.get())
                    .build()
            ));
        TxnResponse txnResp = msResult(txn.commit());
        if (txnResp.isSucceeded()) {
            // the key already exists
            GetResponse getResp = txnResp.getGetResponses().get(0);
            if (getResp.getCount() <= 0) {
                throw new MetadataStoreException(
                    "Failed to register bookie under '" + regPath
                        + "', but no bookie is registered there.");
            } else {
                KeyValue kv = getResp.getKvs().get(0);
                throw new MetadataStoreException("Another bookie already registered under '"
                    + regPath + "': lease = " + kv.getLease());
            }
        } else {
            log.info("Successfully registered bookie at {}", regPath);
        }
    }
    /**
     * Registers the bookie under the readonly path and removes any stale
     * writable registration.
     */
    private void doRegisterReadonlyBookie(BookieId bookieId, long leaseId) throws MetadataStoreException {
        String readonlyRegPath = getReadonlyBookiePath(scope, bookieId);
        doRegisterBookie(readonlyRegPath, leaseId);
        String writableRegPath = getWritableBookiePath(scope, bookieId);
        msResult(kvClient.delete(ByteSequence.from(writableRegPath, UTF_8)));
    }
    /**
     * Deletes the bookie's registration node (writable or readonly).
     * Deleting an already-absent node is logged but not treated as an error.
     */
    @Override
    public void unregisterBookie(BookieId bookieId, boolean readOnly) throws BookieException {
        String regPath;
        if (readOnly) {
            regPath = getReadonlyBookiePath(scope, bookieId);
        } else {
            regPath = getWritableBookiePath(scope, bookieId);
        }
        DeleteResponse delResp = msResult(kvClient.delete(ByteSequence.from(regPath, UTF_8)));
        if (delResp.getDeleted() > 0) {
            log.info("Successfully unregistered bookie {} from {}", bookieId, regPath);
        } else {
            log.info("Bookie disappeared from {} before unregistering", regPath);
        }
    }
    /**
     * Returns true if the bookie is registered under either the writable or
     * the readonly path. Both lookups are issued concurrently.
     */
    @Override
    public boolean isBookieRegistered(BookieId bookieId) throws BookieException {
        CompletableFuture<GetResponse> getWritableFuture = kvClient.get(
            ByteSequence.from(getWritableBookiePath(scope, bookieId), UTF_8),
            GetOption.newBuilder()
                .withCountOnly(true)
                .build());
        CompletableFuture<GetResponse> getReadonlyFuture = kvClient.get(
            ByteSequence.from(getReadonlyBookiePath(scope, bookieId), UTF_8),
            GetOption.newBuilder()
                .withCountOnly(true)
                .build());
        return msResult(getWritableFuture).getCount() > 0
            || msResult(getReadonlyFuture).getCount() > 0;
    }
    /**
     * Writes the bookie's cookie with optimistic concurrency control:
     * Version.NEW requires the key to not exist; otherwise the stored
     * mod revision must match the supplied LongVersion.
     *
     * @throws MetadataStoreException on a version conflict
     */
    @Override
    public void writeCookie(BookieId bookieId, Versioned<byte[]> cookieData) throws BookieException {
        ByteSequence cookiePath = ByteSequence.from(getCookiePath(scope, bookieId), UTF_8);
        Txn txn = kvClient.txn();
        if (Version.NEW == cookieData.getVersion()) {
            txn.If(new Cmp(
                cookiePath,
                Op.GREATER,
                CmpTarget.createRevision(0L))
            )
                // if key not exists, create one.
                .Else(io.etcd.jetcd.op.Op.put(
                    cookiePath,
                    ByteSequence.from(cookieData.getValue()),
                    PutOption.DEFAULT)
                );
        } else {
            if (!(cookieData.getVersion() instanceof LongVersion)) {
                throw new BookieIllegalOpException("Invalid version type, expected it to be LongVersion");
            }
            txn.If(new Cmp(
                cookiePath,
                Op.EQUAL,
                CmpTarget.modRevision(((LongVersion) cookieData.getVersion()).getLongVersion()))
            )
                .Then(io.etcd.jetcd.op.Op.put(
                    cookiePath,
                    ByteSequence.from(cookieData.getValue()),
                    PutOption.DEFAULT)
                );
        }
        // For NEW cookies the txn must FAIL its If (key absent) to have written;
        // for existing cookies it must SUCCEED (mod revision matched). Any other
        // combination is a conflicting concurrent write.
        TxnResponse response = msResult(txn.commit());
        if (response.isSucceeded() != (Version.NEW != cookieData.getVersion())) {
            throw new MetadataStoreException(
                "Conflict on writing cookie for bookie " + bookieId);
        }
    }
    /**
     * Reads the bookie's cookie; the etcd mod revision is returned as the
     * cookie's LongVersion for subsequent CAS writes/removals.
     *
     * @throws CookieNotFoundException if no cookie exists for the bookie
     */
    @Override
    public Versioned<byte[]> readCookie(BookieId bookieId) throws BookieException {
        ByteSequence cookiePath = ByteSequence.from(getCookiePath(scope, bookieId), UTF_8);
        GetResponse resp = msResult(kvClient.get(cookiePath));
        if (resp.getCount() <= 0) {
            throw new CookieNotFoundException(bookieId.toString());
        } else {
            KeyValue kv = resp.getKvs().get(0);
            return new Versioned<>(
                kv.getValue().getBytes(),
                new LongVersion(kv.getModRevision()));
        }
    }
    /**
     * Deletes the bookie's cookie iff the stored mod revision matches
     * {@code version}; distinguishes "bad version" from "not found" via the
     * Else branch of the transaction.
     */
    @Override
    public void removeCookie(BookieId bookieId, Version version) throws BookieException {
        ByteSequence cookiePath = ByteSequence.from(getCookiePath(scope, bookieId), UTF_8);
        Txn delTxn = kvClient.txn()
            .If(new Cmp(
                cookiePath,
                Op.EQUAL,
                CmpTarget.modRevision(((LongVersion) version).getLongVersion())
            ))
            .Then(io.etcd.jetcd.op.Op.delete(
                cookiePath,
                DeleteOption.DEFAULT
            ))
            .Else(io.etcd.jetcd.op.Op.get(
                cookiePath,
                GetOption.newBuilder().withCountOnly(true).build()
            ));
        TxnResponse txnResp = msResult(delTxn.commit());
        if (!txnResp.isSucceeded()) {
            GetResponse getResp = txnResp.getGetResponses().get(0);
            if (getResp.getCount() > 0) {
                throw new MetadataStoreException(
                    "Failed to remove cookie from " + cookiePath.toString(UTF_8)
                        + " for bookie " + bookieId + " : bad version '" + version + "'");
            } else {
                throw new CookieNotFoundException(bookieId.toString());
            }
        } else {
            log.info("Removed cookie from {} for bookie {}",
                cookiePath.toString(UTF_8), bookieId);
        }
    }
    /**
     * Returns the cluster instance id stored at initialization time.
     *
     * @throws MetadataStoreException if the cluster has not been initialized
     */
    @Override
    public String getClusterInstanceId() throws BookieException {
        GetResponse response = msResult(
            kvClient.get(ByteSequence.from(getClusterInstanceIdPath(scope), UTF_8)));
        if (response.getCount() <= 0) {
            log.error("BookKeeper metadata doesn't exist in Etcd. "
                + "Has the cluster been initialized? "
                + "Try running bin/bookkeeper shell initNewCluster");
            throw new MetadataStoreException("BookKeeper is not initialized under '" + scope + "' yet");
        } else {
            KeyValue kv = response.getKvs().get(0);
            return new String(kv.getValue().getBytes(), UTF_8);
        }
    }
    // Returns true when the root scope key already exists (cluster present).
    @Override
    public boolean prepareFormat() throws Exception {
        ByteSequence rootScopeKey = ByteSequence.from(scope, UTF_8);
        GetResponse resp = msResult(kvClient.get(rootScopeKey));
        return resp.getCount() > 0;
    }
    @Override
    public boolean initNewCluster() throws Exception {
        return initNewCluster(kvClient, scope);
    }
    /**
     * Initializes the cluster layout under {@code scope} in a single
     * transaction, creating all well-known sub-paths. The txn only writes
     * when the root scope key does not yet exist; returns true if it wrote.
     */
    static boolean initNewCluster(KV kvClient, String scope) throws Exception {
        ByteSequence rootScopeKey = ByteSequence.from(scope, UTF_8);
        String instanceId = UUID.randomUUID().toString();
        LedgerLayout layout = new LedgerLayout(
            EtcdLedgerManagerFactory.class.getName(),
            EtcdLedgerManagerFactory.VERSION
        );
        Txn initTxn = kvClient.txn()
            .If(new Cmp(
                rootScopeKey,
                Op.GREATER,
                CmpTarget.createRevision(0L)
            ))
            // only put keys when root scope doesn't exist
            .Else(
                // `${scope}`
                io.etcd.jetcd.op.Op.put(
                    rootScopeKey,
                    EtcdConstants.EMPTY_BS,
                    PutOption.DEFAULT
                ),
                // `${scope}/layout`
                io.etcd.jetcd.op.Op.put(
                    ByteSequence.from(getLayoutKey(scope), UTF_8),
                    ByteSequence.from(layout.serialize()),
                    PutOption.DEFAULT
                ),
                // `${scope}/instanceid`
                io.etcd.jetcd.op.Op.put(
                    ByteSequence.from(getClusterInstanceIdPath(scope), UTF_8),
                    ByteSequence.from(instanceId, UTF_8),
                    PutOption.DEFAULT
                ),
                // `${scope}/cookies`
                io.etcd.jetcd.op.Op.put(
                    ByteSequence.from(getCookiesPath(scope), UTF_8),
                    EtcdConstants.EMPTY_BS,
                    PutOption.DEFAULT
                ),
                // `${scope}/bookies`
                io.etcd.jetcd.op.Op.put(
                    ByteSequence.from(getBookiesPath(scope), UTF_8),
                    EtcdConstants.EMPTY_BS,
                    PutOption.DEFAULT
                ),
                // `${scope}/bookies/writable`
                io.etcd.jetcd.op.Op.put(
                    ByteSequence.from(getWritableBookiesPath(scope), UTF_8),
                    EtcdConstants.EMPTY_BS,
                    PutOption.DEFAULT
                ),
                // `${scope}/bookies/readonly`
                io.etcd.jetcd.op.Op.put(
                    ByteSequence.from(getReadonlyBookiesPath(scope), UTF_8),
                    EtcdConstants.EMPTY_BS,
                    PutOption.DEFAULT
                ),
                // `${scope}/ledgers`
                io.etcd.jetcd.op.Op.put(
                    ByteSequence.from(getLedgersPath(scope), UTF_8),
                    EtcdConstants.EMPTY_BS,
                    PutOption.DEFAULT
                ),
                // `${scope}/buckets`
                io.etcd.jetcd.op.Op.put(
                    ByteSequence.from(getBucketsPath(scope), UTF_8),
                    EtcdConstants.EMPTY_BS,
                    PutOption.DEFAULT
                ),
                // `${scope}/underreplication`
                io.etcd.jetcd.op.Op.put(
                    ByteSequence.from(getUnderreplicationPath(scope), UTF_8),
                    EtcdConstants.EMPTY_BS,
                    PutOption.DEFAULT
                )
            );
        // If-branch succeeded means the scope already existed, so init did NOT run.
        return !msResult(initTxn.commit()).isSucceeded();
    }
    @Override
    public boolean format() throws Exception {
        return format(kvClient, scope);
    }
    /**
     * Formats the cluster: initializes a fresh one if none exists, otherwise
     * nukes the existing cluster first and re-initializes.
     */
    static boolean format(KV kvClient, String scope) throws Exception {
        ByteSequence rootScopeKey = ByteSequence.from(scope, UTF_8);
        GetResponse resp = msResult(kvClient.get(rootScopeKey));
        if (resp.getCount() <= 0) {
            // cluster doesn't exist
            return initNewCluster(kvClient, scope);
        } else if (nukeExistingCluster(kvClient, scope)) { // cluster exists and has successfully nuked it
            return initNewCluster(kvClient, scope);
        } else {
            return false;
        }
    }
    @Override
    public boolean nukeExistingCluster() throws Exception {
        return nukeExistingCluster(kvClient, scope);
    }
    @Override
    public void addRegistrationListener(RegistrationListener listener) {
        bkRegister.addRegistrationListener(listener);
    }
    /**
     * Deletes all metadata under {@code scope}, refusing to proceed if any
     * bookie registration node (other than the well-known parent paths)
     * still exists, i.e. bookies appear to be alive.
     */
    static boolean nukeExistingCluster(KV kvClient, String scope) throws Exception {
        ByteSequence rootScopeKey = ByteSequence.from(scope, UTF_8);
        GetResponse resp = msResult(kvClient.get(rootScopeKey));
        if (resp.getCount() <= 0) {
            log.info("There is no existing cluster with under scope '{}' in Etcd, "
                + "so exiting nuke operation", scope);
            return true;
        }
        String bookiesPath = getBookiesPath(scope);
        String bookiesEndPath = getBookiesEndPath(scope);
        resp = msResult(kvClient.get(
            ByteSequence.from(bookiesPath, UTF_8),
            GetOption.newBuilder()
                .withRange(ByteSequence.from(bookiesEndPath, UTF_8))
                .withKeysOnly(true)
                .build()
        ));
        String writableBookiesPath = getWritableBookiesPath(scope);
        String readonlyBookiesPath = getReadonlyBookiesPath(scope);
        boolean hasBookiesAlive = false;
        for (KeyValue kv : resp.getKvs()) {
            String keyStr = new String(kv.getKey().getBytes(), UTF_8);
            // The three parent paths always exist; anything else under
            // `${scope}/bookies` is a live registration.
            if (keyStr.equals(bookiesPath)
                || keyStr.equals(writableBookiesPath)
                || keyStr.equals(readonlyBookiesPath)) {
                continue;
            } else {
                hasBookiesAlive = true;
                break;
            }
        }
        if (hasBookiesAlive) {
            log.error("Bookies are still up and connected to this cluster, "
                + "stop all bookies before nuking the cluster");
            return false;
        }
        DeleteResponse delResp = msResult(kvClient.delete(
            rootScopeKey,
            DeleteOption.newBuilder()
                .withRange(ByteSequence.from(getScopeEndKey(scope), UTF_8))
                .build()));
        log.info("Successfully nuked cluster under scope '{}' : {} kv pairs deleted",
            scope, delResp.getDeleted());
        return true;
    }
}
| 595 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Etcd based metadata driver.
*/
package org.apache.bookkeeper.metadata.etcd; | 596 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/Etcd64bitIdGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import static com.google.common.base.Preconditions.checkArgument;
import static org.apache.bookkeeper.metadata.etcd.EtcdConstants.EMPTY_BS;
import io.etcd.jetcd.ByteSequence;
import io.etcd.jetcd.KV;
import io.etcd.jetcd.KeyValue;
import io.etcd.jetcd.Txn;
import io.etcd.jetcd.kv.GetResponse;
import io.etcd.jetcd.op.Cmp;
import io.etcd.jetcd.op.Cmp.Op;
import io.etcd.jetcd.op.CmpTarget;
import io.etcd.jetcd.options.GetOption;
import io.etcd.jetcd.options.PutOption;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.client.BKException.Code;
import org.apache.bookkeeper.meta.LedgerIdGenerator;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback;
/**
* Generate 64-bit ledger ids from a bucket.
*
* <p>The most significant 8 bits is used as bucket id. The remaining 56 bits are
* used as the id generated per bucket.
*/
@Slf4j
class Etcd64bitIdGenerator implements LedgerIdGenerator {
static final long MAX_ID_PER_BUCKET = 0x00ffffffffffffffL;
static final long BUCKET_ID_MASK = 0xff00000000000000L;
static final int BUCKET_ID_SHIFT = 56;
static final int NUM_BUCKETS = 0x80;
static int getBucketId(long lid) {
return (int) ((lid & BUCKET_ID_MASK) >>> BUCKET_ID_SHIFT);
}
static long getIdInBucket(long lid) {
return lid & MAX_ID_PER_BUCKET;
}
private static final AtomicIntegerFieldUpdater<Etcd64bitIdGenerator> nextBucketIdUpdater =
AtomicIntegerFieldUpdater.newUpdater(Etcd64bitIdGenerator.class, "nextBucketId");
private final String scope;
private final KV kvClient;
private volatile int nextBucketId;
Etcd64bitIdGenerator(KV kvClient, String scope) {
this.kvClient = kvClient;
this.scope = scope;
this.nextBucketId = ThreadLocalRandom.current().nextInt(NUM_BUCKETS);
}
int nextBucketId() {
while (true) {
int bucketId = nextBucketIdUpdater.incrementAndGet(this);
if (bucketId >= NUM_BUCKETS) {
if (nextBucketIdUpdater.compareAndSet(this, bucketId, 0)) {
bucketId = 0;
} else {
// someone has been updated bucketId, try it again.
continue;
}
}
return bucketId;
}
}
@Override
public void generateLedgerId(GenericCallback<Long> cb) {
int bucketId = nextBucketId();
checkArgument(bucketId >= 0 && bucketId < NUM_BUCKETS,
"Invalid bucket id : " + bucketId);
ByteSequence bucketKey = ByteSequence.from(EtcdUtils.getBucketPath(scope, bucketId), StandardCharsets.UTF_8);
Txn txn = kvClient.txn()
.If(new Cmp(bucketKey, Op.GREATER, CmpTarget.createRevision(0)))
.Then(
io.etcd.jetcd.op.Op.put(bucketKey, EMPTY_BS, PutOption.DEFAULT),
io.etcd.jetcd.op.Op.get(bucketKey, GetOption.DEFAULT)
)
.Else(
io.etcd.jetcd.op.Op.put(bucketKey, EMPTY_BS, PutOption.DEFAULT),
io.etcd.jetcd.op.Op.get(bucketKey, GetOption.DEFAULT)
);
txn.commit()
.thenAccept(txnResponse -> {
if (txnResponse.getGetResponses().size() <= 0) {
cb.operationComplete(Code.UnexpectedConditionException, null);
} else {
GetResponse resp = txnResponse.getGetResponses().get(0);
if (resp.getCount() > 0) {
KeyValue kv = resp.getKvs().get(0);
if (kv.getVersion() > MAX_ID_PER_BUCKET) {
log.warn("Etcd bucket '{}' is overflowed", bucketKey.toString(StandardCharsets.UTF_8));
// the bucket is overflowed, moved to next bucket.
generateLedgerId(cb);
} else {
long version = kv.getVersion();
long lid = ((((long) bucketId) << BUCKET_ID_SHIFT) & BUCKET_ID_MASK)
| (version & MAX_ID_PER_BUCKET);
cb.operationComplete(Code.OK, lid);
}
} else {
cb.operationComplete(Code.UnexpectedConditionException, null);
}
}
})
.exceptionally(cause -> {
cb.operationComplete(Code.MetaStoreException, null);
return null;
});
}
@Override
public void close() {
// no-op
}
}
| 597 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/EtcdConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd;
import io.etcd.jetcd.ByteSequence;
/**
 * Constants used in the Etcd metadata drivers.
 *
 * <p>All values are path components combined under the driver's scope prefix
 * (see the node names below); none of them may change without breaking
 * compatibility with data already stored in etcd.
 */
final class EtcdConstants {

    // Utility holder: not instantiable.
    private EtcdConstants() {}

    // Suffix appended to a prefix to form a range-end for etcd scans;
    // presumably "0" as the ASCII successor of the '/' separator — TODO confirm against callers.
    public static final String END_SEP = "0";

    // Top-level layout/metadata nodes.
    public static final String LAYOUT_NODE = "layout";
    public static final String INSTANCEID_NODE = "instanceid";
    public static final String COOKIES_NODE = "cookies";
    public static final String LEDGERS_NODE = "ledgers";
    // Parent node of the id-generator buckets.
    public static final String BUCKETS_NODE = "buckets";

    //
    // membership related constants
    //

    public static final String MEMBERS_NODE = "bookies";
    public static final String WRITEABLE_NODE = "writable";
    public static final String READONLY_NODE = "readonly";

    //
    // underreplication related constants
    //

    public static final String UR_NODE = "underreplication";

    // Shared zero-length value used when only a key's existence/version matters.
    public static final ByteSequence EMPTY_BS = ByteSequence.from(new byte[0]);
}
| 598 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/helpers/KeyIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd.helpers;
import static org.apache.bookkeeper.common.concurrent.FutureUtils.result;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.CompletableFuture;
/**
 * Iterator over a range of key/value pairs produced by a {@link KeyStream}.
 *
 * <p>Each call to {@link #next()} returns one batch of keys read from the
 * underlying stream; an empty batch from the stream marks the end of iteration.
 *
 * <p>Both methods are {@code synchronized}, so an instance may be shared
 * between threads.
 */
public class KeyIterator<T> {

    private final KeyStream<T> stream;
    // Outstanding read, if any; null means the next batch has not been requested yet.
    private CompletableFuture<List<T>> readFuture = null;
    // Flips to false permanently once the stream returns an empty batch.
    private boolean hasNext = true;
    // Batch most recently fetched by hasNext(), handed out by next().
    private List<T> keys = null;

    public KeyIterator(KeyStream<T> stream) {
        this.stream = stream;
    }

    /**
     * Return whether another non-empty batch is available, issuing a read to the
     * underlying stream if one is not already outstanding.
     *
     * @throws Exception if the underlying read fails
     */
    public synchronized boolean hasNext() throws Exception {
        if (!hasNext) {
            return false;
        }
        if (null == readFuture) {
            readFuture = stream.readNext();
        }
        keys = result(readFuture);
        if (keys.isEmpty()) {
            hasNext = false;
        }
        return hasNext;
    }

    /**
     * Return the next batch of keys and prefetch the following one.
     *
     * @throws NoSuchElementException if the stream is exhausted
     * @throws Exception if the underlying read fails
     */
    public synchronized List<T> next() throws Exception {
        if (!hasNext()) {
            throw new NoSuchElementException("Reach end of key stream");
        }
        List<T> batch = keys;
        // Prefetch only after successfully handing out a batch: the previous
        // finally-based prefetch issued a useless read even after the stream
        // was exhausted (or after a failed read).
        readFuture = stream.readNext();
        return batch;
    }
}
| 599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.