mirror of https://github.com/hyperledger/besu
Snapsync persist state (#4381)
This PR avoids restarting the download of the world state from scratch when restarting Besu.

Signed-off-by: Karim TAAM <karim.t2am@gmail.com>

pull/4586/head
parent 6f20060182
commit da9b10767a
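To illustrate how the new SnapPersistedContext (added in the diff below) could let a restarted node pick up where it left off, here is a hedged sketch of resume logic. The downloader-side names (resumeOrStart, startFromScratch, reEnqueue, scheduleHealing) are hypothetical and not part of this commit; only SnapPersistedContext and its methods come from the diff.

// Hypothetical resume logic, for illustration only. The surrounding class and
// the enqueue/heal methods are assumptions; only SnapPersistedContext and its
// methods exist in this commit.
import java.util.HashSet;
import java.util.List;

import org.apache.tuweni.bytes.Bytes;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapPersistedContext;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.AccountRangeDataRequest;
import org.hyperledger.besu.ethereum.storage.StorageProvider;

class SnapSyncResumeSketch {

  void resumeOrStart(final StorageProvider storageProvider) {
    final SnapPersistedContext context = new SnapPersistedContext(storageProvider);

    // Account ranges that were still queued when the node shut down.
    final List<AccountRangeDataRequest> pendingRanges = context.getPersistedTasks();
    // Accounts flagged as inconsistent, minus the internal index key.
    final HashSet<Bytes> accountsToHeal = context.getInconsistentAccounts();

    if (pendingRanges.isEmpty() && accountsToHeal.isEmpty()) {
      startFromScratch(); // hypothetical: begin a fresh snap sync
    } else {
      reEnqueue(pendingRanges); // hypothetical: feed persisted ranges back into the queue
      scheduleHealing(accountsToHeal); // hypothetical: revisit the flagged accounts
    }
  }

  private void startFromScratch() {}

  private void reEnqueue(final List<AccountRangeDataRequest> ranges) {}

  private void scheduleHealing(final HashSet<Bytes> accounts) {}
}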
@@ -0,0 +1,138 @@
/*
 * Copyright contributors to Hyperledger Besu
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
package org.hyperledger.besu.ethereum.eth.sync.snapsync;

import org.hyperledger.besu.ethereum.eth.sync.backwardsync.GenericKeyValueStorageFacade;
import org.hyperledger.besu.ethereum.eth.sync.backwardsync.ValueConvertor;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.AccountRangeDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPInput;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;

import java.io.IOException;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import org.apache.tuweni.bytes.Bytes;

/**
 * Persists snap sync progress so that a restarted node can resume the world state download
 * instead of starting it again from scratch: the account ranges still to download and the
 * accounts flagged as inconsistent are stored in dedicated key-value segments.
 */
public class SnapPersistedContext {

  private final byte[] SNAP_INCONSISTENT_ACCOUNT_INDEX =
      "snapInconsistentAccountsStorageIndex".getBytes(StandardCharsets.UTF_8);

  private final GenericKeyValueStorageFacade<BigInteger, AccountRangeDataRequest>
      accountRangeToDownload;
  private final GenericKeyValueStorageFacade<BigInteger, Bytes> inconsistentAccounts;

  public SnapPersistedContext(final StorageProvider storageProvider) {
    this.accountRangeToDownload =
        new GenericKeyValueStorageFacade<>(
            BigInteger::toByteArray,
            new ValueConvertor<>() {
              @Override
              public AccountRangeDataRequest fromBytes(final byte[] bytes) {
                return AccountRangeDataRequest.deserialize(
                    new BytesValueRLPInput(Bytes.of(bytes), false));
              }

              @Override
              public byte[] toBytes(final AccountRangeDataRequest value) {
                return value.serialize().toArrayUnsafe();
              }
            },
            storageProvider.getStorageBySegmentIdentifier(
                KeyValueSegmentIdentifier.SNAPSYNC_MISSING_ACCOUNT_RANGE));
    this.inconsistentAccounts =
        new GenericKeyValueStorageFacade<>(
            BigInteger::toByteArray,
            new ValueConvertor<>() {
              @Override
              public Bytes fromBytes(final byte[] bytes) {
                return Bytes.of(bytes);
              }

              @Override
              public byte[] toBytes(final Bytes value) {
                return value.toArrayUnsafe();
              }
            },
            storageProvider.getStorageBySegmentIdentifier(
                KeyValueSegmentIdentifier.SNAPSYNC_ACCOUNT_TO_FIX));
  }

  public void updatePersistedTasks(final List<? extends SnapDataRequest> accountRangeDataRequests) {
    accountRangeToDownload.clear();
    accountRangeToDownload.putAll(
        keyValueStorageTransaction ->
            IntStream.range(0, accountRangeDataRequests.size())
                .forEach(
                    index ->
                        keyValueStorageTransaction.put(
                            BigInteger.valueOf(index).toByteArray(),
                            ((AccountRangeDataRequest) accountRangeDataRequests.get(index))
                                .serialize()
                                .toArrayUnsafe())));
  }

  public void addInconsistentAccount(final Bytes inconsistentAccount) {
    final BigInteger index =
        inconsistentAccounts
            .get(SNAP_INCONSISTENT_ACCOUNT_INDEX)
            .map(bytes -> new BigInteger(bytes.toArrayUnsafe()).add(BigInteger.ONE))
            .orElse(BigInteger.ZERO);
    inconsistentAccounts.putAll(
        keyValueStorageTransaction -> {
          keyValueStorageTransaction.put(SNAP_INCONSISTENT_ACCOUNT_INDEX, index.toByteArray());
          keyValueStorageTransaction.put(index.toByteArray(), inconsistentAccount.toArrayUnsafe());
        });
  }

  public List<AccountRangeDataRequest> getPersistedTasks() {
    return accountRangeToDownload
        .streamValuesFromKeysThat(bytes -> true)
        .collect(Collectors.toList());
  }

  public HashSet<Bytes> getInconsistentAccounts() {
    return inconsistentAccounts
        .streamValuesFromKeysThat(notEqualsTo(SNAP_INCONSISTENT_ACCOUNT_INDEX))
        .collect(Collectors.toCollection(HashSet::new));
  }

  public void clearAccountRangeTasks() {
    accountRangeToDownload.clear();
  }

  public void clear() {
    accountRangeToDownload.clear();
    inconsistentAccounts.clear();
  }

  public void close() throws IOException {
    accountRangeToDownload.close();
    inconsistentAccounts.close();
  }

  private Predicate<byte[]> notEqualsTo(final byte[] name) {
    return key -> !Arrays.equals(key, name);
  }
}
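For the write side, a hedged sketch of how the two buckets above might be filled during the download; the caller below and the moment it runs are assumptions, and only the SnapPersistedContext calls are taken from the class in this diff.

// Illustrative write-side usage, not code from this commit.
import java.util.List;

import org.apache.tuweni.bytes.Bytes;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapPersistedContext;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.AccountRangeDataRequest;

class SnapSyncCheckpointSketch {

  void checkpoint(
      final SnapPersistedContext context,
      final List<AccountRangeDataRequest> outstandingRanges,
      final Bytes inconsistentAccountHash) {
    // Replaces any previously persisted ranges with the currently outstanding ones
    // (stored under the SNAPSYNC_MISSING_ACCOUNT_RANGE segment).
    context.updatePersistedTasks(outstandingRanges);
    // Appends one account to the "accounts to fix" bucket under an auto-incremented index
    // (stored under the SNAPSYNC_ACCOUNT_TO_FIX segment).
    context.addInconsistentAccount(inconsistentAccountHash);
  }
}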
@@ -1,290 +0,0 @@
/*
 * Copyright ConsenSys AG.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
package org.hyperledger.besu.services.tasks;

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;

import com.google.common.annotations.VisibleForTesting;
import org.apache.tuweni.bytes.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FlatFileTaskCollection<T> implements TaskCollection<T> {
  private static final Logger LOG = LoggerFactory.getLogger(FlatFileTaskCollection.class);
  private static final long DEFAULT_FILE_ROLL_SIZE_BYTES = 1024 * 1024 * 10; // 10Mb
  static final String FILENAME_PREFIX = "tasks";
  private final Set<FlatFileTask<T>> outstandingTasks = new HashSet<>();

  private final Path storageDirectory;
  private final Function<T, Bytes> serializer;
  private final Function<Bytes, T> deserializer;
  private final long rollWhenFileSizeExceedsBytes;

  private final ByteBuffer lengthBuffer = ByteBuffer.allocate(Integer.BYTES);

  private FileChannel readFileChannel;
  private FileChannel writeFileChannel;

  private long size = 0;
  private int readFileNumber = 0;
  private int writeFileNumber = 0;

  public FlatFileTaskCollection(
      final Path storageDirectory,
      final Function<T, Bytes> serializer,
      final Function<Bytes, T> deserializer) {
    this(storageDirectory, serializer, deserializer, DEFAULT_FILE_ROLL_SIZE_BYTES);
  }

  FlatFileTaskCollection(
      final Path storageDirectory,
      final Function<T, Bytes> serializer,
      final Function<Bytes, T> deserializer,
      final long rollWhenFileSizeExceedsBytes) {
    this.storageDirectory = storageDirectory;
    this.serializer = serializer;
    this.deserializer = deserializer;
    this.rollWhenFileSizeExceedsBytes = rollWhenFileSizeExceedsBytes;
    writeFileChannel = openWriteFileChannel(writeFileNumber);
    readFileChannel = openReadFileChannel(readFileNumber);
  }

  private FileChannel openReadFileChannel(final int fileNumber) {
    try {
      return FileChannel.open(
          pathForFileNumber(fileNumber),
          StandardOpenOption.DELETE_ON_CLOSE,
          StandardOpenOption.READ);
    } catch (final IOException e) {
      throw new StorageException(e);
    }
  }

  private FileChannel openWriteFileChannel(final int fileNumber) {
    try {
      return FileChannel.open(
          pathForFileNumber(fileNumber),
          StandardOpenOption.TRUNCATE_EXISTING,
          StandardOpenOption.WRITE,
          StandardOpenOption.CREATE);
    } catch (final IOException e) {
      throw new StorageException(
          "There was a problem opening FileChannel " + pathForFileNumber(fileNumber), e);
    }
  }

  @Override
  public synchronized void add(final T taskData) {
    final Bytes data = serializer.apply(taskData);
    try {
      writeTaskData(data);
      size++;
      if (writeFileChannel.size() > rollWhenFileSizeExceedsBytes) {
        LOG.debug("Writing reached end of file {}", writeFileNumber);
        writeFileChannel.close();
        writeFileNumber++;
        writeFileChannel = openWriteFileChannel(writeFileNumber);
      }
    } catch (final IOException e) {
      throw new StorageException(
          "There was a problem adding to FileChannel " + pathForFileNumber(writeFileNumber), e);
    }
  }

  @Override
  public synchronized Task<T> remove() {
    if (isEmpty()) {
      return null;
    }
    try {
      final ByteBuffer dataBuffer = readNextTaskData();
      final T data = deserializer.apply(Bytes.wrapByteBuffer(dataBuffer));
      final FlatFileTask<T> task = new FlatFileTask<>(this, data);
      outstandingTasks.add(task);
      size--;
      return task;
    } catch (final IOException e) {
      throw new StorageException(
          "There was a problem removing from FileChannel " + pathForFileNumber(readFileNumber), e);
    }
  }

  private ByteBuffer readNextTaskData() throws IOException {
    final int dataLength = readDataLength();
    final ByteBuffer dataBuffer = ByteBuffer.allocate(dataLength);
    readBytes(dataBuffer, dataLength);
    return dataBuffer;
  }

  private void writeTaskData(final Bytes data) throws IOException {
    final long offset = writeFileChannel.size();
    writeDataLength(data.size(), offset);
    writeFileChannel.write(ByteBuffer.wrap(data.toArrayUnsafe()), offset + Integer.BYTES);
  }

  private int readDataLength() throws IOException {
    lengthBuffer.position(0);
    lengthBuffer.limit(Integer.BYTES);
    readBytes(lengthBuffer, Integer.BYTES);
    return lengthBuffer.getInt(0);
  }

  private void writeDataLength(final int size, final long offset) throws IOException {
    lengthBuffer.position(0);
    lengthBuffer.putInt(size);
    lengthBuffer.flip();
    writeFileChannel.write(lengthBuffer, offset);
  }

  private void readBytes(final ByteBuffer buffer, final int expectedLength) throws IOException {
    int readBytes = readFileChannel.read(buffer);

    if (readBytes == -1 && writeFileNumber > readFileNumber) {
      LOG.debug("Reading reached end of file {}", readFileNumber);
      readFileChannel.close();
      readFileNumber++;
      readFileChannel = openReadFileChannel(readFileNumber);

      readBytes = readFileChannel.read(buffer);
    }
    if (readBytes != expectedLength) {
      throw new IllegalStateException(
          "Task queue corrupted. Expected to read "
              + expectedLength
              + " bytes but only got "
              + readBytes);
    }
  }

  @Override
  public synchronized long size() {
    return size;
  }

  @Override
  public synchronized boolean isEmpty() {
    return size() == 0;
  }

  @VisibleForTesting
  int getReadFileNumber() {
    return readFileNumber;
  }

  @VisibleForTesting
  int getWriteFileNumber() {
    return writeFileNumber;
  }

  @Override
  public synchronized void clear() {
    outstandingTasks.clear();
    try {
      readFileChannel.close();
      writeFileChannel.close();
      for (int i = readFileNumber; i <= writeFileNumber; i++) {
        final File file = pathForFileNumber(i).toFile();
        if (!file.delete() && file.exists()) {
          LOG.error("Failed to delete tasks file {}", file.getAbsolutePath());
        }
      }
      readFileNumber = 0;
      writeFileNumber = 0;
      writeFileChannel = openWriteFileChannel(writeFileNumber);
      readFileChannel = openReadFileChannel(readFileNumber);
      size = 0;
    } catch (final IOException e) {
      throw new StorageException(e);
    }
  }

  @Override
  public synchronized boolean allTasksCompleted() {
    return isEmpty() && outstandingTasks.isEmpty();
  }

  @Override
  public synchronized void close() {
    try {
      readFileChannel.close();
      writeFileChannel.close();
    } catch (final IOException e) {
      throw new StorageException(e);
    }
  }

  private Path pathForFileNumber(final int fileNumber) {
    return storageDirectory.resolve(FILENAME_PREFIX + fileNumber);
  }

  private synchronized boolean markTaskCompleted(final FlatFileTask<T> task) {
    return outstandingTasks.remove(task);
  }

  private synchronized void handleFailedTask(final FlatFileTask<T> task) {
    if (markTaskCompleted(task)) {
      add(task.getData());
    }
  }

  public static class StorageException extends RuntimeException {
    StorageException(final Throwable t) {
      super(t);
    }

    StorageException(final String m, final Throwable t) {
      super(m, t);
    }
  }

  private static class FlatFileTask<T> implements Task<T> {
    private final AtomicBoolean completed = new AtomicBoolean(false);
    private final FlatFileTaskCollection<T> parentQueue;
    private final T data;

    private FlatFileTask(final FlatFileTaskCollection<T> parentQueue, final T data) {
      this.parentQueue = parentQueue;
      this.data = data;
    }

    @Override
    public T getData() {
      return data;
    }

    @Override
    public void markCompleted() {
      if (completed.compareAndSet(false, true)) {
        parentQueue.markTaskCompleted(this);
      }
    }

    @Override
    public void markFailed() {
      if (completed.compareAndSet(false, true)) {
        parentQueue.handleFailedTask(this);
      }
    }
  }
}
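The deleted FlatFileTaskCollection above persisted queued tasks as length-prefixed records in rolling "tasks<N>" files. The standalone sketch below, using only java.nio, illustrates that framing (a 4-byte big-endian length followed by the payload); it is a simplified illustration written for this note, not the Besu class, and it omits the file rolling and outstanding-task tracking seen above.

// Simplified illustration of the length-prefixed record framing, assuming a
// single file and sequential appends; not part of the Besu codebase.
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

final class LengthPrefixedTaskFile {

  static void append(final FileChannel channel, final byte[] payload) throws IOException {
    final ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES + payload.length);
    buffer.putInt(payload.length); // 4-byte big-endian length prefix
    buffer.put(payload);
    buffer.flip();
    channel.write(buffer, channel.size()); // always append at the current end of file
  }

  static byte[] readAt(final FileChannel channel, final long offset) throws IOException {
    final ByteBuffer lengthBuffer = ByteBuffer.allocate(Integer.BYTES);
    channel.read(lengthBuffer, offset);
    final int length = lengthBuffer.getInt(0);
    final ByteBuffer payload = ByteBuffer.allocate(length);
    channel.read(payload, offset + Integer.BYTES);
    return payload.array();
  }

  public static void main(final String[] args) throws IOException {
    final Path dir = Files.createTempDirectory("flat-file-tasks");
    final Path file = dir.resolve("tasks0"); // mirrors the "tasks" + fileNumber naming above
    try (FileChannel channel =
        FileChannel.open(
            file,
            StandardOpenOption.CREATE,
            StandardOpenOption.READ,
            StandardOpenOption.WRITE)) {
      append(channel, "task-a".getBytes(StandardCharsets.UTF_8));
      append(channel, "task-b".getBytes(StandardCharsets.UTF_8));
      // Reads back the first record: prints "task-a".
      System.out.println(new String(readAt(channel, 0), StandardCharsets.UTF_8));
    }
  }
}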
@@ -1,90 +0,0 @@
/*
 * Copyright ConsenSys AG.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
package org.hyperledger.besu.services.tasks;

import static org.assertj.core.api.Assertions.assertThat;

import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

import org.apache.tuweni.bytes.Bytes;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

public class FlatFileTaskCollectionTest
    extends AbstractTaskQueueTest<FlatFileTaskCollection<Bytes>> {

  private static final int ROLL_SIZE = 10;
  @Rule public final TemporaryFolder folder = new TemporaryFolder();

  @Override
  protected FlatFileTaskCollection<Bytes> createQueue() throws IOException {
    final Path dataDir = folder.newFolder().toPath();
    return createQueue(dataDir);
  }

  private FlatFileTaskCollection<Bytes> createQueue(final Path dataDir) {
    return new FlatFileTaskCollection<>(
        dataDir, Function.identity(), Function.identity(), ROLL_SIZE);
  }

  @Test
  public void shouldRollFilesWhenSizeExceeded() throws Exception {
    final Path dataDir = folder.newFolder().toPath();
    try (final FlatFileTaskCollection<Bytes> queue = createQueue(dataDir)) {
      final List<Bytes> tasks = new ArrayList<>();

      addItem(queue, tasks, 0);
      assertThat(queue.getWriteFileNumber()).isEqualTo(0);
      int tasksInFirstFile = 1;
      while (queue.getWriteFileNumber() == 0) {
        addItem(queue, tasks, tasksInFirstFile);
        tasksInFirstFile++;
      }

      assertThat(queue.getWriteFileNumber()).isGreaterThan(0);
      assertThat(queue.getReadFileNumber()).isEqualTo(0);

      // Add extra items to be sure we have at least one in a later file
      addItem(queue, tasks, 123);
      addItem(queue, tasks, 124);

      final List<Bytes> removedTasks = new ArrayList<>();
      // Read through all the items in the first file.
      for (int i = 0; i < tasksInFirstFile; i++) {
        removedTasks.add(queue.remove().getData());
      }

      // read one more to make sure we are reading from the next file
      removedTasks.add(queue.remove().getData());
      assertThat(queue.getReadFileNumber()).isEqualTo(1);

      // Check that all tasks were read correctly.
      removedTasks.add(queue.remove().getData());
      assertThat(queue.isEmpty()).isTrue();
      assertThat(removedTasks).isEqualTo(tasks);
    }
  }

  private void addItem(
      final FlatFileTaskCollection<Bytes> queue, final List<Bytes> tasks, final int value) {
    tasks.add(Bytes.of(value));
    queue.add(Bytes.of(value));
  }
}