Rename logs bloom indexer to log bloom cache to match CLI flag. (#401)

Changes class names, variables, and CLI flags as needed.
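For operators this is purely a rename; the behaviour of the log bloom cache itself is unchanged. A minimal before/after sketch of the affected configuration, with illustrative values rather than new defaults:

    CLI:  --auto-logs-bloom-indexing-enabled false   becomes   --auto-log-bloom-caching-enabled false
    TOML: auto-logs-bloom-indexing-enabled=true      becomes   auto-log-bloom-caching-enabled=true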

Signed-off-by: Danno Ferrin <danno.ferrin@gmail.com>
Author: Danno Ferrin (5 years ago), committed by GitHub
parent bb0c9cb017
commit 20664f96da
15 changed files:
  1. acceptance-tests/dsl/src/main/java/org/hyperledger/besu/tests/acceptance/dsl/node/ProcessBesuNodeRunner.java (2 changed lines)
  2. acceptance-tests/dsl/src/main/java/org/hyperledger/besu/tests/acceptance/dsl/node/ThreadBesuNodeRunner.java (2 changed lines)
  3. besu/src/main/java/org/hyperledger/besu/Runner.java (19 changed lines)
  4. besu/src/main/java/org/hyperledger/besu/RunnerBuilder.java (8 changed lines)
  5. besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java (8 changed lines)
  6. besu/src/main/java/org/hyperledger/besu/cli/subcommands/operator/GenerateLogBloomCache.java (14 changed lines)
  7. besu/src/test/java/org/hyperledger/besu/cli/BesuCommandTest.java (2 changed lines)
  8. besu/src/test/java/org/hyperledger/besu/cli/CommandTestAbstract.java (2 changed lines)
  9. besu/src/test/resources/everything_config.toml (3 changed lines)
  10. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/AdminGenerateLogBloomCache.java (4 changed lines)
  11. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/query/AutoTransactionLogBloomCachingService.java (26 changed lines)
  12. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/query/BlockchainQueries.java (13 changed lines)
  13. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/query/TransactionLogBloomCacher.java (52 changed lines)
  14. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/AdminGenerateLogBloomCacheTest.java (18 changed lines)
  15. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/query/BlockchainQueriesLogCacheTest.java (2 changed lines)

@@ -257,7 +257,7 @@ public class ProcessBesuNodeRunner implements BesuNodeRunner {
 params.add("--key-value-storage");
 params.add("rocksdb");
-params.add("--auto-logs-bloom-indexing-enabled");
+params.add("--auto-log-bloom-caching-enabled");
 params.add("false");
 LOG.info("Creating besu process with params {}", params);

@@ -195,7 +195,7 @@ public class ThreadBesuNodeRunner implements BesuNodeRunner {
 .map(EnodeURL::fromString)
 .collect(Collectors.toList()))
 .besuPluginContext(new BesuPluginContextImpl())
-.autoLogsBloomIndexing(false)
+.autoLogBloomCaching(false)
 .build();
 runner.start();

@@ -18,8 +18,8 @@ import org.hyperledger.besu.controller.BesuController;
 import org.hyperledger.besu.ethereum.api.graphql.GraphQLHttpService;
 import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcHttpService;
 import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketService;
-import org.hyperledger.besu.ethereum.api.query.AutoTransactionLogsIndexingService;
-import org.hyperledger.besu.ethereum.api.query.TransactionLogsIndexer;
+import org.hyperledger.besu.ethereum.api.query.AutoTransactionLogBloomCachingService;
+import org.hyperledger.besu.ethereum.api.query.TransactionLogBloomCacher;
 import org.hyperledger.besu.ethereum.chain.Blockchain;
 import org.hyperledger.besu.ethereum.p2p.network.NetworkRunner;
 import org.hyperledger.besu.ethereum.p2p.peers.EnodeURL;
@@ -61,7 +61,8 @@ public class Runner implements AutoCloseable {
 private final BesuController<?> besuController;
 private final Path dataDir;
 private final Optional<StratumServer> stratumServer;
-private final Optional<AutoTransactionLogsIndexingService> autoTransactionLogsIndexingService;
+private final Optional<AutoTransactionLogBloomCachingService>
+autoTransactionLogBloomCachingService;
 Runner(
 final Vertx vertx,
@@ -74,7 +75,7 @@ public class Runner implements AutoCloseable {
 final Optional<MetricsService> metrics,
 final BesuController<?> besuController,
 final Path dataDir,
-final Optional<TransactionLogsIndexer> transactionLogsIndexer,
+final Optional<TransactionLogBloomCacher> transactionLogBloomCacher,
 final Blockchain blockchain) {
 this.vertx = vertx;
 this.networkRunner = networkRunner;
@@ -86,9 +87,9 @@ public class Runner implements AutoCloseable {
 this.besuController = besuController;
 this.dataDir = dataDir;
 this.stratumServer = stratumServer;
-this.autoTransactionLogsIndexingService =
-transactionLogsIndexer.map(
-indexer -> new AutoTransactionLogsIndexingService(blockchain, indexer));
+this.autoTransactionLogBloomCachingService =
+transactionLogBloomCacher.map(
+cacher -> new AutoTransactionLogBloomCachingService(blockchain, cacher));
 }
 public void start() {
@@ -112,7 +113,7 @@ public class Runner implements AutoCloseable {
 LOG.info("Ethereum main loop is up.");
 writeBesuPortsToFile();
 writeBesuNetworksToFile();
-autoTransactionLogsIndexingService.ifPresent(AutoTransactionLogsIndexingService::start);
+autoTransactionLogBloomCachingService.ifPresent(AutoTransactionLogBloomCachingService::start);
 } catch (final Exception ex) {
 LOG.error("Startup failed", ex);
 throw new IllegalStateException(ex);
@@ -135,7 +136,7 @@ public class Runner implements AutoCloseable {
 networkRunner.stop();
 waitForServiceToStop("Network", networkRunner::awaitStop);
-autoTransactionLogsIndexingService.ifPresent(AutoTransactionLogsIndexingService::stop);
+autoTransactionLogBloomCachingService.ifPresent(AutoTransactionLogBloomCachingService::stop);
 natService.stop();
 besuController.close();
 vertx.close((res) -> vertxShutdownLatch.countDown());

@@ -144,7 +144,7 @@ public class RunnerBuilder {
 private Collection<EnodeURL> staticNodes = Collections.emptyList();
 private Optional<String> identityString = Optional.empty();
 private BesuPluginContextImpl besuPluginContext;
-private boolean autoLogsBloomIndexing = true;
+private boolean autoLogBloomCaching = true;
 public RunnerBuilder vertx(final Vertx vertx) {
 this.vertx = vertx;
@@ -271,8 +271,8 @@ public class RunnerBuilder {
 return this;
 }
-public RunnerBuilder autoLogsBloomIndexing(final boolean autoLogsBloomIndexing) {
-this.autoLogsBloomIndexing = autoLogsBloomIndexing;
+public RunnerBuilder autoLogBloomCaching(final boolean autoLogBloomCaching) {
+this.autoLogBloomCaching = autoLogBloomCaching;
 return this;
 }
@@ -534,7 +534,7 @@ public class RunnerBuilder {
 metricsService,
 besuController,
 dataDir,
-autoLogsBloomIndexing ? blockchainQueries.getTransactionLogsIndexer() : Optional.empty(),
+autoLogBloomCaching ? blockchainQueries.getTransactionLogBloomCacher() : Optional.empty(),
 context.getBlockchain());
 }

@@ -791,10 +791,10 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
 private String keyValueStorageName = DEFAULT_KEY_VALUE_STORAGE_NAME;
 @Option(
-names = {"--auto-logs-bloom-indexing-enabled"},
-description = "Enable Automatic logs bloom indexing (default: ${DEFAULT-VALUE})",
+names = {"--auto-log-bloom-caching-enabled"},
+description = "Enable automatic log bloom caching (default: ${DEFAULT-VALUE})",
 arity = "1")
-private final Boolean autoLogsBloomIndexingEnabled = true;
+private final Boolean autoLogBloomCachingEnabled = true;
 @Option(
 names = {"--override-genesis-config"},
@@ -1701,7 +1701,7 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
 .staticNodes(staticNodes)
 .identityString(identityString)
 .besuPluginContext(besuPluginContext)
-.autoLogsBloomIndexing(autoLogsBloomIndexingEnabled)
+.autoLogBloomCaching(autoLogBloomCachingEnabled)
 .build();
 addShutdownHook(runner);

@@ -19,10 +19,10 @@ package org.hyperledger.besu.cli.subcommands.operator;
 import static com.google.common.base.Preconditions.checkNotNull;
 import static com.google.common.base.Preconditions.checkState;
 import static org.hyperledger.besu.cli.DefaultCommandValues.MANDATORY_LONG_FORMAT_HELP;
-import static org.hyperledger.besu.ethereum.api.query.TransactionLogsIndexer.BLOCKS_PER_BLOOM_CACHE;
+import static org.hyperledger.besu.ethereum.api.query.TransactionLogBloomCacher.BLOCKS_PER_BLOOM_CACHE;
 import org.hyperledger.besu.controller.BesuController;
-import org.hyperledger.besu.ethereum.api.query.TransactionLogsIndexer;
+import org.hyperledger.besu.ethereum.api.query.TransactionLogBloomCacher;
 import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
 import org.hyperledger.besu.ethereum.eth.manager.EthScheduler;
 import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
@@ -43,7 +43,7 @@ public class GenerateLogBloomCache implements Runnable {
 names = "--start-block",
 paramLabel = MANDATORY_LONG_FORMAT_HELP,
 description =
-"The block to start generating indexes. Must be an increment of "
+"The block to start generating the cache. Must be an increment of "
 + BLOCKS_PER_BLOOM_CACHE
 + " (default: ${DEFAULT-VALUE})",
 arity = "1..1")
@@ -52,7 +52,7 @@ public class GenerateLogBloomCache implements Runnable {
 @Option(
 names = "--end-block",
 paramLabel = MANDATORY_LONG_FORMAT_HELP,
-description = "The block to stop generating indexes (default is last block of the chain).",
+description = "The block to stop generating the cache (default is last block of the chain).",
 arity = "1..1")
 private final Long endBlock = Long.MAX_VALUE;
@@ -69,9 +69,9 @@ public class GenerateLogBloomCache implements Runnable {
 final EthScheduler scheduler = new EthScheduler(1, 1, 1, 1, new NoOpMetricsSystem());
 try {
 final long finalBlock = Math.min(blockchain.getChainHeadBlockNumber(), endBlock);
-final TransactionLogsIndexer indexer =
-new TransactionLogsIndexer(blockchain, cacheDir, scheduler);
-indexer.generateLogBloomCache(startBlock, finalBlock);
+final TransactionLogBloomCacher cacher =
+new TransactionLogBloomCacher(blockchain, cacheDir, scheduler);
+cacher.generateLogBloomCache(startBlock, finalBlock);
 } finally {
 scheduler.stop();
 try {

@@ -175,7 +175,7 @@ public class BesuCommandTest extends CommandTestAbstract {
 verify(mockRunnerBuilder).webSocketConfiguration(eq(DEFAULT_WEB_SOCKET_CONFIGURATION));
 verify(mockRunnerBuilder).metricsConfiguration(eq(DEFAULT_METRICS_CONFIGURATION));
 verify(mockRunnerBuilder).ethNetworkConfig(ethNetworkArg.capture());
-verify(mockRunnerBuilder).autoLogsBloomIndexing(eq(true));
+verify(mockRunnerBuilder).autoLogBloomCaching(eq(true));
 verify(mockRunnerBuilder).build();
 verify(mockControllerBuilderFactory).fromEthNetworkConfig(ethNetworkArg.capture(), any());

@@ -220,7 +220,7 @@ public abstract class CommandTestAbstract {
 when(mockRunnerBuilder.staticNodes(any())).thenReturn(mockRunnerBuilder);
 when(mockRunnerBuilder.identityString(any())).thenReturn(mockRunnerBuilder);
 when(mockRunnerBuilder.besuPluginContext(any())).thenReturn(mockRunnerBuilder);
-when(mockRunnerBuilder.autoLogsBloomIndexing(anyBoolean())).thenReturn(mockRunnerBuilder);
+when(mockRunnerBuilder.autoLogBloomCaching(anyBoolean())).thenReturn(mockRunnerBuilder);
 when(mockRunnerBuilder.build()).thenReturn(mockRunner);
 when(storageService.getByName("rocksdb")).thenReturn(Optional.of(rocksDBStorageFactory));

@@ -136,4 +136,5 @@ key-value-storage="rocksdb"
 # Gas limit
 target-gas-limit=8000000
-auto-logs-bloom-indexing-enabled=true
+# transaction log bloom filter caching
+auto-log-bloom-caching-enabled=true

@@ -73,8 +73,8 @@ public class AdminGenerateLogBloomCache implements JsonRpcMethod {
 return new JsonRpcSuccessResponse(
 requestContext.getRequest().getId(),
 blockchainQueries
-.getTransactionLogsIndexer()
-.map(indexer -> indexer.requestIndexing(startBlock, stopBlock))
+.getTransactionLogBloomCacher()
+.map(cacher -> cacher.requestCaching(startBlock, stopBlock))
 .orElse(null));
 }
 }
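The hunk above is the admin_generateLogBloomCache JSON-RPC entry point adapted to the renamed accessor. As a hedged sketch of the new call path only (the queries variable and the block bounds below are placeholders for illustration, not part of this change):

    // Illustrative caller of the renamed API; the bounds are arbitrary example values.
    final Optional<TransactionLogBloomCacher.CachingStatus> status =
        queries
            .getTransactionLogBloomCacher()
            .map(cacher -> cacher.requestCaching(0L, 100_000L));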

@@ -25,23 +25,23 @@ import java.util.OptionalLong;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-public class AutoTransactionLogsIndexingService {
+public class AutoTransactionLogBloomCachingService {
 protected static final Logger LOG = LogManager.getLogger();
 private final Blockchain blockchain;
-private final TransactionLogsIndexer transactionLogsIndexer;
+private final TransactionLogBloomCacher transactionLogBloomCacher;
 private OptionalLong blockAddedSubscriptionId = OptionalLong.empty();
 private OptionalLong chainReorgSubscriptionId = OptionalLong.empty();
-public AutoTransactionLogsIndexingService(
-final Blockchain blockchain, final TransactionLogsIndexer transactionLogsIndexer) {
+public AutoTransactionLogBloomCachingService(
+final Blockchain blockchain, final TransactionLogBloomCacher transactionLogBloomCacher) {
 this.blockchain = blockchain;
-this.transactionLogsIndexer = transactionLogsIndexer;
+this.transactionLogBloomCacher = transactionLogBloomCacher;
 }
 public void start() {
 try {
-LOG.info("Starting Auto transaction logs indexing service.");
-final Path cacheDir = transactionLogsIndexer.getCacheDir();
+LOG.info("Starting auto transaction log bloom caching service.");
+final Path cacheDir = transactionLogBloomCacher.getCacheDir();
 if (!cacheDir.toFile().exists() || !cacheDir.toFile().isDirectory()) {
 Files.createDirectory(cacheDir);
 }
@@ -50,25 +50,25 @@ public class AutoTransactionLogsIndexingService {
 blockchain.observeBlockAdded(
 (event, __) -> {
 if (event.isNewCanonicalHead()) {
-transactionLogsIndexer.cacheLogsBloomForBlockHeader(
+transactionLogBloomCacher.cacheLogsBloomForBlockHeader(
 event.getBlock().getHeader());
 }
 }));
 chainReorgSubscriptionId =
 OptionalLong.of(
 blockchain.observeChainReorg(
-(header, __) -> transactionLogsIndexer.cacheLogsBloomForBlockHeader(header)));
+(header, __) -> transactionLogBloomCacher.cacheLogsBloomForBlockHeader(header)));
-transactionLogsIndexer
+transactionLogBloomCacher
 .getScheduler()
-.scheduleFutureTask(transactionLogsIndexer::indexAll, Duration.ofMinutes(1));
+.scheduleFutureTask(transactionLogBloomCacher::cacheAll, Duration.ofMinutes(1));
 } catch (IOException e) {
-LOG.error("Unhandled indexing exception.", e);
+LOG.error("Unhandled caching exception.", e);
 }
 }
 public void stop() {
-LOG.info("Shutting down Auto transaction logs indexing service.");
+LOG.info("Shutting down Auto transaction logs caching service.");
 blockAddedSubscriptionId.ifPresent(blockchain::removeObserver);
 chainReorgSubscriptionId.ifPresent(blockchain::removeChainReorgObserver);
 }

@@ -15,7 +15,7 @@
 package org.hyperledger.besu.ethereum.api.query;
 import static com.google.common.base.Preconditions.checkArgument;
-import static org.hyperledger.besu.ethereum.api.query.TransactionLogsIndexer.BLOCKS_PER_BLOOM_CACHE;
+import static org.hyperledger.besu.ethereum.api.query.TransactionLogBloomCacher.BLOCKS_PER_BLOOM_CACHE;
 import org.hyperledger.besu.ethereum.chain.Blockchain;
 import org.hyperledger.besu.ethereum.chain.TransactionLocation;
@@ -61,7 +61,7 @@ public class BlockchainQueries {
 private final WorldStateArchive worldStateArchive;
 private final Blockchain blockchain;
 private final Optional<Path> cachePath;
-private final Optional<TransactionLogsIndexer> transactionLogsIndexer;
+private final Optional<TransactionLogBloomCacher> transactionLogBloomCacher;
 public BlockchainQueries(final Blockchain blockchain, final WorldStateArchive worldStateArchive) {
 this(blockchain, worldStateArchive, Optional.empty(), Optional.empty());
@@ -82,9 +82,10 @@ public class BlockchainQueries {
 this.blockchain = blockchain;
 this.worldStateArchive = worldStateArchive;
 this.cachePath = cachePath;
-this.transactionLogsIndexer =
+this.transactionLogBloomCacher =
 (cachePath.isPresent() && scheduler.isPresent())
-? Optional.of(new TransactionLogsIndexer(blockchain, cachePath.get(), scheduler.get()))
+? Optional.of(
+new TransactionLogBloomCacher(blockchain, cachePath.get(), scheduler.get()))
 : Optional.empty();
 }
@@ -96,8 +97,8 @@ public class BlockchainQueries {
 return worldStateArchive;
 }
-public Optional<TransactionLogsIndexer> getTransactionLogsIndexer() {
-return transactionLogsIndexer;
+public Optional<TransactionLogBloomCacher> getTransactionLogBloomCacher() {
+return transactionLogBloomCacher;
 }
 /**

@@ -43,7 +43,7 @@ import com.fasterxml.jackson.annotation.JsonGetter;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-public class TransactionLogsIndexer {
+public class TransactionLogBloomCacher {
 private static final Logger LOG = LogManager.getLogger();
@@ -59,9 +59,9 @@ public class TransactionLogsIndexer {
 private final Path cacheDir;
-private final IndexingStatus indexingStatus = new IndexingStatus();
+private final CachingStatus cachingStatus = new CachingStatus();
-public TransactionLogsIndexer(
+public TransactionLogBloomCacher(
 final Blockchain blockchain, final Path cacheDir, final EthScheduler scheduler) {
 this.blockchain = blockchain;
 this.cacheDir = cacheDir;
@@ -69,7 +69,7 @@ public class TransactionLogsIndexer {
 this.cachedSegments = new TreeMap<>();
 }
-public void indexAll() {
+public void cacheAll() {
 ensurePreviousSegmentsArePresent(blockchain.getChainHeadBlockNumber());
 }
@@ -81,24 +81,24 @@ public class TransactionLogsIndexer {
 return calculateCacheFileName(Long.toString(blockNumber / BLOCKS_PER_BLOOM_CACHE), cacheDir);
 }
-public IndexingStatus generateLogBloomCache(final long start, final long stop) {
+public CachingStatus generateLogBloomCache(final long start, final long stop) {
 checkArgument(
 start % BLOCKS_PER_BLOOM_CACHE == 0, "Start block must be at the beginning of a file");
 try {
-indexingStatus.indexing = true;
+cachingStatus.caching = true;
 LOG.info(
-"Generating transaction log indexes from block {} to block {} in {}",
+"Generating transaction log bloom cache from block {} to block {} in {}",
 start,
 stop,
 cacheDir);
 if (!Files.isDirectory(cacheDir) && !cacheDir.toFile().mkdirs()) {
 LOG.error("Cache directory '{}' does not exist and could not be made.", cacheDir);
-return indexingStatus;
+return cachingStatus;
 }
 final File pendingFile = calculateCacheFileName(PENDING, cacheDir);
 for (long blockNum = start; blockNum < stop; blockNum += BLOCKS_PER_BLOOM_CACHE) {
-LOG.info("Indexing segment at {}", blockNum);
+LOG.info("Caching segment at {}", blockNum);
 try (final FileOutputStream fos = new FileOutputStream(pendingFile)) {
 final long blockCount = fillCacheFile(blockNum, blockNum + BLOCKS_PER_BLOOM_CACHE, fos);
 if (blockCount == BLOCKS_PER_BLOOM_CACHE) {
@@ -114,12 +114,12 @@ public class TransactionLogsIndexer {
 }
 }
 } catch (final Exception e) {
-LOG.error("Unhandled indexing exception", e);
+LOG.error("Unhandled caching exception", e);
 } finally {
-indexingStatus.indexing = false;
-LOG.info("Indexing request complete");
+cachingStatus.caching = false;
+LOG.info("Caching request complete");
 }
-return indexingStatus;
+return cachingStatus;
 }
 private long fillCacheFile(
@@ -131,7 +131,7 @@ public class TransactionLogsIndexer {
 break;
 }
 fillCacheFileWithBlock(maybeHeader.get(), fos);
-indexingStatus.currentBlock = blockNum;
+cachingStatus.currentBlock = blockNum;
 blockNum++;
 }
 return blockNum - startBlock;
@@ -152,12 +152,12 @@ public class TransactionLogsIndexer {
 writer.write(ensureBloomBitsAreCorrectLength(blockHeader.getLogsBloom().toArray()));
 }
 } catch (IOException e) {
-LOG.error("Unhandled indexing exception.", e);
+LOG.error("Unhandled caching exception.", e);
 }
 }
 private void ensurePreviousSegmentsArePresent(final long blockNumber) {
-if (!indexingStatus.isIndexing()) {
+if (!cachingStatus.isCaching()) {
 scheduler.scheduleFutureTask(
 () -> {
 long currentSegment = (blockNumber / BLOCKS_PER_BLOOM_CACHE) - 1;
@@ -188,15 +188,15 @@ public class TransactionLogsIndexer {
 return logs;
 }
-public IndexingStatus requestIndexing(final long fromBlock, final long toBlock) {
+public CachingStatus requestCaching(final long fromBlock, final long toBlock) {
 boolean requestAccepted = false;
 try {
 if ((fromBlock < toBlock) && submissionLock.tryLock(100, TimeUnit.MILLISECONDS)) {
 try {
-if (!indexingStatus.indexing) {
+if (!cachingStatus.caching) {
 requestAccepted = true;
-indexingStatus.startBlock = fromBlock;
-indexingStatus.endBlock = toBlock;
+cachingStatus.startBlock = fromBlock;
+cachingStatus.endBlock = toBlock;
 scheduler.scheduleComputationTask(
 () ->
 generateLogBloomCache(
@@ -209,8 +209,8 @@ public class TransactionLogsIndexer {
 } catch (final InterruptedException e) {
 // ignore
 }
-indexingStatus.requestAccepted = requestAccepted;
-return indexingStatus;
+cachingStatus.requestAccepted = requestAccepted;
+return cachingStatus;
 }
 public EthScheduler getScheduler() {
@@ -221,11 +221,11 @@ public class TransactionLogsIndexer {
 return cacheDir;
 }
-public static final class IndexingStatus {
+public static final class CachingStatus {
 long startBlock;
 long endBlock;
 volatile long currentBlock;
-volatile boolean indexing;
+volatile boolean caching;
 boolean requestAccepted;
 @JsonGetter
@@ -244,8 +244,8 @@ public class TransactionLogsIndexer {
 }
 @JsonGetter
-public boolean isIndexing() {
-return indexing;
+public boolean isCaching() {
+return caching;
 }
 @JsonGetter

@@ -23,8 +23,8 @@ import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
 import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcResponse;
 import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse;
 import org.hyperledger.besu.ethereum.api.query.BlockchainQueries;
-import org.hyperledger.besu.ethereum.api.query.TransactionLogsIndexer;
-import org.hyperledger.besu.ethereum.api.query.TransactionLogsIndexer.IndexingStatus;
+import org.hyperledger.besu.ethereum.api.query.TransactionLogBloomCacher;
+import org.hyperledger.besu.ethereum.api.query.TransactionLogBloomCacher.CachingStatus;
 import java.util.List;
 import java.util.Optional;
@@ -41,7 +41,7 @@ import org.mockito.junit.MockitoJUnitRunner;
 public class AdminGenerateLogBloomCacheTest {
 @Mock private BlockchainQueries blockchainQueries;
-@Mock private TransactionLogsIndexer transactionLogsIndexer;
+@Mock private TransactionLogBloomCacher transactionLogBloomCacher;
 @Captor private ArgumentCaptor<Long> fromBlock;
 @Captor private ArgumentCaptor<Long> toBlock;
@@ -53,12 +53,12 @@ public class AdminGenerateLogBloomCacheTest {
 }
 @Test
-public void requestWithZeroParameters_NoIndexer_returnsNull() {
+public void requestWithZeroParameters_NoCacher_returnsNull() {
 final JsonRpcRequestContext request =
 new JsonRpcRequestContext(
 new JsonRpcRequest("2.0", "admin_generateLogBloomCache", new String[] {}));
-when(blockchainQueries.getTransactionLogsIndexer()).thenReturn(Optional.empty());
+when(blockchainQueries.getTransactionLogBloomCacher()).thenReturn(Optional.empty());
 final JsonRpcResponse actualResponse = method.response(request);
@@ -109,11 +109,11 @@ public class AdminGenerateLogBloomCacheTest {
 final JsonRpcRequestContext request =
 new JsonRpcRequestContext(new JsonRpcRequest("2.0", "admin_generateLogBloomCache", args));
-final IndexingStatus expectedStatus = new IndexingStatus();
+final CachingStatus expectedStatus = new CachingStatus();
-when(blockchainQueries.getTransactionLogsIndexer())
-.thenReturn(Optional.of(transactionLogsIndexer));
-when(transactionLogsIndexer.requestIndexing(fromBlock.capture(), toBlock.capture()))
+when(blockchainQueries.getTransactionLogBloomCacher())
+.thenReturn(Optional.of(transactionLogBloomCacher));
+when(transactionLogBloomCacher.requestCaching(fromBlock.capture(), toBlock.capture()))
 .thenReturn(expectedStatus);
 final JsonRpcResponse actualResponse = method.response(request);

@@ -16,7 +16,7 @@
 package org.hyperledger.besu.ethereum.api.query;
-import static org.hyperledger.besu.ethereum.api.query.TransactionLogsIndexer.BLOCKS_PER_BLOOM_CACHE;
+import static org.hyperledger.besu.ethereum.api.query.TransactionLogBloomCacher.BLOCKS_PER_BLOOM_CACHE;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.times;
