[NC-1646] Removed system/smoke tests + resources. (#32)

mark-terry committed 6 years ago (via GitHub)
parent 90705bbcb9
commit 591a9c9979
21 changed files (lines changed shown in parentheses):

  1. Jenkinsfile (3)
  2. README.md (4)
  3. settings.gradle (1)
  4. system-tests/build.gradle (41)
  5. system-tests/src/test/java/net/consensys/pantheon/tests/ClusterTestBase.java (152)
  6. system-tests/src/test/java/net/consensys/pantheon/tests/LogClusterInfoTest.java (86)
  7. system-tests/src/test/java/net/consensys/pantheon/tests/PantheonSmokeTest.java (101)
  8. system-tests/src/test/java/net/consensys/pantheon/tests/cluster/DockerUtils.java (121)
  9. system-tests/src/test/java/net/consensys/pantheon/tests/cluster/NodeAdminRpcUtils.java (65)
  10. system-tests/src/test/java/net/consensys/pantheon/tests/cluster/TestCluster.java (494)
  11. system-tests/src/test/java/net/consensys/pantheon/tests/cluster/TestClusterNode.java (177)
  12. system-tests/src/test/java/net/consensys/pantheon/tests/cluster/TestDockerNode.java (74)
  13. system-tests/src/test/java/net/consensys/pantheon/tests/cluster/TestGethLocalNode.java (71)
  14. system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth/Dockerfile-geth (25)
  15. system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth/Dockerfile-geth-ubuntu (45)
  16. system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth/bash_aliases (1)
  17. system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth/genesis.json (25)
  18. system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth/gethUtils.sh (42)
  19. system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth/run.sh (11)
  20. system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth/runBootNode.sh (13)
  21. system-tests/src/test/resources/net/consensys/pantheon/tests/ibft.json (26707)

Jenkinsfile (vendored): 3 lines removed

@@ -50,9 +50,6 @@ node {
stage('Check javadoc') {
sh './gradlew --no-daemon --parallel javadoc'
}
// stage('Smoke test') {
// sh 'DOCKER_HOST=$DOCKER_PORT DOCKER_HOSTNAME=docker ./gradlew --no-daemon smokeTest'
// }
stage('Jacoco root report') {
sh './gradlew --no-daemon jacocoRootReport'
}

README.md: 4 lines removed
@@ -56,10 +56,6 @@ The reference tests (described below) can be triggered with:
```
./gradlew referenceTest
```
The system tests can be triggered with:
```
./gradlew smokeTest
```
## Running Pantheon

settings.gradle: 1 line removed
@@ -19,5 +19,4 @@ include 'pantheon'
include 'services:kvstore'
include 'testutil'
include 'util'
include 'system-tests'
include 'errorprone-checks'

system-tests/build.gradle (deleted)
@@ -1,41 +0,0 @@
dependencies {
implementation 'com.github.docker-java:docker-java'
implementation 'com.squareup.okhttp3:okhttp'
implementation 'io.vertx:vertx-core'
implementation 'org.apache.logging.log4j:log4j-api'
implementation 'org.assertj:assertj-core'
runtime 'org.apache.logging.log4j:log4j-core'
testImplementation project(':ethereum:client')
testImplementation project(':ethereum:jsonrpc')
testImplementation project(':pantheon')
testImplementation project(':util')
testImplementation 'junit:junit'
testImplementation 'org.awaitility:awaitility'
}
test.enabled = false
task smokeTest(type: Test) {
mustRunAfter rootProject.subprojects*.test
description = 'Runs basic Pantheon smoke tests.'
group = 'verification'
systemProperty 'pantheon.test.distribution', "${buildDir}/pantheon-${project.version}"
}
task unpackTarDistribution(type: Copy) {
dependsOn rootProject.distTar
def tar = file("${projectDir}/../build/distributions/pantheon-${project.version}.tar.gz")
inputs.files tar
from tarTree(tar)
into {buildDir}
}
smokeTest.dependsOn(unpackTarDistribution)
smokeTest.dependsOn(rootProject.installDist)
// check intentionally does not depend on smokeTest, so that CI (CircleCI) won't try to run it and fail;
// Jenkins runs it as a separate task.
//check.dependsOn(smokeTest)

system-tests/src/test/java/net/consensys/pantheon/tests/ClusterTestBase.java (deleted)
@@ -1,152 +0,0 @@
package net.consensys.pantheon.tests;
import static java.util.stream.Collectors.joining;
import net.consensys.pantheon.tests.cluster.NodeAdminRpcUtils;
import net.consensys.pantheon.tests.cluster.TestCluster;
import net.consensys.pantheon.tests.cluster.TestClusterNode;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.assertj.core.api.Assertions;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TestRule;
import org.junit.rules.TestWatcher;
import org.junit.runner.Description;
public abstract class ClusterTestBase {
private static final Logger LOG = LogManager.getLogger();
@Rule
public TestRule watcher =
new TestWatcher() {
@Override
protected void starting(final Description description) {
LOG.info("Starting test: " + description.getMethodName());
}
@Override
protected void finished(final Description description) {
LOG.info("Finished test: " + description.getMethodName());
}
};
// TODO: I need to remember how I used to make this work in one shot, even with static
// @BeforeClass
protected static String suiteStartTime = null;
protected static String suiteName = null;
@BeforeClass
public static void setTestSuiteStartTime() {
final SimpleDateFormat fmt = new SimpleDateFormat("yyyyMMdd-HHmmss");
suiteStartTime = fmt.format(new Date());
}
public static void suiteName(final Class<?> clazz) {
suiteName = clazz.getSimpleName() + "-" + suiteStartTime;
}
public static String suiteName() {
return suiteName;
}
@BeforeClass
public static void printSystemProperties() {
final String env =
System.getenv()
.entrySet()
.stream()
.map(e -> e.getKey() + "=" + e.getValue())
.collect(joining(System.getProperty("line.separator")));
final String s =
System.getProperties()
.entrySet()
.stream()
.map(e -> e.getKey() + "=" + e.getValue())
.collect(joining(System.getProperty("line.separator")));
LOG.info("System Properties\n" + s);
LOG.info("Environment\n" + env);
}
/**
* Adds and verifies a Pantheon full-node container to the cluster
*
* @param cluster cluster to add node to
* @param containerName name to give the new container
*/
protected static TestClusterNode addVerifiedPantheonCtr(
final TestCluster cluster, final String containerName) throws Exception {
final TestClusterNode node = cluster.addPantheonFullDockerNode(containerName);
Assertions.assertThat(cluster.getNodes()).contains(node);
NodeAdminRpcUtils.testWeb3ClientVersionPantheon(node);
// TODO: Better verifications before handing off for testing
return node;
}
/**
* Adds and verifies a Geth full-node container to the cluster
*
* @param cluster cluster to add node to
* @param containerName name to give the new container
*/
protected static TestClusterNode addVerifiedGethCtr(
final TestCluster cluster, final String containerName) throws Exception {
final TestClusterNode node = cluster.addGethFullDockerNode(containerName);
Assertions.assertThat(cluster.getNodes()).contains(node);
NodeAdminRpcUtils.testWeb3ClientVersionGeth(node);
// TODO: Better verifications before handing off for testing
return node;
}
/**
* Adds and verifies a Geth boot-node container to the cluster. Generally, only one is required in
* the cluster, but adding more than one should not be harmful.
*
* @param cluster cluster to add node to
* @param containerName name to give the new container
*/
protected static TestClusterNode addVerifiedGethBootCtr(
final TestCluster cluster, final String containerName) throws Exception {
final TestClusterNode node = cluster.addGethBootDockerNode(containerName);
Assertions.assertThat(cluster.getNodes()).contains(node);
// TODO: Better verifications before handing off for testing
return node;
}
/**
* Adds and verifies a Geth local process to the cluster. A new tmp data directory will be created
* for each local process node.
*
* <p>TODO: Make this play nice with other nodes running in docker containers.
*
* @param cluster cluster to add node to
* @param gethCmd path to the geth command, e.g. /usr/bin/geth
* @param nodeNum two-digit node number. This will be used in node naming and port numbering.
*/
@SuppressWarnings("unused")
protected static TestClusterNode addVerifiedLocalGethProcess(
final TestCluster cluster, final String gethCmd, final String nodeNum) throws Exception {
final TestClusterNode node = cluster.addGethLocalNode(gethCmd, nodeNum);
Assertions.assertThat(cluster.getNodes()).contains(node);
NodeAdminRpcUtils.testWeb3ClientVersionGeth(node);
// TODO: Better verifications before handing off for testing
return node;
}
/** Helper method to stand up a private cluster. */
@SuppressWarnings("unused")
private void buildVerifiedCluster(
final TestCluster cluster,
final int numPantheonNodes,
final int numGethNodes,
final int numParityNodes) {
// TODO: Implement if useful
// TODO: If test times are too long, consider creating/destroying nodes in parallel
}
}

system-tests/src/test/java/net/consensys/pantheon/tests/LogClusterInfoTest.java (deleted)
@@ -1,86 +0,0 @@
package net.consensys.pantheon.tests;
import static net.consensys.pantheon.tests.cluster.NodeAdminRpcUtils.getPeersJson;
import net.consensys.pantheon.tests.cluster.TestCluster;
import net.consensys.pantheon.tests.cluster.TestClusterNode;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@SuppressWarnings("UnusedReturnValue")
public class LogClusterInfoTest extends ClusterTestBase {
private static final Logger LOG = LogManager.getLogger();
protected static TestCluster testCluster;
@BeforeClass
public static void createCluster() throws Exception {
suiteName(LogClusterInfoTest.class);
try {
// TODO: If test times are too long, consider creating/destroying nodes in parallel
testCluster = new TestCluster(suiteName().toLowerCase());
addVerifiedGethBootCtr(testCluster, suiteName() + "-GethBoot00");
addVerifiedGethCtr(testCluster, suiteName() + "-Geth01");
addVerifiedGethCtr(testCluster, suiteName() + "-Geth02");
addVerifiedPantheonCtr(testCluster, suiteName() + "-Pantheon01");
addVerifiedPantheonCtr(testCluster, suiteName() + "-Pantheon02");
// addVerifiedLocalGethProcess(cluster, "/usr/bin/geth", "04");
Thread.sleep(2_000); // Wait for everything to settle
} catch (final Exception e) {
LOG.error("Unable to build private test cluster.", e);
throw e;
}
}
@AfterClass
public static void destroyCluster() {
try {
testCluster.close();
} catch (final Exception e) {
LOG.error("Error destroying cluster. Ignoring and continuing. cluster=" + testCluster, e);
}
}
@Test
public void logClusterInfo() throws Exception {
LOG.info("Cluster: " + testCluster);
}
@Test
public void logPeerPorts() throws Exception {
for (final TestClusterNode node : testCluster.getNodes()) {
if (node.isBootNode()) {
continue; // Skip it. Boot nodes don't have any interfaces to query
}
LOG.info("node=" + node);
final JsonArray peersJson = getPeersJson(node);
for (int i = 0; i < peersJson.size(); i++) {
final JsonObject network = peersJson.getJsonObject(i).getJsonObject("network");
final String local = network.getString("localAddress");
final String remote = network.getString("remoteAddress");
LOG.info(
String.format(
"Node %s discovered peer: local port %s, remote port %s",
node.getNodeName(), local, remote));
}
}
}
@Test
public void logAdminPeers() throws Exception {
for (final TestClusterNode node : testCluster.getNodes()) {
if (node.isBootNode()) {
continue; // Skip it. Boot nodes don't have any interfaces to query
}
LOG.info(
String.format(
"Node %s Admin.Peers:\n%s", node.getNodeName(), getPeersJson(node).encodePrettily()));
}
}
}

system-tests/src/test/java/net/consensys/pantheon/tests/PantheonSmokeTest.java (deleted)
@@ -1,101 +0,0 @@
package net.consensys.pantheon.tests;
import static com.google.common.io.MoreFiles.deleteDirectoryContents;
import static org.assertj.core.api.Assertions.assertThat;
import net.consensys.pantheon.ethereum.jsonrpc.JsonRpcConfiguration;
import java.io.File;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetAddress;
import java.net.Socket;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.TimeUnit;
import com.google.common.io.RecursiveDeleteOption;
import io.vertx.core.json.Json;
import okhttp3.MediaType;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.Response;
import org.awaitility.Awaitility;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
public final class PantheonSmokeTest {
@Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder();
private Path pantheonBin;
@Before
public void before() throws IOException {
final String dist = System.getProperty("pantheon.test.distribution");
if (dist == null) {
throw new IllegalStateException(
"System property 'pantheon.test.distribution' must be set in order for this test to work.");
}
final Path pantheonBase = Paths.get(dist);
final Path dataDirectory = pantheonBase.resolve("data");
Files.createDirectories(dataDirectory);
deleteDirectoryContents(dataDirectory, RecursiveDeleteOption.ALLOW_INSECURE);
pantheonBin = pantheonBase.resolve("bin").resolve("pantheon");
}
@Test
public void startsWithoutArguments() throws Exception {
final File stdout = temporaryFolder.newFile();
final File stderr = temporaryFolder.newFile();
final ProcessBuilder processBuilder =
new ProcessBuilder(pantheonBin.toString()).redirectOutput(stdout).redirectError(stderr);
processBuilder.environment().remove("JAVA_TOOL_OPTIONS");
final Process pantheon = processBuilder.start();
try {
final OkHttpClient client = new OkHttpClient.Builder().build();
waitForPort(JsonRpcConfiguration.DEFAULT_JSON_RPC_PORT);
try (Response jsonRpcResponse =
client
.newCall(
new Request.Builder()
.url("http://localhost:" + JsonRpcConfiguration.DEFAULT_JSON_RPC_PORT)
.post(
RequestBody.create(
MediaType.parse("application/json; charset=utf-8"),
"{\"jsonrpc\":\"2.0\",\"id\":"
+ Json.encode(1)
+ ",\"method\":\"web3_clientVersion\"}"))
.build())
.execute()) {
assertThat(jsonRpcResponse.code()).isEqualTo(HttpURLConnection.HTTP_OK);
}
pantheon.destroy();
assertThat(pantheon.waitFor(10L, TimeUnit.SECONDS)).isTrue();
// When JVM receives SIGTERM it exits 143
assertThat(pantheon.exitValue()).isEqualTo(143);
} finally {
pantheon.destroyForcibly();
pantheon.waitFor();
}
}
private static void waitForPort(final int port) {
Awaitility.await()
.ignoreExceptions()
.until(
() -> {
try (Socket client = new Socket(InetAddress.getLoopbackAddress(), port)) {
return true;
}
});
}
}
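
For reference, the JSON-RPC request the test assembles with OkHttp can be reproduced by hand against a running node. A minimal sketch, assuming the default JSON-RPC port 8545 on localhost (the value of JsonRpcConfiguration.DEFAULT_JSON_RPC_PORT):
```
# Hypothetical manual equivalent of the test's web3_clientVersion call
curl -s -X POST \
  -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","id":1,"method":"web3_clientVersion"}' \
  http://localhost:8545
```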

system-tests/src/test/java/net/consensys/pantheon/tests/cluster/DockerUtils.java (deleted)
@@ -1,121 +0,0 @@
package net.consensys.pantheon.tests.cluster;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.HashSet;
import java.util.Set;
import com.github.dockerjava.api.DockerClient;
import com.github.dockerjava.api.model.BuildResponseItem;
import com.github.dockerjava.core.command.BuildImageResultCallback;
import com.google.common.io.Resources;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public class DockerUtils {
private static final Logger LOG = LogManager.getLogger();
/**
* Creates docker image
*
* @return docker image ID
* @throws FileNotFoundException if the Dockerfile can not be located
*/
public static String createPantheonDockerImage(final DockerClient dockerClient, final String name)
throws FileNotFoundException {
File dockerFile;
try {
final String resource = DockerUtils.class.getPackage().getName().replaceAll("\\.", "/");
final URL url = Resources.getResource(resource);
if (url == null) throw new FileNotFoundException("Resource Not Found: " + resource);
dockerFile = findParentDirBySiblingName(new File(url.toURI()), "gradlew.bat");
if (dockerFile == null) throw new FileNotFoundException("File Not Found: " + url);
if (!dockerFile.exists()) throw new FileNotFoundException("File Not Found: " + dockerFile);
} catch (final URISyntaxException e) {
throw new RuntimeException(e);
}
String imageId;
try (BuildImageResultCallback callback =
new BuildImageResultCallback() {
@Override
public void onNext(final BuildResponseItem item) {
LOG.info("createDockerImage log:" + item);
super.onNext(item);
}
}) {
final Set<String> tags = new HashSet<>();
if (name != null) {
tags.add(name);
}
LOG.info("Creating Docker Image for : " + dockerFile);
imageId = dockerClient.buildImageCmd(dockerFile).withTags(tags).exec(callback).awaitImageId();
LOG.info("Created Docker Image [" + imageId + "] for : " + dockerFile);
} catch (final IOException e) {
throw new RuntimeException("Failed to create Docker image for " + dockerFile, e);
}
return imageId;
}
/**
* Creates docker image
*
* @param imageType used in locating the correct Dockerfile on disk. eg "pantheon", "geth"
* @return docker image ID
* @throws FileNotFoundException if the Dockerfile can not be located
*/
public static String createDockerImage(
final DockerClient dockerClient, final String name, final String imageType)
throws FileNotFoundException {
File dockerFile;
try {
final String resource =
DockerUtils.class.getPackage().getName().replaceAll("\\.", "/")
+ "/docker/"
+ imageType
+ "/Dockerfile-"
+ imageType;
final URL url = Resources.getResource(resource);
if (url == null) throw new FileNotFoundException("Resource Not Found: " + resource);
dockerFile = new File(url.toURI());
if (!dockerFile.exists()) throw new FileNotFoundException("File Not Found: " + dockerFile);
} catch (final URISyntaxException e) {
throw new RuntimeException(e);
}
final BuildImageResultCallback callback =
new BuildImageResultCallback() {
@Override
public void onNext(final BuildResponseItem item) {
LOG.info("createDockerImage log:" + item);
super.onNext(item);
}
};
final Set<String> tags = new HashSet<>();
if (name != null) {
tags.add(name);
}
return dockerClient.buildImageCmd(dockerFile).withTags(tags).exec(callback).awaitImageId();
}
public static File findParentDirBySiblingName(final File thisFile, final String siblingName) {
File parent = thisFile.getParentFile();
while (parent != null) {
final File file = new File(parent.getAbsolutePath() + File.separatorChar + siblingName);
if (file.exists()) {
return parent;
}
parent = parent.getParentFile();
}
return null;
}
}
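
These helpers drive image builds through the docker-java client. A roughly equivalent manual build, shown only as a sketch (the image tag is hypothetical; the Dockerfile path is the one listed in the resources above):
```
# Hypothetical manual counterpart of DockerUtils.createDockerImage(dockerClient, "mycluster:geth", "geth")
cd system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth
docker build -f Dockerfile-geth -t mycluster:geth .
```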

system-tests/src/test/java/net/consensys/pantheon/tests/cluster/NodeAdminRpcUtils.java (deleted)
@@ -1,65 +0,0 @@
package net.consensys.pantheon.tests.cluster;
import static java.util.concurrent.TimeUnit.SECONDS;
import java.io.IOException;
import java.net.ConnectException;
import io.vertx.core.json.Json;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.awaitility.Awaitility;
/** Helper class that performs common JSON-RPC Admin calls */
public class NodeAdminRpcUtils {
private static final Logger LOG = LogManager.getLogger();
public static JsonArray postMethodArray(final TestClusterNode node, final String method)
throws IOException {
if (node.isBootNode())
throw new IllegalArgumentException("Can't Call JSON-RPC on boot node. node=" + node);
final String id = "123";
final String body =
"{\"jsonrpc\":\"2.0\",\"id\":" + Json.encode(id) + ",\"method\":\"" + method + "\"}";
final JsonObject json = node.executeJsonRpc(null, "POST", body);
return json.getJsonArray("result");
}
public static String postMethodString(final TestClusterNode node, final String method)
throws IOException {
if (node.isBootNode())
throw new IllegalArgumentException("Can't Call JSON-RPC on boot node. node=" + node);
final String id = "123";
final String body =
"{\"jsonrpc\":\"2.0\",\"id\":" + Json.encode(id) + ",\"method\":\"" + method + "\"}";
final JsonObject json = node.executeJsonRpc(null, "POST", body);
return json.getString("result");
}
public static JsonArray getPeersJson(final TestClusterNode node) throws IOException {
return postMethodArray(node, "admin_peers");
}
/** Verify JSON-RPC is accessible. */
public static void testWeb3ClientVersionSuccessful(
final TestClusterNode node, final String prefix) {
Awaitility.await()
.atMost(30, SECONDS)
.ignoreException(ConnectException.class)
.until(
() -> {
final String result = postMethodString(node, "web3_clientVersion");
return result.startsWith(prefix);
});
}
public static void testWeb3ClientVersionGeth(final TestClusterNode node) {
testWeb3ClientVersionSuccessful(node, "Geth/");
}
public static void testWeb3ClientVersionPantheon(final TestClusterNode node) {
testWeb3ClientVersionSuccessful(node, "pantheon/");
}
}

system-tests/src/test/java/net/consensys/pantheon/tests/cluster/TestCluster.java (deleted)
@@ -1,494 +0,0 @@
package net.consensys.pantheon.tests.cluster;
import static java.lang.Thread.sleep;
import static org.apache.commons.lang.StringUtils.join;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.github.dockerjava.api.DockerClient;
import com.github.dockerjava.api.command.CreateContainerCmd;
import com.github.dockerjava.api.command.CreateContainerResponse;
import com.github.dockerjava.api.command.ExecCreateCmdResponse;
import com.github.dockerjava.api.command.InspectContainerResponse;
import com.github.dockerjava.api.model.ExposedPort;
import com.github.dockerjava.api.model.InternetProtocol;
import com.github.dockerjava.api.model.Ports;
import com.github.dockerjava.core.DefaultDockerClientConfig;
import com.github.dockerjava.core.DockerClientBuilder;
import com.github.dockerjava.core.command.ExecStartResultCallback;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.LineIterator;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.assertj.core.api.Assertions;
public final class TestCluster implements Closeable {
private static final Logger LOG = LogManager.getLogger();
private final DockerClient dockerClient;
private final List<TestClusterNode> nodes = new ArrayList<>();
private final Map<String, String> imageIds = new HashMap<>();
private final List<String> bootNodes = new ArrayList<>();
private final String clusterPrefix;
/**
* @param clusterPrefix Prefix to be used for all resources (e.g. data dirs, docker containers) used
* by this cluster. This makes it easier to clean up after test runs in the case of a
* catastrophic failure where the test cannot clean up after itself. Consider something like
* "myTestSuiteName-datetime"
*/
public TestCluster(final String clusterPrefix) {
Assertions.assertThat(clusterPrefix).isNotNull();
this.clusterPrefix = clusterPrefix;
dockerClient =
DockerClientBuilder.getInstance(
DefaultDockerClientConfig.createDefaultConfigBuilder().build())
.build();
}
/** Returns an unmodifiable List of the cluster's nodes. */
public List<TestClusterNode> getNodes() {
return Collections.unmodifiableList(nodes);
}
/**
* Creates a Pantheon boot-node in a docker container for this cluster
*
* @throws FileNotFoundException if the Dockerfile can't be found
*/
public TestClusterNode addPantheonBootDockerNode(final String containerName)
throws FileNotFoundException {
throw new UnsupportedOperationException("Pantheon boot nodes are not yet supported");
}
/**
* Creates a Pantheon full-node in a docker container for this cluster
*
* @throws FileNotFoundException if the Dockerfile can't be found
*/
public TestClusterNode addPantheonFullDockerNode(final String containerName)
throws FileNotFoundException {
if (bootNodes.isEmpty()) {
throw new IllegalStateException(
"Must result at least 1 boot node in this cluster before added regular nodes");
}
return addPantheonDockerNode(false, containerName, null);
}
/**
* Create a Pantheon Docker container with an optional custom start command.
*
* @param isBootNode true if this node is a boot-only node.
* @param containerName name for container
* @param cmd Optional. Command to override start the container with. NULL will use the default
* from Dockerfile. One use is to be able to start both full-node and boot-node using the same
* image
*/
private TestClusterNode addPantheonDockerNode(
final boolean isBootNode, final String containerName, final String cmd)
throws FileNotFoundException {
final String imageName = clusterPrefix + ":pantheon";
if (!imageIds.containsKey(imageName)) {
// Only Create the image one time
final String imageId = DockerUtils.createPantheonDockerImage(dockerClient, imageName);
imageIds.put(imageName, imageId);
}
final List<String> env = new ArrayList<>();
env.add("bootnodes=" + join(bootNodes, ','));
final String hostName = containerName;
// TODO: Run as non-root user
final CreateContainerCmd createCtrCmd =
dockerClient
.createContainerCmd(imageName)
.withName(containerName)
.withPublishAllPorts(Boolean.TRUE)
.withHostName(hostName)
.withEnv(env)
.withUser("root");
if (cmd != null) {
// Override start command if one was provided
createCtrCmd.withCmd(cmd);
} else {
final List<String> args = new ArrayList<>();
args.add("--bootnodes");
args.add(join(bootNodes, ','));
args.add("--rpc-listen");
args.add("0.0.0.0:8545");
args.add("--genesis");
args.add("/opt/pantheon/genesis.json");
createCtrCmd.withCmd(args);
}
final CreateContainerResponse createCtrResp = createCtrCmd.exec();
final String containerId = createCtrResp.getId();
LOG.info(
"Starting Container: "
+ containerName
+ ", id="
+ containerId
+ ", cmd="
+ join(createCtrCmd.getCmd(), ' '));
dockerClient.startContainerCmd(containerId).exec();
final InspectContainerResponse startedContainer =
dockerClient.inspectContainerCmd(containerId).exec();
final String ipAddress = startedContainer.getNetworkSettings().getIpAddress();
final Ports actualPorts = startedContainer.getNetworkSettings().getPorts();
Ports.Binding hostPort = actualPorts.getBindings().get(new ExposedPort(8545))[0];
final int jsonRpcPort = Integer.valueOf(hostPort.getHostPortSpec());
hostPort = actualPorts.getBindings().get(new ExposedPort(30303, InternetProtocol.UDP))[0];
final int discoveryPort = Integer.valueOf(hostPort.getHostPortSpec());
final String filePath = "/var/lib/pantheon/node01/enode.log";
// TODO: Rename getGethEnode, or make different method for pantheon
final String eNode = getGethEnode(dockerClient, containerId, filePath);
LOG.info("eNode = " + eNode);
final InetSocketAddress discoveryAddress = new InetSocketAddress(ipAddress, 30303);
final InetSocketAddress jsonRpcAddress = new InetSocketAddress(ipAddress, 8545);
final InetSocketAddress hostDiscoveryAddress =
new InetSocketAddress("localhost", discoveryPort);
final InetSocketAddress hostJsonRpcAddress = new InetSocketAddress("localhost", jsonRpcPort);
final TestClusterNode node =
new TestDockerNode(
dockerClient,
hostName,
containerId,
isBootNode,
discoveryAddress,
jsonRpcAddress,
hostDiscoveryAddress,
hostJsonRpcAddress,
eNode);
nodes.add(node);
LOG.info(String.format("Added node : %s", node));
return node;
}
/**
* Creates a Geth boot-node in a docker container for this cluster
*
* @throws FileNotFoundException if the Dockerfile can't be found
*/
public TestClusterNode addGethBootDockerNode(final String containerName)
throws FileNotFoundException {
// TODO: Figure out sh -c syntax so this node is started the same way full nodes are, and
// show up in the same way in ps list.
// TestClusterNode node = addGethDockerNode(containerName, "/bin/sh -c /runBootNode.sh");
final TestClusterNode node = addGethDockerNode(true, containerName, "/runBootNode.sh");
bootNodes.add(node.getEnode());
return node;
}
/**
* Creates a Geth full-node in a docker container for this cluster
*
* @throws FileNotFoundException if the Dockerfile can't be found
*/
public TestClusterNode addGethFullDockerNode(final String containerName)
throws FileNotFoundException {
if (bootNodes.isEmpty()) {
throw new IllegalStateException(
"Must result at least 1 boot node in this cluster before added regular nodes");
}
return addGethDockerNode(false, containerName, null);
}
/**
* Create a Geth Docker container with an optional custom start command.
*
* @param isBootNode true if this node is a boot-only node.
* @param containerName name for container
* @param cmd Optional. Command to override start the container with. NULL will use the default
* from Dockerfile. One use is to be able to start both Geth full-node and boot-node using the
* same image
*/
private TestClusterNode addGethDockerNode(
final boolean isBootNode, final String containerName, final String cmd)
throws FileNotFoundException {
final String imageName = clusterPrefix + ":geth";
if (!imageIds.containsKey(imageName)) {
// Only Create the image one time
final String imageId = DockerUtils.createDockerImage(dockerClient, imageName, "geth");
imageIds.put(imageName, imageId);
}
final List<String> env = new ArrayList<>();
env.add("geth_bootnodes=" + join(bootNodes, ','));
final String hostName = containerName;
// TODO: Run as non-root user
final CreateContainerCmd createCtrCmd =
dockerClient
.createContainerCmd(imageName)
.withName(containerName)
.withPublishAllPorts(Boolean.TRUE)
.withHostName(hostName)
.withEnv(env)
.withUser("root");
if (cmd != null) {
// Override start command if one was provided
createCtrCmd.withCmd(cmd);
} else {
final List<String> args = new ArrayList<>();
args.add("--networkid");
args.add("15"); // Test use networkid 15 so there is no chance of connecting to MainNet
createCtrCmd.withCmd(args);
}
final CreateContainerResponse createCtrResp = createCtrCmd.exec();
final String containerId = createCtrResp.getId();
LOG.info("Starting Container: " + containerName + ", id=" + containerId);
dockerClient.startContainerCmd(containerId).exec();
final InspectContainerResponse startedContainer =
dockerClient.inspectContainerCmd(containerId).exec();
final String ipAddress = startedContainer.getNetworkSettings().getIpAddress();
final Ports actualPorts = startedContainer.getNetworkSettings().getPorts();
Ports.Binding hostPort = actualPorts.getBindings().get(new ExposedPort(8545))[0];
final int jsonRpcPort = Integer.valueOf(hostPort.getHostPortSpec());
hostPort = actualPorts.getBindings().get(new ExposedPort(30303, InternetProtocol.UDP))[0];
final int discoveryPort = Integer.valueOf(hostPort.getHostPortSpec());
final String filePath = "/var/lib/geth/node01/enode.log";
final String eNode = getGethEnode(dockerClient, containerId, filePath);
LOG.info("eNode = " + eNode);
final InetSocketAddress discoveryAddress = new InetSocketAddress(ipAddress, 30303);
final InetSocketAddress jsonRpcAddress = new InetSocketAddress(ipAddress, 8545);
final InetSocketAddress hostDiscoveryAddress =
new InetSocketAddress("localhost", discoveryPort);
final InetSocketAddress hostJsonRpcAddress = new InetSocketAddress("localhost", jsonRpcPort);
final TestClusterNode gethNode =
new TestDockerNode(
dockerClient,
hostName,
containerId,
isBootNode,
discoveryAddress,
jsonRpcAddress,
hostDiscoveryAddress,
hostJsonRpcAddress,
eNode);
nodes.add(gethNode);
LOG.info(String.format("Added node : %s", gethNode));
return gethNode;
}
public static String getGethEnode(
final DockerClient dockerClient, final String containerId, final String filePath) {
final ByteArrayOutputStream stdout = new ByteArrayOutputStream();
final ByteArrayOutputStream stderr = new ByteArrayOutputStream();
try {
// Todo, Poll instead of long sleep
sleep(5000);
final ExecCreateCmdResponse execCreateCmdResponse =
dockerClient
.execCreateCmd(containerId)
.withAttachStdout(true)
.withAttachStderr(true)
.withCmd("/bin/cat", filePath)
.exec();
dockerClient
.execStartCmd(execCreateCmdResponse.getId())
.exec(new ExecStartResultCallback(stdout, stderr))
.awaitCompletion();
} catch (final InterruptedException e) {
throw new RuntimeException("Unable to get Geth Enode.", e);
}
// Todo Validate that output is valid enode format
// Todo validate stderr is empty
return stdout.toString().trim();
}
public TestClusterNode addGethLocalNode(final String gethCmd, final String nodeNum)
throws IOException {
final SimpleDateFormat fmt = new SimpleDateFormat("yyyyMMdd-HHmmss");
final String imageName = "pantheontest-" + fmt.format(new Date()) + ":geth";
final String hostName = "localhost";
final String tmpPath = System.getProperty("java.io.tmpdir");
if (tmpPath == null)
throw new IllegalStateException("System Property 'java.io.tmpdir' must not be null");
final File datadir = new File(tmpPath + "/" + imageName);
datadir.mkdirs();
final int ethPort = Integer.parseInt("304" + nodeNum);
final int jsonRpcPort = Integer.parseInt("85" + nodeNum);
final int discoveryPort = ethPort;
final InetSocketAddress hostDiscoveryAddress = new InetSocketAddress(hostName, ethPort);
final InetSocketAddress hostJsonRpcAddress = new InetSocketAddress(hostName, jsonRpcPort);
final List<String> cmdList = new ArrayList<>();
cmdList.add(gethCmd);
cmdList.add("--datadir");
cmdList.add(datadir.getAbsolutePath());
cmdList.add("--port");
cmdList.add(String.valueOf(ethPort));
cmdList.add("--bootNodes");
cmdList.add(join(bootNodes, ','));
cmdList.add("--rpc");
cmdList.add("--rpcapi");
cmdList.add("eth,web3,admin");
cmdList.add("--rpcaddr");
cmdList.add("0.0.0.0");
cmdList.add("--rpcport");
cmdList.add(String.valueOf(jsonRpcPort));
cmdList.add("rpc");
final File log = new File(datadir, "geth.log");
final ProcessBuilder pb =
new ProcessBuilder(gethCmd)
.directory(datadir)
.redirectErrorStream(true)
.redirectOutput(ProcessBuilder.Redirect.appendTo(log));
LOG.info("CmdList: " + join(cmdList, ' '));
final Process p = pb.start();
Assertions.assertThat(pb.redirectInput()).isEqualTo(ProcessBuilder.Redirect.PIPE);
Assertions.assertThat(pb.redirectOutput().file()).isEqualTo(log);
Assertions.assertThat(p.getInputStream().read()).isEqualTo(-1);
final TestClusterNode gethNode =
new TestGethLocalNode(
datadir,
imageName,
false,
hostDiscoveryAddress,
hostJsonRpcAddress,
hostDiscoveryAddress,
hostJsonRpcAddress,
getEnodeFromLog(log));
nodes.add(gethNode);
LOG.info(String.format("Added node : %s", gethNode));
return gethNode;
}
public String getEnodeFromLog(final File file) throws IOException {
return "enode://578e065716636c51c6d4b991c8299d920c8def1957e5fb2dc1c81d3ccf99a072bdcddad86e081e7f7d085a4bc4dbc2e04fe38c90cba810cee50af751e5e3ac70@192.168.71.128:30301";
// return grepFile(file, "UDP listener up *self=").iterator().next();
}
public Collection<String> grepFile(final File file, final String regex) throws IOException {
final Set<String> results = new HashSet<>();
final LineIterator it = FileUtils.lineIterator(file, "UTF-8");
try {
while (it.hasNext()) {
final String line = it.nextLine();
if (line.matches(regex)) {
results.add(line);
}
}
} finally {
LineIterator.closeQuietly(it);
}
return results;
}
public TestClusterNode addParityNode() {
if (bootNodes.isEmpty()) {
throw new IllegalStateException(
"Must result at least 1 boot node in this cluster before added regular nodes");
}
// TODO: Implement Parity Container Support
throw new UnsupportedOperationException("Add ParityNode is not yet supported");
}
public TestClusterNode addCppEthNode() {
if (bootNodes.isEmpty()) {
throw new IllegalStateException(
"Must result at least 1 boot node in this cluster before added regular nodes");
}
// TODO: Implement CppEth Container Support
throw new UnsupportedOperationException("Add CppEthNode is not yet supported");
}
public TestClusterNode addPantheonNode() {
if (bootNodes.isEmpty()) {
throw new IllegalStateException(
"Must result at least 1 boot node in this cluster before added regular nodes");
}
// TODO: Implement Pantheon Container Support
throw new UnsupportedOperationException("Add PantheonNode is not yet supported");
}
@Override
public void close() throws IOException {
// TODO: If test times are too long, consider creating/destroying nodes in parallel
Exception exToThrow = null;
for (final Iterator<TestClusterNode> it = nodes.iterator(); it.hasNext(); ) {
final TestClusterNode node = it.next();
try {
node.stop();
} catch (final Exception e) {
if (exToThrow == null) exToThrow = e;
LOG.error("Error Stopping node. continuing with close(). node = " + node, e);
}
try {
node.delete();
it.remove(); // Remove from our list of nodes only if it was successfully removed.
} catch (final Exception e) {
if (exToThrow == null) exToThrow = e;
LOG.error("Error deleting node. continuing with close(). node = " + node, e);
}
}
for (final Iterator<String> it = imageIds.keySet().iterator(); it.hasNext(); ) {
final String imageId = it.next();
try {
dockerClient.removeImageCmd(imageId).exec();
it.remove();
} catch (final Exception e) {
if (exToThrow == null) exToThrow = e;
LOG.error("Error removing docker image [" + imageId + "]. continuing with close()", e);
}
}
try {
dockerClient.close();
} catch (final IOException e) {
if (exToThrow == null) exToThrow = e;
LOG.warn("Error closing dockerClient. continuing with close()", e);
}
if (exToThrow != null) {
throw new IOException("Error cleaning up cluster.", exToThrow);
}
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("TestCluster{");
sb.append(", nodes=\n").append(join(nodes, "\n"));
sb.append("\nimageIds=").append(imageIds);
sb.append('}');
return sb.toString();
}
}
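
For orientation, the container setup in addGethDockerNode corresponds roughly to the following docker CLI invocation; this is a sketch only, with a hypothetical image tag and bootnode enode:
```
# Hypothetical CLI equivalent of the createContainerCmd chain for a Geth full node
docker run -d --name mycluster-Geth01 --hostname mycluster-Geth01 -P --user root \
  -e "geth_bootnodes=enode://<bootnode-id>@<bootnode-ip>:30303" \
  mycluster:geth --networkid 15
```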

system-tests/src/test/java/net/consensys/pantheon/tests/cluster/TestClusterNode.java (deleted)
@@ -1,177 +0,0 @@
package net.consensys.pantheon.tests.cluster;
import static org.assertj.core.api.Assertions.assertThat;
import java.io.IOException;
import java.math.BigInteger;
import java.net.InetSocketAddress;
import java.util.Set;
import com.google.common.base.MoreObjects;
import io.vertx.core.json.JsonObject;
import okhttp3.MediaType;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.Response;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.assertj.core.api.Assertions;
public abstract class TestClusterNode {
private static final Logger LOG = LogManager.getLogger();
private static final MediaType JSON = MediaType.parse("application/json; charset=utf-8");
protected final String nodeName;
protected final boolean isBootNode;
protected final InetSocketAddress discoveryAddress;
protected final InetSocketAddress jsonRpcAddress;
protected final InetSocketAddress hostDiscoveryAddress;
protected final InetSocketAddress hostJsonRpcAddress;
protected final String enode;
public TestClusterNode(
final String nodeName,
final boolean isBootNode,
final InetSocketAddress discoveryAddress,
final InetSocketAddress jsonRpcAddress,
final InetSocketAddress hostDiscoveryAddress,
final InetSocketAddress hostJsonRpcAddress,
final String enode) {
Assertions.assertThat(enode).isNotNull();
this.nodeName = nodeName;
this.isBootNode = isBootNode;
this.discoveryAddress = discoveryAddress;
this.jsonRpcAddress = jsonRpcAddress;
this.hostDiscoveryAddress = hostDiscoveryAddress;
this.hostJsonRpcAddress = hostJsonRpcAddress;
this.enode = enode;
}
public String getNodeName() {
return nodeName;
}
public boolean isBootNode() {
return isBootNode;
}
public InetSocketAddress getDiscoveryAddress() {
return discoveryAddress;
}
public InetSocketAddress getJsonRpcAddress() {
return jsonRpcAddress;
}
public InetSocketAddress getHostDiscoveryAddress() {
return hostDiscoveryAddress;
}
public InetSocketAddress getHostJsonRpcAddress() {
return hostJsonRpcAddress;
}
public String getEnode() {
return enode;
}
/**
* TODO: JSON wizards, is there a better way to do this? Should we have a JSON helper lib for this,
* or do we already have one?
*
* <p>Executes a JSON-RPC method against this node. Assumptions: the request body is well formed;
* the HTTP response code is 200 (an exception is thrown on any other code); and the HTTP response
* is a valid JSON-RPC value.
*
* @param path path component of the request URL; may be null for the root path
* @param method HTTP method, e.g. GET or POST
* @param body JSON-RPC request body
* @return HTTP response body as {@link JsonObject}
*/
public JsonObject executeJsonRpc(final String path, final String method, final String body)
throws IOException {
final OkHttpClient client = new OkHttpClient();
// TODO: Should this be an incremented or random number?
final String id = "123";
final RequestBody reqBody = RequestBody.create(JSON, body);
String myPath = (path != null) ? path : "";
if (!myPath.startsWith("/")) myPath = "/" + myPath;
final String baseUrl =
"http://"
+ getJsonRpcAddress().getHostName()
+ ":"
+ getJsonRpcAddress().getPort()
+ myPath;
final Request request = new Request.Builder().method(method, reqBody).url(baseUrl).build();
LOG.debug("request:" + request);
try (Response resp = client.newCall(request).execute()) {
LOG.debug("response head: {}", resp);
assertThat(resp.code())
.describedAs("Error processing request\nrequest=[%s]\nresponse=[%s]", request, resp)
.isEqualTo(200);
// Check general format of result
assertThat(resp.body())
.describedAs("Error processing request\nrequest=[%s]\nresponse=[%s]", request, resp)
.isNotNull();
final JsonObject json = new JsonObject(resp.body().string());
// TODO: assertNoErrors
assertValidJsonRpcResult(json, id);
LOG.debug("response body: {}", json);
return json;
}
}
/** JSON helper method */
protected static void assertValidJsonRpcResult(final JsonObject json, final Object id) {
// Check all expected fieldnames are set
final Set<String> fieldNames = json.fieldNames();
assertThat(fieldNames.size()).isEqualTo(3);
assertThat(fieldNames.contains("id")).isTrue();
assertThat(fieldNames.contains("jsonrpc")).isTrue();
assertThat(fieldNames.contains("result")).isTrue();
// Check standard field values
assertIdMatches(json, id);
assertThat(json.getString("jsonrpc")).isEqualTo("2.0");
}
/** JSON helper method */
protected static void assertIdMatches(final JsonObject json, final Object expectedId) {
final Object actualId = json.getValue("id");
if (expectedId == null) {
assertThat(actualId).isNull();
return;
}
assertThat(expectedId)
.isInstanceOfAny(
String.class, Integer.class, Long.class, Float.class, Double.class, BigInteger.class);
assertThat(actualId).isInstanceOf(expectedId.getClass());
assertThat(actualId.toString()).isEqualTo(expectedId.toString());
}
/** Start the node */
public abstract void start();
/** Stop the node */
public abstract void stop();
/** Delete the node and all related data from disk */
public abstract void delete();
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("nodeName", nodeName)
.add("isBootNode", isBootNode)
.add("discoveryAddress", discoveryAddress)
.add("jsonRpcAddress", jsonRpcAddress)
.add("hostDiscoveryAddress", hostDiscoveryAddress)
.add("hostJsonRpcAddress", hostJsonRpcAddress)
.add("enode", enode)
.toString();
}
}

system-tests/src/test/java/net/consensys/pantheon/tests/cluster/TestDockerNode.java (deleted)
@@ -1,74 +0,0 @@
package net.consensys.pantheon.tests.cluster;
import java.net.InetSocketAddress;
import com.github.dockerjava.api.DockerClient;
import com.google.common.base.MoreObjects;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public class TestDockerNode extends TestClusterNode {
private static final Logger LOG = LogManager.getLogger();
protected String containerId;
protected DockerClient dockerClient;
public TestDockerNode(
final DockerClient dockerClient,
final String nodeName,
final String containerId,
final boolean isBootNode,
final InetSocketAddress discoveryAddress,
final InetSocketAddress jsonRpcAddress,
final InetSocketAddress hostDiscoveryAddress,
final InetSocketAddress hostJsonRpcAddress,
final String enode) {
super(
nodeName,
isBootNode,
discoveryAddress,
jsonRpcAddress,
hostDiscoveryAddress,
hostJsonRpcAddress,
enode);
this.containerId = containerId;
this.dockerClient = dockerClient;
}
public String getContainerId() {
return containerId;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("nodeName", nodeName)
.add("isBootNode", isBootNode)
.add("discoveryAddress", discoveryAddress)
.add("jsonRpcAddress", jsonRpcAddress)
.add("hostDiscoveryAddress", hostDiscoveryAddress)
.add("hostJsonRpcAddress", hostJsonRpcAddress)
.add("enode", enode)
.add("containerId", containerId)
.add("dockerClient", dockerClient)
.toString();
}
@Override
public void start() {
LOG.info("Calling stop on node {}", this);
dockerClient.startContainerCmd(containerId).exec();
}
@Override
public void stop() {
LOG.info("Calling stop on node {}", this);
dockerClient.stopContainerCmd(containerId).exec();
}
@Override
public void delete() {
LOG.info("Calling delete on node {}", this);
dockerClient.removeContainerCmd(containerId).withForce(true).exec();
}
}

system-tests/src/test/java/net/consensys/pantheon/tests/cluster/TestGethLocalNode.java (deleted)
@@ -1,71 +0,0 @@
package net.consensys.pantheon.tests.cluster;
import java.io.File;
import java.net.InetSocketAddress;
import com.google.common.base.MoreObjects;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public class TestGethLocalNode extends TestClusterNode {
private static final Logger LOG = LogManager.getLogger();
protected File dataDir;
public TestGethLocalNode(
final File dataDir,
final String nodeName,
final boolean isBootNode,
final InetSocketAddress discoveryAddress,
final InetSocketAddress jsonRpcAddress,
final InetSocketAddress hostDiscoveryAddress,
final InetSocketAddress hostJsonRpcAddress,
final String enode) {
super(
nodeName,
isBootNode,
discoveryAddress,
jsonRpcAddress,
hostDiscoveryAddress,
hostJsonRpcAddress,
enode);
this.dataDir = dataDir;
}
@Override
public void start() {
// TODO: Implement Start()
LOG.warn("TODO: Implement Start()");
// LOG.info("Calling stop on node {}", this);
}
@Override
public void stop() {
// TODO: Implement Stop()
LOG.warn("TODO: Implement Stop()");
// LOG.info("Calling stop on node {}", this);
}
@Override
public void delete() {
// TODO: Implement Delete()
LOG.warn("TODO: Implement DELETE()");
// LOG.info("Calling delete on node {}", this);
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("nodeName", nodeName)
.add("isBootNode", isBootNode)
.add("discoveryAddress", discoveryAddress)
.add("jsonRpcAddress", jsonRpcAddress)
.add("hostDiscoveryAddress", hostDiscoveryAddress)
.add("hostJsonRpcAddress", hostJsonRpcAddress)
.add("enode", enode)
.add("dataDir", dataDir)
.toString();
}
}

system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth/Dockerfile-geth (deleted)
@@ -1,25 +0,0 @@
FROM ethereum/client-go:alltools-stable
ENV NODE_NAME=node01
ENV DATA_DIR=/var/lib/geth/$NODE_NAME
ENV HOME=/root
RUN mkdir -p $DATA_DIR
ADD genesis.json $DATA_DIR/genesis.json
ADD gethUtils.sh /gethUtils.sh
ADD run.sh /run.sh
ADD runBootNode.sh /runBootNode.sh
RUN echo "if [ -f '/gethUtils.sh' ]; then echo 'Sourcing /gethUtils.sh'; source /gethUtils.sh; fi\n" >> $HOME/.bashrc
RUN sed -i s@REPLACE_NODE_NAME@$NODE_NAME@ /gethUtils.sh
RUN sed -i s@REPLACE_DATA_DIR@$DATA_DIR@ /gethUtils.sh
WORKDIR $HOME/
ENTRYPOINT ["/run.sh"]
# List Exposed Ports
EXPOSE 8084 8545 30303 30303/udp

system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth/Dockerfile-geth-ubuntu (deleted)
@@ -1,45 +0,0 @@
FROM ubuntu:18.04
ENV PATH /go-ethereum/build/bin:$PATH
RUN useradd --create-home --user-group -u 999 appuser
RUN apt-get update
RUN apt-get install -y build-essential golang git net-tools iputils-ping netcat curl
RUN apt-get install -y vim emacs
WORKDIR /
RUN git clone https://github.com/ethereum/go-ethereum
# && git checkout tags/v1.8.7
WORKDIR /go-ethereum
RUN make all
RUN mkdir -p /ethereum/data/geth
ADD genesis.json /ethereum/genesis.json
ADD gethUtils.sh /gethUtils.sh
ADD run.sh /run.sh
ADD runBootNode.sh /runBootNode.sh
ADD bash_aliases /home/appuser/.bash_aliases
RUN chmod +x /run.sh /runBootNode.sh /gethUtils.sh /home/appuser/.bash_aliases
RUN chown appuser:appuser /run.sh /runBootNode.sh /gethUtils.sh /home/appuser/.bash_aliases
# store cmd to ~/.bash_history every time the prompt is displayed, instead of waiting for a clean exit from the shell
RUN echo "export PROMPT_COMMAND=\"history -a\"" >> /root/.profile
RUN cp /root/.profile /home/appuser/
RUN cp /root/.bashrc /home/appuser/
#RUN echo "if [ -f \"~/.bash_aliases\" ]; then echo 'Sourcing \"~/.bash_aliases\"'; source \"~/.bash_aliases\"; fi\n" >> /home/appuser/.bashrc
RUN echo "if [ -f '/gethUtils.sh' ]; then echo 'Sourcing /gethUtils.sh'; source /gethUtils.sh; fi\n" >> /home/appuser/.bashrc
WORKDIR /go-ethereum
RUN chown -R appuser:appuser /ethereum
RUN chown -R appuser:appuser /go-ethereum
USER appuser
CMD /run.sh
# List Exposed Ports
EXPOSE 8084 8545 30303 30303/udp

system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth/genesis.json (deleted)
@@ -1,25 +0,0 @@
{
"config": {
"chainId": 15,
"homesteadBlock": 0,
"eip155Block": 0,
"eip158Block": 0,
"ethash": {
}
},
"alloc" : {
"0x0000000000000000000000000000000000000001": {"balance": "111111111"},
"0x0000000000000000000000000000000000000002": {"balance": "222222222"}
},
"coinbase" : "0x0000000000000000000000000000000000000000",
"difficulty" : "0x0000001",
"extraData" : "",
"gasLimit" : "0x2fefd8",
"nonce" : "0x0000000000000107",
"mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
"parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
"timestamp" : "0x00"
}

system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth/gethUtils.sh (deleted)
@@ -1,42 +0,0 @@
#!/bin/sh
function waitForLogLine() {
# Note, this function will wait forever
local myStr=$1
local myFile=$2
until grep -q "${myStr}" "${myFile}"
do
# echo "Sleeping 0.1s"
sleep 0.1s
done
}
function logEnode() {
waitForLogLine 'UDP listener up *self=' $datadir/geth.log
# echo "ip_addr=\$(getent hosts boot | awk '{ print \$1 }')"
ip_addr=$(hostname -i)
# ip_addr="192.168.71.128"
echo "ip_addr=$ip_addr"
echo "enode=\$( grep enode \$datadir/geth.log | tail -n 1 | sed s/^.*enode:/enode:/ | sed \"s/\[\:\:\]/$ip_addr/g\" )"
enode=$( grep enode $datadir/geth.log | tail -n 1 | sed s/^.*enode:/enode:/ | sed "s/\[\:\:\]/$ip_addr/g" )
echo "enode=$enode"
echo $enode > $BOOTENODEFILE
echo cat file
cat $BOOTENODEFILE
}
function sleepforever() {
while true; do
sleep 9999d
done
}
datadir=REPLACE_DATA_DIR
BOOTENODEFILE=$datadir/enode.log
# Note: aliases dependent on $nodedir need to be loaded here instead of ~/.bash_aliases
# because ~/.bash_aliases is loaded before this script is sourced.
alias geth="geth --datadir=$datadir "
alias peers="geth attach --exec 'admin.peers'"

system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth/run.sh (deleted)
@@ -1,11 +0,0 @@
#!/bin/sh
source /gethUtils.sh
cd /go-ethereum
geth --datadir=$datadir init $datadir/genesis.json
geth -verbosity 6 --datadir=$datadir --syncmode "full" --rpc --rpcapi eth,web3,admin --rpcaddr 0.0.0.0 --bootnodes="$geth_bootnodes" --networkid 15 &> $datadir/geth.log &
logEnode
sleepforever

system-tests/src/test/resources/net/consensys/pantheon/tests/cluster/docker/geth/runBootNode.sh (deleted)
@@ -1,13 +0,0 @@
#!/bin/sh
echo HI!
source /gethUtils.sh
BOOTNODE_CMD=bootnode
$BOOTNODE_CMD --genkey=$nodedir/boot.key
$BOOTNODE_CMD --nodekey=$nodedir/boot.key --addr=:30303 &> $datadir/geth.log &
logEnode
sleepforever