From dce901f5c377a1ffce346b1d6453e1e0a11f6071 Mon Sep 17 00:00:00 2001
From: Raymond Cheng
Date: Sun, 1 Apr 2018 11:22:11 -0700
Subject: [PATCH] snapshot 2018/04/01

---
 .circleci/config.yml | 82 +++
 .dockerignore | 1 +
 .gitignore | 22 +
 .gitmodules | 0
 .rustfmt.toml | 54 ++
 Cargo.toml | 41 ++
 LICENSE | 202 +++++++
 Makefile.toml | 108 ++++
 README.md | 154 +++++
 clients/Makefile.toml | 1 +
 clients/token/Cargo.toml | 20 +
 clients/token/Makefile.toml | 1 +
 clients/token/src/main.rs | 133 +++++
 clients/utils/Cargo.toml | 15 +
 clients/utils/Makefile.toml | 1 +
 clients/utils/src/benchmark.rs | 230 ++++++++
 clients/utils/src/lib.rs | 12 +
 clients/utils/src/macros.rs | 132 +++++
 common/Cargo.toml | 18 +
 common/Makefile.toml | 4 +
 common/src/error.rs | 39 ++
 common/src/hex_encoded.rs | 86 +++
 common/src/lib.rs | 19 +
 common/src/profiling.rs | 98 ++++
 common/src/random.rs | 32 +
 common/src/serializer.rs | 200 +++++++
 compute/Cargo.toml | 40 ++
 compute/Makefile.toml | 1 +
 compute/api/Cargo.toml | 18 +
 compute/api/Makefile.toml | 1 +
 compute/api/build.rs | 17 +
 compute/api/src/compute_web3.proto | 19 +
 compute/api/src/lib.rs | 8 +
 compute/build.rs | 6 +
 compute/src/handlers.rs | 53 ++
 compute/src/ias.rs | 166 ++++++
 compute/src/instrumentation.rs | 105 ++++
 compute/src/main.rs | 205 +++++++
 compute/src/server.rs | 468 +++++++++++++++
 consensus/Cargo.toml | 36 ++
 consensus/Makefile.toml | 1 +
 consensus/api/Cargo.toml | 18 +
 consensus/api/Makefile.toml | 1 +
 consensus/api/build.rs | 15 +
 consensus/api/src/consensus.proto | 54 ++
 consensus/api/src/lib.rs | 8 +
 consensus/benches/benchmarks.rs | 83 +++
 consensus/build.rs | 15 +
 consensus/src/ekidenmint.rs | 159 +++++
 consensus/src/errors.rs | 52 ++
 consensus/src/lib.rs | 102 ++++
 consensus/src/main.rs | 67 +++
 consensus/src/rpc.rs | 153 +++++
 consensus/src/state.rs | 21 +
 consensus/src/tendermint.proto | 66 +++
 consensus/src/tendermint.rs | 51 ++
 consensus/tests/integration_test.rs | 138 +++++
 contracts/key-manager/Cargo.toml | 22 +
 contracts/key-manager/Makefile.toml | 1 +
 contracts/key-manager/api/Cargo.toml | 15 +
 contracts/key-manager/api/build.rs | 6 +
 contracts/key-manager/api/src/api.proto | 16 +
 contracts/key-manager/api/src/api.rs | 11 +
 contracts/key-manager/api/src/lib.rs | 12 +
 contracts/key-manager/build.rs | 6 +
 contracts/key-manager/client/Cargo.toml | 16 +
 contracts/key-manager/client/src/client.rs | 136 +++++
 contracts/key-manager/client/src/lib.rs | 41 ++
 contracts/key-manager/src/key_store.rs | 97 +++
 contracts/key-manager/src/lib.rs | 43 ++
 contracts/token/Cargo.toml | 18 +
 contracts/token/Makefile.toml | 1 +
 contracts/token/api/Cargo.toml | 15 +
 contracts/token/api/build.rs | 6 +
 contracts/token/api/src/api.proto | 30 +
 contracts/token/api/src/api.rs | 15 +
 contracts/token/api/src/lib.rs | 12 +
 contracts/token/build.rs | 14 +
 contracts/token/src/lib.rs | 68 +++
 contracts/token/src/token_contract.rs | 149 +++++
 contracts/token/tests/integration_test.rs | 6 +
 core/Makefile.toml | 1 +
 core/common/Cargo.toml | 12 +
 core/common/Makefile.toml | 4 +
 core/common/src/lib.rs | 15 +
 core/edl/Cargo.toml | 14 +
 core/edl/Makefile.toml | 1 +
 core/edl/src/core.edl | 4 +
 core/edl/src/enclave.lds | 9 +
 core/edl/src/enclave.xml | 12 +
 core/edl/src/lib.rs | 16 +
 core/trusted/Cargo.toml | 13 +
 core/trusted/Makefile.toml | 5 +
 core/trusted/src/lib.rs | 22 +
 core/untrusted/Cargo.toml | 15 +
 core/untrusted/Makefile.toml | 1 +
 core/untrusted/src/lib.rs | 22 +
 db/Makefile.toml | 1 +
 db/edl/Cargo.toml | 10 +
 db/edl/Makefile.toml | 1 +
 db/edl/src/db.edl | 34 ++
 db/edl/src/lib.rs | 6 +
 db/trusted/Cargo.toml | 20 +
 db/trusted/Makefile.toml | 5 +
 db/trusted/benches/benchmarks.rs | 152 +++++
 db/trusted/build.rs | 11 +
 db/trusted/src/crypto.rs | 79 +++
 db/trusted/src/database.proto | 29 +
 db/trusted/src/diffs.rs | 51 ++
 db/trusted/src/ecalls.rs | 80 +++
 db/trusted/src/handle.rs | 137 +++++
 db/trusted/src/lib.rs | 53 ++
 db/trusted/src/schema/descriptor.rs | 212 +++++++
 db/trusted/src/schema/macros.rs | 116 ++++
 db/trusted/src/schema/mod.rs | 88 +++
 db/untrusted/Cargo.toml | 18 +
 db/untrusted/Makefile.toml | 1 +
 db/untrusted/src/ecall_proxy.rs | 38 ++
 db/untrusted/src/enclave.rs | 129 ++++
 db/untrusted/src/lib.rs | 11 +
 docker/deployment/Dockerfile.build | 35 ++
 docker/deployment/Dockerfile.runtime | 9 +
 docker/deployment/build-images.sh | 21 +
 docker/development/Dockerfile | 10 +
 docker/testing/Dockerfile | 34 ++
 docs/contributing.md | 17 +
 docs/database.md | 104 ++++
 docs/enclave-backtrace.md | 6 +
 docs/enclave-identity.md | 88 +++
 docs/hw-benchmarking.md | 14 +
 docs/profiling.md | 63 ++
 docs/rpc-drawio.xml | 1 +
 docs/rpc.md | 163 ++++++
 docs/rpc.svg | 2 +
 docs/tracing.md | 22 +
 enclave/Makefile.toml | 1 +
 enclave/common/Cargo.toml | 23 +
 enclave/common/Makefile.toml | 4 +
 enclave/common/build.rs | 11 +
 enclave/common/src/enclave_identity.proto | 28 +
 enclave/common/src/identity.rs | 42 ++
 enclave/common/src/lib.rs | 22 +
 enclave/common/src/quote.rs | 152 +++++
 enclave/edl/Cargo.toml | 6 +
 enclave/edl/Makefile.toml | 1 +
 enclave/edl/src/identity.edl | 50 ++
 enclave/edl/src/lib.rs | 6 +
 enclave/trusted/Cargo.toml | 16 +
 enclave/trusted/Makefile.toml | 5 +
 enclave/trusted/src/crypto.rs | 3 +
 enclave/trusted/src/identity.rs | 248 ++++++++
 enclave/trusted/src/lib.rs | 37 ++
 enclave/trusted/src/utils.rs | 86 +++
 enclave/untrusted/Cargo.toml | 20 +
 enclave/untrusted/Makefile.toml | 1 +
 enclave/untrusted/build.rs | 5 +
 enclave/untrusted/src/ecall_proxy.rs | 31 +
 enclave/untrusted/src/enclave.rs | 47 ++
 enclave/untrusted/src/identity.rs | 250 ++++++++
 enclave/untrusted/src/lib.rs | 23 +
 keys/attestation/client.pfx | Bin 0 -> 2397 bytes
 keys/attestation/spid | 1 +
 keys/private.pem | 39 ++
 rpc/Makefile.toml | 1 +
 rpc/client/Cargo.toml | 31 +
 rpc/client/Makefile.toml | 4 +
 rpc/client/src/backend/base.rs | 22 +
 rpc/client/src/backend/mod.rs | 22 +
 rpc/client/src/backend/web3.rs | 236 ++++++++
 rpc/client/src/client.rs | 553 ++++++++++++++++++
 rpc/client/src/future.rs | 29 +
 rpc/client/src/lib.rs | 31 +
 rpc/client/src/macros.rs | 88 +++
 rpc/client/src/secure_channel.rs | 221 +++++++
 rpc/common/Cargo.toml | 21 +
 rpc/common/Makefile.toml | 4 +
 rpc/common/build.rs | 15 +
 rpc/common/src/client.rs | 32 +
 rpc/common/src/enclave_rpc.proto | 116 ++++
 rpc/common/src/lib.rs | 23 +
 rpc/common/src/macros.rs | 67 +++
 rpc/common/src/protocol.rs | 6 +
 rpc/common/src/reflection.rs | 8 +
 rpc/common/src/secure_channel.rs | 283 +++++++++
 rpc/edl/Cargo.toml | 10 +
 rpc/edl/Makefile.toml | 1 +
 rpc/edl/src/lib.rs | 6 +
 rpc/edl/src/rpc.edl | 24 +
 rpc/trusted/Cargo.toml | 27 +
 rpc/trusted/Makefile.toml | 5 +
 rpc/trusted/benches/benchmarks.rs | 287 +++++++++
 rpc/trusted/src/client.rs | 67 +++
 rpc/trusted/src/dispatcher.rs | 326 +++++++++++
 rpc/trusted/src/error.rs | 21 +
 rpc/trusted/src/lib.rs | 34 ++
 rpc/trusted/src/macros.rs | 51 ++
 rpc/trusted/src/request.rs | 109 ++++
 rpc/trusted/src/response.rs | 86 +++
 rpc/trusted/src/secure_channel.rs | 336 +++++++++++
 rpc/trusted/src/untrusted.rs | 96 +++
 rpc/untrusted/Cargo.toml | 22 +
 rpc/untrusted/Makefile.toml | 1 +
 rpc/untrusted/build.rs | 5 +
 rpc/untrusted/src/ecall_proxy.rs | 13 +
 rpc/untrusted/src/enclave.rs | 107 ++++
 rpc/untrusted/src/lib.rs | 34 ++
 rpc/untrusted/src/ocall_proxy.rs | 86 +++
 rpc/untrusted/src/router.rs | 90 +++
 scripts/benchmark.py | 109 ++++
 scripts/make-release.py | 276 +++++++++
 scripts/parse_enclave.py | 499 ++++++++++++++++
 scripts/run_contract.sh | 16 +
 scripts/sgx-enter-hw.sh | 36 ++
 scripts/sgx-enter.sh | 34 ++
 scripts/show-profile.py | 72 +++
 scripts/start-aesmd.sh | 1 +
 scripts/tendermint-clear.sh | 20 +
 scripts/tendermint-start.sh | 29 +
 testnet/contract_benchmarks/.gitignore | 2 +
 testnet/contract_benchmarks/Makefile | 53 ++
 testnet/contract_benchmarks/README.md | 66 +++
 testnet/contract_benchmarks/benchmark.sh | 106 ++++
 testnet/contract_benchmarks/cluster.yaml | 265 +++++++++
 testnet/ethermint/Makefile | 14 +
 testnet/ethermint/benchmark.js | 32 +
 testnet/ethermint/benchmark.sh | 19 +
 testnet/ethermint/ethermint.yaml | 280 +++++++++
 testnet/tendermint/.gitignore | 8 +
 testnet/tendermint/Makefile | 52 ++
 testnet/tendermint/README.md | 105 ++++
 .../tendermint/authorize_security_groups.sh | 12 +
 testnet/tendermint/cmd.sh | 5 +
 testnet/tendermint/cmd_serial.sh | 4 +
 testnet/tendermint/control/.keep | 1 +
 testnet/tendermint/create_known_hosts.sh | 13 +
 testnet/tendermint/create_ssh_config.sh | 26 +
 testnet/tendermint/create_validator_keys.sh | 21 +
 testnet/tendermint/get_ips.sh | 11 +
 testnet/tendermint/keys/.keep | 1 +
 testnet/tendermint/run_servers.sh | 20 +
 testnet/tendermint/send.sh | 5 +
 testnet/tendermint/start_control.sh | 3 +
 testnet/tendermint/upload_validator_keys.sh | 5 +
 testnet/tendermint/validators/.keep | 1 +
 tools/Cargo.toml | 25 +
 tools/Makefile.toml | 1 +
 tools/bin/main.rs | 242 ++++++++
 tools/src/cargo.rs | 236 ++++++++
 tools/src/contract.rs | 371 ++++++++++++
 tools/src/lib.rs | 23 +
 tools/src/utils.rs | 365 ++++++++++++
 xargo/Xargo.toml.template | 30 +
 xargo/x86_64-unknown-linux-sgx.json | 31 +
 253 files changed, 15006 insertions(+)
 create mode 100644 .circleci/config.yml
 create mode 100644 .dockerignore
 create mode 100644 .gitignore
 create mode 100644 .gitmodules
 create mode 100644 .rustfmt.toml
 create mode 100644 Cargo.toml
 create mode 100644 LICENSE
 create mode 100644 Makefile.toml
 create mode 100644 README.md
 create mode 100644 clients/Makefile.toml
 create mode 100644 clients/token/Cargo.toml
 create mode 100644 clients/token/Makefile.toml
 create mode 100644 clients/token/src/main.rs
 create mode 100644 clients/utils/Cargo.toml
 create mode 100644 clients/utils/Makefile.toml
 create mode 100644 clients/utils/src/benchmark.rs
 create mode 100644 clients/utils/src/lib.rs
 create mode 100644 clients/utils/src/macros.rs
 create mode 100644 common/Cargo.toml
 create mode 100644 common/Makefile.toml
 create mode 100644 common/src/error.rs
 create mode 100644 common/src/hex_encoded.rs
 create mode 100644 common/src/lib.rs
 create mode 100644 common/src/profiling.rs
 create mode 100644 common/src/random.rs
 create mode 100644 common/src/serializer.rs
 create mode 100644 compute/Cargo.toml
 create mode 100644 compute/Makefile.toml
 create mode 100644 compute/api/Cargo.toml
 create mode 100644 compute/api/Makefile.toml
 create mode 100644 compute/api/build.rs
 create mode 100644 compute/api/src/compute_web3.proto
 create mode 100644 compute/api/src/lib.rs
 create mode 100644 compute/build.rs
 create mode 100644 compute/src/handlers.rs
 create mode 100644 compute/src/ias.rs
 create mode 100644 compute/src/instrumentation.rs
 create mode 100644 compute/src/main.rs
 create mode 100644 compute/src/server.rs
 create mode 100644 consensus/Cargo.toml
 create mode 100644 consensus/Makefile.toml
 create mode 100644 consensus/api/Cargo.toml
 create mode 100644 consensus/api/Makefile.toml
 create mode 100644 consensus/api/build.rs
 create mode 100644 consensus/api/src/consensus.proto
 create mode 100644 consensus/api/src/lib.rs
 create mode 100644 consensus/benches/benchmarks.rs
 create mode 100644 consensus/build.rs
 create mode 100644 consensus/src/ekidenmint.rs
 create mode 100644 consensus/src/errors.rs
 create mode 100644 consensus/src/lib.rs
 create mode 100644 consensus/src/main.rs
 create mode 100644 consensus/src/rpc.rs
 create mode 100644 consensus/src/state.rs
 create mode 100644 consensus/src/tendermint.proto
 create mode 100644 consensus/src/tendermint.rs
 create mode 100644 consensus/tests/integration_test.rs
 create mode 100644 contracts/key-manager/Cargo.toml
 create mode 100644 contracts/key-manager/Makefile.toml
 create mode 100644 contracts/key-manager/api/Cargo.toml
 create mode 100644 contracts/key-manager/api/build.rs
 create mode 100644 contracts/key-manager/api/src/api.proto
 create mode 100644 contracts/key-manager/api/src/api.rs
 create mode 100644 contracts/key-manager/api/src/lib.rs
 create mode 100644 contracts/key-manager/build.rs
 create mode 100644 contracts/key-manager/client/Cargo.toml
 create mode 100644 contracts/key-manager/client/src/client.rs
 create mode 100644 contracts/key-manager/client/src/lib.rs
 create mode 100644 contracts/key-manager/src/key_store.rs
 create mode 100644 contracts/key-manager/src/lib.rs
 create mode 100644 contracts/token/Cargo.toml
 create mode 100644 contracts/token/Makefile.toml
 create mode 100644 contracts/token/api/Cargo.toml
 create mode 100644 contracts/token/api/build.rs
 create mode 100644 contracts/token/api/src/api.proto
 create mode 100644 contracts/token/api/src/api.rs
 create mode 100644 contracts/token/api/src/lib.rs
 create mode 100644 contracts/token/build.rs
 create mode 100644 contracts/token/src/lib.rs
 create mode 100644 contracts/token/src/token_contract.rs
 create mode 100644 contracts/token/tests/integration_test.rs
 create mode 100644 core/Makefile.toml
 create mode 100644 core/common/Cargo.toml
 create mode 100644 core/common/Makefile.toml
 create mode 100644 core/common/src/lib.rs
 create mode 100644 core/edl/Cargo.toml
 create mode 100644 core/edl/Makefile.toml
 create mode 100644 core/edl/src/core.edl
 create mode 100644 core/edl/src/enclave.lds
 create mode 100644 core/edl/src/enclave.xml
 create mode 100644 core/edl/src/lib.rs
 create mode 100644 core/trusted/Cargo.toml
 create mode 100644 core/trusted/Makefile.toml
 create mode 100644 core/trusted/src/lib.rs
 create mode 100644 core/untrusted/Cargo.toml
 create mode 100644 core/untrusted/Makefile.toml
 create mode 100644 core/untrusted/src/lib.rs
 create mode 100644 db/Makefile.toml
 create mode 100644 db/edl/Cargo.toml
 create mode 100644 db/edl/Makefile.toml
 create mode 100644 db/edl/src/db.edl
 create mode 100644 db/edl/src/lib.rs
 create mode 100644 db/trusted/Cargo.toml
 create mode 100644 db/trusted/Makefile.toml
 create mode 100644 db/trusted/benches/benchmarks.rs
 create mode 100644 db/trusted/build.rs
 create mode 100644 db/trusted/src/crypto.rs
 create mode 100644 db/trusted/src/database.proto
 create mode 100644 db/trusted/src/diffs.rs
 create mode 100644 db/trusted/src/ecalls.rs
 create mode 100644 db/trusted/src/handle.rs
 create mode 100644 db/trusted/src/lib.rs
 create mode 100644 db/trusted/src/schema/descriptor.rs
 create mode 100644 db/trusted/src/schema/macros.rs
 create mode 100644 db/trusted/src/schema/mod.rs
 create mode 100644 db/untrusted/Cargo.toml
 create mode 100644 db/untrusted/Makefile.toml
 create mode 100644 db/untrusted/src/ecall_proxy.rs
 create mode 100644 db/untrusted/src/enclave.rs
 create mode 100644 db/untrusted/src/lib.rs
 create mode 100644 docker/deployment/Dockerfile.build
 create mode 100644 docker/deployment/Dockerfile.runtime
 create mode 100755 docker/deployment/build-images.sh
 create mode 100644 docker/development/Dockerfile
 create mode 100644 docker/testing/Dockerfile
 create mode 100644 docs/contributing.md
 create mode 100644 docs/database.md
 create mode 100644 docs/enclave-backtrace.md
 create mode 100644 docs/enclave-identity.md
 create mode 100644 docs/hw-benchmarking.md
 create mode 100644 docs/profiling.md
 create mode 100644 docs/rpc-drawio.xml
 create mode 100644 docs/rpc.md
 create mode 100644 docs/rpc.svg
 create mode 100644 docs/tracing.md
 create mode 100644 enclave/Makefile.toml
 create mode 100644 enclave/common/Cargo.toml
 create mode 100644 enclave/common/Makefile.toml
 create mode 100644 enclave/common/build.rs
 create mode 100644 enclave/common/src/enclave_identity.proto
 create mode 100644 enclave/common/src/identity.rs
 create mode 100644 enclave/common/src/lib.rs
 create mode 100644 enclave/common/src/quote.rs
 create mode 100644 enclave/edl/Cargo.toml
 create mode 100644 enclave/edl/Makefile.toml
 create mode 100644 enclave/edl/src/identity.edl
 create mode 100644 enclave/edl/src/lib.rs
 create mode 100644 enclave/trusted/Cargo.toml
 create mode 100644 enclave/trusted/Makefile.toml
 create mode 100644 enclave/trusted/src/crypto.rs
 create mode 100644 enclave/trusted/src/identity.rs
 create mode 100644 enclave/trusted/src/lib.rs
 create mode 100644 enclave/trusted/src/utils.rs
 create mode 100644 enclave/untrusted/Cargo.toml
 create mode 100644 enclave/untrusted/Makefile.toml
 create mode 100644 enclave/untrusted/build.rs
 create mode 100644 enclave/untrusted/src/ecall_proxy.rs
 create mode 100644 enclave/untrusted/src/enclave.rs
 create mode 100644 enclave/untrusted/src/identity.rs
 create mode 100644 enclave/untrusted/src/lib.rs
 create mode 100644 keys/attestation/client.pfx
 create mode 100644 keys/attestation/spid
 create mode 100644 keys/private.pem
 create mode 100644 rpc/Makefile.toml
 create mode 100644 rpc/client/Cargo.toml
 create mode 100644 rpc/client/Makefile.toml
 create mode 100644 rpc/client/src/backend/base.rs
 create mode 100644 rpc/client/src/backend/mod.rs
 create mode 100644 rpc/client/src/backend/web3.rs
 create mode 100644 rpc/client/src/client.rs
 create mode 100644 rpc/client/src/future.rs
 create mode 100644 rpc/client/src/lib.rs
 create mode 100644 rpc/client/src/macros.rs
 create mode 100644 rpc/client/src/secure_channel.rs
 create mode 100644 rpc/common/Cargo.toml
 create mode 100644 rpc/common/Makefile.toml
 create mode 100644 rpc/common/build.rs
 create mode 100644 rpc/common/src/client.rs
 create mode 100644 rpc/common/src/enclave_rpc.proto
 create mode 100644 rpc/common/src/lib.rs
 create mode 100644 rpc/common/src/macros.rs
 create mode 100644 rpc/common/src/protocol.rs
 create mode 100644 rpc/common/src/reflection.rs
 create mode 100644 rpc/common/src/secure_channel.rs
 create mode 100644 rpc/edl/Cargo.toml
 create mode 100644 rpc/edl/Makefile.toml
 create mode 100644 rpc/edl/src/lib.rs
 create mode 100644 rpc/edl/src/rpc.edl
 create mode 100644 rpc/trusted/Cargo.toml
 create mode 100644 rpc/trusted/Makefile.toml
 create mode 100644 rpc/trusted/benches/benchmarks.rs
 create mode 100644 rpc/trusted/src/client.rs
 create mode 100644 rpc/trusted/src/dispatcher.rs
 create mode 100644 rpc/trusted/src/error.rs
 create mode 100644 rpc/trusted/src/lib.rs
 create mode 100644 rpc/trusted/src/macros.rs
 create mode 100644 rpc/trusted/src/request.rs
 create mode 100644 rpc/trusted/src/response.rs
 create mode 100644 rpc/trusted/src/secure_channel.rs
 create mode 100644 rpc/trusted/src/untrusted.rs
 create mode 100644 rpc/untrusted/Cargo.toml
 create mode 100644 rpc/untrusted/Makefile.toml
 create mode 100644 rpc/untrusted/build.rs
 create mode 100644 rpc/untrusted/src/ecall_proxy.rs
 create mode 100644 rpc/untrusted/src/enclave.rs
 create mode 100644 rpc/untrusted/src/lib.rs
 create mode 100644 rpc/untrusted/src/ocall_proxy.rs
 create mode 100644 rpc/untrusted/src/router.rs
 create mode 100755 scripts/benchmark.py
 create mode 100755 scripts/make-release.py
 create mode 100755 scripts/parse_enclave.py
 create mode 100755 scripts/run_contract.sh
 create mode 100755 scripts/sgx-enter-hw.sh
 create mode 100755 scripts/sgx-enter.sh
 create mode 100755 scripts/show-profile.py
 create mode 100644 scripts/start-aesmd.sh
 create mode 100755 scripts/tendermint-clear.sh
 create mode 100755 scripts/tendermint-start.sh
 create mode 100644 testnet/contract_benchmarks/.gitignore
 create mode 100644 testnet/contract_benchmarks/Makefile
 create mode 100644 testnet/contract_benchmarks/README.md
 create mode 100755 testnet/contract_benchmarks/benchmark.sh
 create mode 100644 testnet/contract_benchmarks/cluster.yaml
 create mode 100644 testnet/ethermint/Makefile
 create mode 100644 testnet/ethermint/benchmark.js
 create mode 100755 testnet/ethermint/benchmark.sh
 create mode 100644 testnet/ethermint/ethermint.yaml
 create mode 100644 testnet/tendermint/.gitignore
 create mode 100644 testnet/tendermint/Makefile
 create mode 100644 testnet/tendermint/README.md
 create mode 100755 testnet/tendermint/authorize_security_groups.sh
 create mode 100755 testnet/tendermint/cmd.sh
 create mode 100755 testnet/tendermint/cmd_serial.sh
 create mode 100644 testnet/tendermint/control/.keep
 create mode 100755 testnet/tendermint/create_known_hosts.sh
 create mode 100755 testnet/tendermint/create_ssh_config.sh
 create mode 100755 testnet/tendermint/create_validator_keys.sh
 create mode 100755 testnet/tendermint/get_ips.sh
 create mode 100644 testnet/tendermint/keys/.keep
 create mode 100755 testnet/tendermint/run_servers.sh
 create mode 100755 testnet/tendermint/send.sh
 create mode 100644 testnet/tendermint/start_control.sh
 create mode 100755 testnet/tendermint/upload_validator_keys.sh
 create mode 100644 testnet/tendermint/validators/.keep
 create mode 100644 tools/Cargo.toml
 create mode 100644 tools/Makefile.toml
 create mode 100644 tools/bin/main.rs
 create mode 100644 tools/src/cargo.rs
 create mode 100644 tools/src/contract.rs
 create mode 100644 tools/src/lib.rs
 create mode 100644 tools/src/utils.rs
 create mode 100644 xargo/Xargo.toml.template
 create mode 100644 xargo/x86_64-unknown-linux-sgx.json

diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 00000000000..038b8a84c76
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,82 @@
+version: 2
+jobs:
+  build:
+    docker:
+      - image: ekiden/testing
+    steps:
+      # Set up
+      - run: echo 'PS1='"'"'\$ '"'"'; . /root/.bashrc' >> $BASH_ENV
+      - run: echo 'export SGX_MODE=SIM' >> $BASH_ENV
+      - run: echo 'export INTEL_SGX_SDK=/opt/sgxsdk' >> $BASH_ENV
+      - checkout
+
+      # Build
+      - run: cargo make build-flow
+
+      # Rustfmt
+      - run: cargo make checkstyle
+
+      # Cargo tests. Some tests are excluded as they currently don't compile.
+      - run: |
+          cargo test --all \
+            --exclude ekiden-untrusted \
+            --exclude ekiden-enclave-untrusted \
+            --exclude ekiden-rpc-untrusted \
+            --exclude ekiden-db-untrusted \
+            --exclude ekiden-consensus \
+            -- --test-threads 1
+
+      # Cargo benchmarks. We first fetch the latest benchmark results from master and then
+      # compare against them.
+      - run: |
+          set +e
+          set +o pipefail
+          wget -q -O - "https://circleci.com/api/v1.1/project/github/sunblaze-ucb/ekiden/latest/artifacts/?branch=master&circle-token=${CIRCLE_TOKEN}" | grep -o 'https://[^"]*' | xargs -P4 -I % wget -q -O /tmp/benchmarks-master.json %?circle-token=${CIRCLE_TOKEN}
+      - run: |
+          ./scripts/benchmark.py \
+            ekiden-rpc-trusted \
+            ekiden-db-trusted \
+            --output /tmp/benchmarks.json \
+            --compare-to /tmp/benchmarks-master.json
+      - store_artifacts:
+          path: /tmp/benchmarks.json
+          destination: benchmarks
+
+      # Create enclave output directory.
+      - run: mkdir -p target/enclave
+      # Install ekiden-tools.
+      - run: cargo install --force --path tools ekiden-tools
+      # Build key manager contract.
+      - run: cargo ekiden build-contract ekiden-key-manager --path contracts/key-manager --output target/enclave
+      # Build token contract.
+      - run: cargo ekiden build-contract token --path contracts/token --output-identity --output target/enclave
+      # Start the consensus node.
+      - run:
+          command: ./target/debug/ekiden-consensus
+          background: true
+      # Start tendermint node.
+      - run: tendermint init
+      - run:
+          command: tendermint node --consensus.create_empty_blocks=false --rpc.laddr tcp://0.0.0.0:46666 --rpc.grpc_laddr tcp://0.0.0.0:46657
+          background: true
+      # Start key manager compute node.
+      - run:
+          command: ./target/debug/ekiden-compute target/enclave/ekiden-key-manager.so -p 9003 --disable-key-manager --identity-file identity-km.pb
+          background: true
+      # Start token compute node.
+      - run:
+          command: ./target/debug/ekiden-compute target/enclave/token.so --identity-file identity-token.pb
+          background: true
+      # Start token client.
+      - run: ./target/debug/token-client --mr-enclave $(cat target/enclave/token.mrenclave)
+
+workflows:
+  version: 2
+  build:
+    jobs:
+      - build
+experimental:
+  notify:
+    branches:
+      only:
+        - master

diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000000..eb5a316cbd1
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1 @@
+target

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000000..cbab5909e63
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,22 @@
+# Build.
+/target/
+**/*.rs.bk
+*.a
+*.o
+*.so
+**/generated
+Cargo.lock
+Xargo.toml
+
+# Enclave compilation.
+Enclave_t.c
+Enclave_t.h
+
+# IDE.
+.idea/*
+
+# Temporary files.
+*.swp
+
+# Saved enclave identity.
+/identity*.pb
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 00000000000..e69de29bb2d

diff --git a/.rustfmt.toml b/.rustfmt.toml
new file mode 100644
index 00000000000..ea9f3f458af
--- /dev/null
+++ b/.rustfmt.toml
@@ -0,0 +1,54 @@
+required_version = "0.3.6"
+unstable_features = false
+max_width = 100
+hard_tabs = false
+tab_spaces = 4
+newline_style = "Unix"
+indent_style = "Block"
+use_small_heuristics = true
+format_strings = false
+wrap_comments = false
+comment_width = 80
+normalize_comments = false
+empty_item_single_line = true
+struct_lit_single_line = true
+fn_single_line = false
+where_single_line = false
+imports_indent = "Visual"
+imports_layout = "Mixed"
+reorder_extern_crates = true
+reorder_extern_crates_in_group = true
+reorder_imports = true
+reorder_imports_in_group = true
+reorder_imported_names = true
+binop_separator = "Front"
+type_punctuation_density = "Wide"
+space_before_colon = false
+space_after_colon = true
+spaces_around_ranges = false
+spaces_within_parens_and_brackets = false
+combine_control_expr = true
+struct_field_align_threshold = 0
+remove_blank_lines_at_start_or_end_of_block = true
+match_arm_blocks = true
+force_multiline_blocks = false
+fn_args_density = "Tall"
+brace_style = "SameLineWhere"
+control_brace_style = "AlwaysSameLine"
+trailing_comma = "Vertical"
+trailing_semicolon = true
+match_block_trailing_comma = false
+blank_lines_upper_bound = 1
+blank_lines_lower_bound = 0
+merge_derives = true
+use_try_shorthand = true
+condense_wildcard_suffixes = false
+force_explicit_abi = true
+write_mode = "Overwrite"
+color = "Auto"
+disable_all_formatting = false
+skip_children = false
+error_on_line_overflow = true
+error_on_unformatted = false
+report_todo = "Never"
+report_fixme = "Never"

diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 00000000000..325ffb828d9
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,41 @@
+[workspace]
+members = [
+    "tools",
+
+    # Common.
+    "common",
+
+    # Enclave loader.
+    "enclave/common",
+    "enclave/untrusted",
+    "enclave/trusted",
+
+    # RPC (+ attestation).
+    "rpc/common",
+    "rpc/client",
+    "rpc/untrusted",
+    "rpc/trusted",
+    "rpc/edl",
+
+    # Database.
+    "db/untrusted",
+    "db/trusted",
+    "db/edl",
+
+    # Core.
+    "core/common",
+    "core/untrusted",
+    "core/trusted",
+    "core/edl",
+
+    # Compute node.
+    "compute/api",
+    "compute",
+
+    # Consensus node.
+    "consensus/api",
+    "consensus",
+
+    # Clients.
+    "clients/token",
+]

diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
+      If You institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/Makefile.toml b/Makefile.toml
new file mode 100644
index 00000000000..e66f5ee697f
--- /dev/null
+++ b/Makefile.toml
@@ -0,0 +1,108 @@
+[env]
+PROJECT_ROOT = { script = ["git rev-parse --show-toplevel"] }
+EKIDEN_ROOT = "${PROJECT_ROOT}"
+INTEL_SGX_SDK = "/opt/sgxsdk"
+SGX_ARCH = "x64"
+XARGO_TARGET_PATH = "${EKIDEN_ROOT}/xargo"
+XARGO_DEFAULT_CONFIG_TEMPLATE = "${EKIDEN_ROOT}/xargo/Xargo.toml.template"
+BUILD_FOR_SGX_TARGET = "false"
+BUILD_FOR_HOST_TARGET = "true"
+
+[tasks.default]
+alias = "build-flow"
+
+[tasks.env-debug]
+env = { "BUILD_MODE" = "debug" }
+
+[tasks.env-release]
+env = { "BUILD_MODE" = "release" }
+
+[tasks.env-sgx-xargo]
+env = { "RUSTFLAGS" = "-Z force-unstable-if-unmarked", "RUST_TARGET_PATH" = "${XARGO_TARGET_PATH}" }
+
+[tasks.build-flow]
+dependencies = [
+    "env-debug",
+    "pre-build",
+    "build",
+    "post-build",
+]
+
+[tasks.build-release-flow]
+dependencies = [
+    "env-release",
+    "pre-build",
+    "build-release",
+    "post-build",
+]
+
+[tasks.build]
+# Alias needed to clear cargo-make defaults.
+alias = "build-debug"
+
+[tasks.build-debug]
+dependencies = [
+    "build-no-sgx",
+    "env-sgx-xargo",
+    "build-sgx",
+]
+
+[tasks.build-release]
+# Alias needed to clear cargo-make defaults.
+alias = "build-release-custom"
+
+[tasks.build-release-custom]
+dependencies = [
+    "build-no-sgx-release",
+    "env-sgx-xargo",
+    "build-sgx-release",
+]
+
+[tasks.build-no-sgx]
+condition = { env = { "BUILD_FOR_HOST_TARGET" = "true" } }
+command = "cargo"
+args = ["build"]
+
+[tasks.build-sgx]
+condition = { env = { "BUILD_FOR_SGX_TARGET" = "true" } }
+script = ['''
+    cleanup_xargo_toml() {
+        rm -f Xargo.toml
+    }
+
+    if [ ! -f Xargo.toml ]; then
+        cp ${XARGO_DEFAULT_CONFIG_TEMPLATE} Xargo.toml
+        trap cleanup_xargo_toml EXIT INT TERM
+    fi
+
+    xargo build --target x86_64-unknown-linux-sgx
+''']
+
+[tasks.build-no-sgx-release]
+condition = { env = { "BUILD_FOR_HOST_TARGET" = "true" } }
+command = "cargo"
+args = ["build", "--release"]
+
+[tasks.build-sgx-release]
+condition = { env = { "BUILD_FOR_SGX_TARGET" = "true" } }
+script = ['''
+    cleanup_xargo_toml() {
+        rm -f Xargo.toml
+    }
+
+    if [ ! -f Xargo.toml ]; then
+        cp ${XARGO_DEFAULT_CONFIG_TEMPLATE} Xargo.toml
+        trap cleanup_xargo_toml EXIT INT TERM
+    fi
+
+    xargo build --target x86_64-unknown-linux-sgx --release
+''']
+
+[tasks.test-flow]
+alias = "empty"
+
+[tasks.checkstyle]
+condition = { channels = [ "nightly" ] }
+install_crate = "rustfmt-nightly"
+command = "cargo"
+args = ["fmt", "--", "--write-mode=diff"]

diff --git a/README.md b/README.md
new file mode 100644
index 00000000000..de32786a668
--- /dev/null
+++ b/README.md
@@ -0,0 +1,154 @@
+# Ekiden
+
+[![CircleCI](https://circleci.com/gh/sunblaze-ucb/ekiden.svg?style=svg&circle-token=1e61090ac6971ca5db0514e4593d5fdeff83f6a9)](https://circleci.com/gh/sunblaze-ucb/ekiden)
+
+## Dependencies
+
+Here is a brief list of system dependencies currently used for development:
+- [rustc](https://www.rust-lang.org/en-US/)
+- [cargo](http://doc.crates.io/)
+- [cargo-make](https://crates.io/crates/cargo-make)
+- [xargo](https://github.com/japaric/xargo)
+- [docker](https://www.docker.com/)
+- [rust-sgx-sdk](https://github.com/ekiden/rust-sgx-sdk)
+- [protoc](https://github.com/google/protobuf/releases)
+
+## Checking out
+
+The repository uses submodules, so be sure to check them out:
+```bash
+$ git submodule update --init --recursive
+```
+
+## Building
+
+The easiest way to build SGX code is to use the provided scripts, which run a Docker
+container with all the required tools. This has been tested on macOS and Ubuntu with
+`SGX_MODE=SIM`.
+
+To start the SGX development container:
+```bash
+$ ./scripts/sgx-enter.sh
+```
+
+Ekiden uses [`cargo-make`](https://crates.io/crates/cargo-make) as the build system. The
+development Docker container already comes with `cargo-make` preinstalled.
+
+To build everything required for running Ekiden, simply run the following in the top-level
+directory:
+```bash
+$ cargo make
+```
+
+This should install any required dependencies and build all packages. By default SGX code is
+built in simulation mode. To change this, do `export SGX_MODE=HW` (currently untested) before
+running the `cargo make` command.
+
+## Obtaining contract MRENCLAVE
+
+In order to establish authenticated channels with Ekiden contract enclaves, the client needs
+to know the enclave hash (MRENCLAVE) so it knows that it is talking with the correct contract
+code.
+
+To obtain the enclave hash, run the provided utility:
+```bash
+$ python scripts/parse_enclave.py target/enclave/token.signed.so
+```
+
+This utility outputs a lot of enclave metadata; the important part is:
+```
+   ...
+   ENCLAVEHASH   e38ded31efe3beb062081dc9a7f9af4b785ae8fa2ce61e0bddec2b6aedb02484
+   ...
+```
+
+You will need this hash when running the contract client (see below).
+
+## Obtaining SPID and generating PKCS#12 bundle
+
+In order to communicate with the Intel Attestation Service (IAS), you need to generate a
+certificate and get an SPID from Intel. For more information on that process, see the
+following links:
+* [How to create self-signed certificates for use with Intel SGX RA](https://software.intel.com/en-us/articles/how-to-create-self-signed-certificates-for-use-with-intel-sgx-remote-attestation-using)
+* [Apply for an SPID](https://software.intel.com/formfill/sgx-onboarding)
+
+You will need to pass both the SPID and the PKCS#12 bundle when starting the compute node.
+
+## Running
+
+The easiest way to run Ekiden is through the provided scripts,
+which set up the Docker containers for you.
+
+### Consensus node
+
+To build and run a consensus node:
+```bash
+$ bash scripts/sgx-enter.sh
+$ cargo run -p consensus
+```
+
+The consensus node depends on a local instance of Tendermint. To start a Tendermint
+Docker container that is linked to the container above:
+```bash
+$ bash ./scripts/tendermint-start.sh
+```
+
+Occasionally you will need to clear all persistent data:
+```bash
+$ bash ./scripts/tendermint-clear.sh
+```
+
+### Compute node
+
+Currently, the three processes (compute, consensus, Tendermint) look for each other on
+`localhost`. To attach secondary shells to an existing container, run:
+```bash
+$ bash scripts/sgx-enter.sh
+```
+
+To run a contract on a compute node:
+```bash
+# Optionally set the following env vars.
+export IAS_SPID=""
+export IAS_PKCS="client.pfx"
+scripts/run_contract.sh CONTRACT
+```
+
+To get a list of built contract enclaves:
+```bash
+$ ls ./target/enclave/*.signed.so
+```
+
+### Key manager
+
+The key manager contract is special and must be run in a compute node listening on port `9003`
+by default. Run it as you would run any other compute node, but specify the key manager
+contract and change the port:
+```bash
+$ scripts/run_contract.sh ekiden-key-manager -p 9003 --disable-key-manager --consensus-host disabled
+```
+
+### Contract client
+
+To run the token contract client:
+```bash
+$ scripts/run_contract.sh --client token
+```
+
+## Developing
+
+We welcome anyone to fork and submit a pull request! Please make sure to run `rustfmt`
+before submitting:
+
+```bash
+$ cargo make format
+```
+
+## Packages
+- `core`: Core external-facing libraries (aggregates `common`, `enclave`, `rpc`, `db`, etc.)
+- `common`: Common functionality like error handling
+- `enclave`: Enclave loader and identity attestation
+- `rpc`: RPC functionality for use in enclaves
+- `db`: Database functionality for use in enclaves
+- `compute`: Ekiden compute node
+- `consensus`: Ekiden consensus node
+- `contracts`: Core contracts (`key-manager`, `token`)
+- `tools`: Build tools
+- `scripts`: Bash scripts for development
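As the README notes, the client authenticates the contract enclave by its MRENCLAVE hash. A minimal sketch of how a client program can turn the hex string printed by `parse_enclave.py` into a typed value, assuming (as `clients/utils/src/macros.rs` below suggests) that `MrEnclave` is exported at `ekiden_core::enclave::quote::MrEnclave` and implements `FromStr` over hex input via the `hex_encoded_struct!` machinery from `common/src/hex_encoded.rs`:

```rust
extern crate ekiden_core;

use std::str::FromStr;

use ekiden_core::enclave::quote::MrEnclave;

fn main() {
    // Hash as printed by parse_enclave.py (example value from the README above).
    let hex = "e38ded31efe3beb062081dc9a7f9af4b785ae8fa2ce61e0bddec2b6aedb02484";

    // Parsing rejects strings with the wrong length or non-hex characters.
    let mr_enclave = MrEnclave::from_str(hex).expect("invalid MRENCLAVE");

    // The client embeds this value and refuses to talk to an enclave whose
    // attested measurement does not match.
    println!("expecting enclave {:?}", mr_enclave);
}
```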

diff --git a/clients/Makefile.toml b/clients/Makefile.toml
new file mode 100644
index 00000000000..8ee118dc949
--- /dev/null
+++ b/clients/Makefile.toml
@@ -0,0 +1 @@
+extend = "../Makefile.toml"

diff --git a/clients/token/Cargo.toml b/clients/token/Cargo.toml
new file mode 100644
index 00000000000..9a8557ce527
--- /dev/null
+++ b/clients/token/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "token-client"
+version = "0.1.0-alpha.1"
+authors = [
+    "Jernej Kos"
+]
+
+[features]
+default = []
+benchmark = ["client-utils/benchmark"]
+
+[dependencies]
+client-utils = { path = "../utils" }
+ekiden-core = { path = "../../core/common" }
+ekiden-rpc-client = { path = "../../rpc/client" }
+token-api = { path = "../../contracts/token/api" }
+clap = "2.29.1"
+rand = "0.4"
+futures = "0.1"
+tokio-core = "0.1"

diff --git a/clients/token/Makefile.toml b/clients/token/Makefile.toml
new file mode 100644
index 00000000000..8ee118dc949
--- /dev/null
+++ b/clients/token/Makefile.toml
@@ -0,0 +1 @@
+extend = "../Makefile.toml"

diff --git a/clients/token/src/main.rs b/clients/token/src/main.rs
new file mode 100644
index 00000000000..b5cbe8a20df
--- /dev/null
+++ b/clients/token/src/main.rs
@@ -0,0 +1,133 @@
+#![feature(use_extern_macros)]
+
+#[macro_use]
+extern crate clap;
+extern crate futures;
+extern crate rand;
+extern crate tokio_core;
+
+#[macro_use]
+extern crate client_utils;
+extern crate ekiden_core;
+extern crate ekiden_rpc_client;
+
+extern crate token_api;
+
+use clap::{App, Arg};
+use futures::future::Future;
+use rand::{thread_rng, Rng};
+
+use ekiden_rpc_client::create_client_rpc;
+use token_api::with_api;
+
+with_api! {
+    create_client_rpc!(token, token_api, api);
+}
+
+/// Initializes the token scenario.
+fn init<Backend>(client: &mut token::Client<Backend>, _runs: usize, _threads: usize)
+where
+    Backend: ekiden_rpc_client::backend::ContractClientBackend,
+{
+    // Create new token contract.
+    let mut request = token::CreateRequest::new();
+    request.set_sender("bank".to_string());
+    request.set_token_name("Ekiden Token".to_string());
+    request.set_token_symbol("EKI".to_string());
+    request.set_initial_supply(8);
+
+    client.create(request).wait().unwrap();
+
+    // Check balances.
+    let response = client
+        .get_balance({
+            let mut request = token::GetBalanceRequest::new();
+            request.set_account("bank".to_string());
+            request
+        })
+        .wait()
+        .unwrap();
+    assert_eq!(response.get_balance(), 8_000_000_000_000_000_000);
+}
+
+/// Create a new random token address.
+fn create_address() -> String {
+    thread_rng().gen_ascii_chars().take(32).collect()
+}
+
+/// Runs the token scenario.
+fn scenario<Backend>(client: &mut token::Client<Backend>)
+where
+    Backend: ekiden_rpc_client::backend::ContractClientBackend,
+{
+    // Generate random addresses.
+    let destination = create_address();
+    let poor = create_address();
+
+    // Transfer some funds.
+    client
+        .transfer({
+            let mut request = token::TransferRequest::new();
+            request.set_sender("bank".to_string());
+            request.set_destination(destination.clone());
+            request.set_value(3);
+            request
+        })
+        .wait()
+        .unwrap();
+
+    // Check balances.
+    let response = client
+        .get_balance({
+            let mut request = token::GetBalanceRequest::new();
+            request.set_account(destination.clone());
+            request
+        })
+        .wait()
+        .unwrap();
+    assert_eq!(response.get_balance(), 3);
+
+    let response = client
+        .get_balance({
+            let mut request = token::GetBalanceRequest::new();
+            request.set_account(poor.clone());
+            request
+        })
+        .wait()
+        .unwrap();
+    assert_eq!(response.get_balance(), 0);
+}
+
+/// Finalize the token scenario.
+fn finalize<Backend>(client: &mut token::Client<Backend>, runs: usize, threads: usize)
+where
+    Backend: ekiden_rpc_client::backend::ContractClientBackend,
+{
+    // Check final balance.
+    let response = client
+        .get_balance({
+            let mut request = token::GetBalanceRequest::new();
+            request.set_account("bank".to_string());
+            request
+        })
+        .wait()
+        .unwrap();
+    assert_eq!(
+        response.get_balance(),
+        8_000_000_000_000_000_000 - 3 * runs as u64 * threads as u64
+    );
+}
+
+#[cfg(feature = "benchmark")]
+fn main() {
+    let results = benchmark_client!(token, init, scenario, finalize);
+    results.show();
+}
+
+#[cfg(not(feature = "benchmark"))]
+fn main() {
+    let mut client = contract_client!(token);
+    init(&mut client, 1, 1);
+    scenario(&mut client);
+    finalize(&mut client, 1, 1);
+}
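The `finalize` assertion above encodes the benchmark bookkeeping: each scenario run transfers 3 base units out of the "bank" account, and the 8 * 10^18 starting balance asserted in `init` implies the contract scales the initial supply of 8 to base units. With the default `--benchmark-threads 4` and `--benchmark-runs 1000` defined in `clients/utils/src/macros.rs` below, the arithmetic works out as in this illustrative check:

```rust
fn main() {
    // Default CLI values from clients/utils/src/macros.rs.
    let (threads, runs) = (4u64, 1000u64);

    // Each scenario run moves 3 base units out of the "bank" account.
    let spent = 3 * runs * threads;

    // 8 * 10^18 is the starting balance asserted in init().
    let expected = 8_000_000_000_000_000_000u64 - spent;
    assert_eq!(expected, 7_999_999_999_999_988_000);
    println!("expected final bank balance: {}", expected);
}
```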

diff --git a/clients/utils/Cargo.toml b/clients/utils/Cargo.toml
new file mode 100644
index 00000000000..5fdf9427a37
--- /dev/null
+++ b/clients/utils/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "client-utils"
+version = "0.1.0-alpha.1"
+authors = [
+    "Nick Hynes",
+    "Jernej Kos"
+]
+
+[features]
+benchmark = ["threadpool", "time", "histogram"]
+
+[dependencies]
+threadpool = { version = "1.7.1", optional = true }
+time = { version = "0.1", optional = true }
+histogram = { version = "0.6.8", optional = true }

diff --git a/clients/utils/Makefile.toml b/clients/utils/Makefile.toml
new file mode 100644
index 00000000000..8ee118dc949
--- /dev/null
+++ b/clients/utils/Makefile.toml
@@ -0,0 +1 @@
+extend = "../Makefile.toml"

diff --git a/clients/utils/src/benchmark.rs b/clients/utils/src/benchmark.rs
new file mode 100644
index 00000000000..c5565f76bdf
--- /dev/null
+++ b/clients/utils/src/benchmark.rs
@@ -0,0 +1,230 @@
+use std::sync::Arc;
+use std::sync::mpsc::channel;
+
+use histogram::Histogram;
+use threadpool::ThreadPool;
+use time;
+
+/// Client factory.
+pub trait ClientFactory: Send + Sync + 'static {
+    type Client: Send + Sync;
+
+    /// Create a new client instance.
+    fn create(&self) -> Self::Client;
+}
+
+impl<Client, F> ClientFactory for F
+where
+    Client: Send + Sync,
+    F: Send + Sync + 'static + Fn() -> Client,
+{
+    type Client = Client;
+
+    fn create(&self) -> Client {
+        (*self)()
+    }
+}
+
+/// Benchmark helper.
+pub struct Benchmark<Factory: ClientFactory> {
+    /// Number of scenario runs.
+    runs: usize,
+    /// Workers.
+    pool: ThreadPool,
+    /// Client factory.
+    client_factory: Arc<Factory>,
+}
+
+/// Benchmark results for a single thread.
+///
+/// All time values are in nanoseconds.
+#[derive(Debug, Clone, Default)]
+pub struct BenchmarkResult {
+    /// Amount of time taken for client initialization. This includes the time it
+    /// takes to establish a secure channel.
+    pub client_initialization: u64,
+    /// A vector of pairs `(start_time, end_time)` containing timestamps of when the
+    /// scenario has started and when it has finished.
+    pub scenario: Vec<(u64, u64)>,
+    /// Amount of time taken for client dropping. This includes the
+    /// time it takes to close a secure channel.
+    pub client_drop: u64,
+}
+
+/// Set of benchmark results for all runs.
+pub struct BenchmarkResults {
+    /// Number of runs.
+    pub runs: usize,
+    /// Benchmark results from non-panicked individual runs.
+    pub results: Vec<BenchmarkResult>,
+    /// The number of threads the experiment was run with.
+    pub threads: usize,
+}
+
+impl BenchmarkResults {
+    /// Show one benchmark result.
+    fn show_result(&self, name: &str, result: &Histogram) {
+        println!("{}:", name);
+        println!(
+            "  Percentiles: p50: {} ms / p90: {} ms / p99: {} ms / p999: {} ms",
+            result.percentile(50.0).unwrap(),
+            result.percentile(90.0).unwrap(),
+            result.percentile(99.0).unwrap(),
+            result.percentile(99.9).unwrap(),
+        );
+        println!(
+            "  Min: {} ms / Avg: {} ms / Max: {} ms / StdDev: {} ms",
+            result.minimum().unwrap(),
+            result.mean().unwrap(),
+            result.maximum().unwrap(),
+            result.stddev().unwrap(),
+        );
+    }
+
+    /// Show benchmark results in a human-readable form.
+    pub fn show(&self) {
+        // Prepare histograms.
+        let mut histogram_scenario = Histogram::new();
+        let mut count = 0;
+
+        let mut throughput_runs: Vec<(u64, u64)> = vec![];
+
+        for result in &self.results {
+            for &(start, end) in &result.scenario {
+                histogram_scenario
+                    .increment((end - start) / 1_000_000)
+                    .unwrap();
+
+                count += 1;
+                throughput_runs.push((start, end));
+            }
+        }
+
+        // Sort by start timestamp.
+        throughput_runs.sort_by(|a, b| a.0.cmp(&b.0));
+        // Cut 10% at the beginning and the end.
+        let cut_amount = (throughput_runs.len() as f64 * 0.10).floor() as usize;
+        let throughput_runs = &throughput_runs[cut_amount..throughput_runs.len() - cut_amount];
+        let total_nonoverlapping =
+            throughput_runs.last().unwrap().1 - throughput_runs.first().unwrap().0;
+
+        let failures = (self.threads * self.runs) as u64 - count;
+
+        println!("=== Benchmark Results ===");
+        println!("Threads: {}", self.threads);
+        println!("Runs per thread: {}", self.runs);
+        println!("Non-panicked (npr): {}", count);
+        println!("Panicked: {}", failures);
+
+        println!("--- Latency ---");
+        self.show_result("Scenario", &histogram_scenario);
+
+        println!("--- Throughput ---");
+        println!("Middle 80%: {} npr", throughput_runs.len());
+        println!(
+            "Scenario (middle 80%): {} ms ({} npr / sec)",
+            total_nonoverlapping / 1_000_000,
+            throughput_runs.len() as f64 / (total_nonoverlapping as f64 / 1e9)
+        );
+    }
+}
+
+/// Helper macro for timing a specific block of code.
+macro_rules! time_block {
+    ($result:ident, $measurement:ident, $block:block) => {{
+        let start = time::precise_time_ns();
+        let result = $block;
+        $result.$measurement = time::precise_time_ns() - start;
+
+        result
+    }}
+}
+
+/// Helper to collect into a Vec without redeclaring an item type.
+fn collect_vec<I: Iterator>(i: I) -> Vec<I::Item> {
+    i.collect()
+}
+
+impl<Factory> Benchmark<Factory>
+where
+    Factory: ClientFactory,
+{
+    /// Create a new benchmark helper.
+    pub fn new(runs: usize, threads: usize, client_factory: Factory) -> Self {
+        Benchmark {
+            runs: runs,
+            pool: ThreadPool::with_name("benchmark-scenario".into(), threads),
+            client_factory: Arc::new(client_factory),
+        }
+    }
+
+    /// Run the given benchmark scenario.
+    ///
+    /// The `init` function will only be called once and should prepare the
+    /// grounds for running scenarios. Then multiple `scenario` invocations
+    /// will run in parallel. At the end, the `finalize` function will be
+    /// called once.
+    ///
+    /// Both `init` and `finalize` will be invoked with the number of runs
+    /// and the number of threads as the last two arguments.
+    pub fn run(
+        &self,
+        init: fn(&mut Factory::Client, usize, usize),
+        scenario: fn(&mut Factory::Client),
+        finalize: fn(&mut Factory::Client, usize, usize),
+    ) -> BenchmarkResults {
+        // Initialize.
+        println!("Initializing benchmark...");
+        let mut client = self.client_factory.create();
+        init(&mut client, self.runs, self.pool.max_count());
+
+        println!(
+            "Running benchmark with {} threads, each doing {} requests...",
+            self.pool.max_count(),
+            self.runs
+        );
+
+        let (tx, rx) = channel();
+        for _ in 0..self.pool.max_count() {
+            let tx = tx.clone();
+            let client_factory = self.client_factory.clone();
+            let runs = self.runs;
+
+            self.pool.execute(move || {
+                let mut result = BenchmarkResult::default();
+
+                // Create the client.
+                let mut client =
+                    time_block!(result, client_initialization, { client_factory.create() });
+
+                // Run the scenario multiple times.
+                for _ in 0..runs {
+                    let start = time::precise_time_ns();
+                    scenario(&mut client);
+                    let end = time::precise_time_ns();
+
+                    result.scenario.push((start, end));
+                }
+
+                time_block!(result, client_drop, { drop(client) });
+
+                tx.send(result).unwrap();
+            });
+        }
+
+        self.pool.join();
+        let results = collect_vec(rx.try_iter());
+
+        // Finalize.
+        println!("Finalizing benchmark...");
+        let mut client = self.client_factory.create();
+        finalize(&mut client, self.runs, self.pool.max_count());
+
+        // Collect benchmark results.
+        BenchmarkResults {
+            runs: self.runs,
+            results: results,
+            threads: self.pool.max_count(),
+        }
+    }
+}
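For clarity, here is a minimal standalone use of the `Benchmark` harness defined above, with a dummy `NoopClient` standing in for a real contract client (any `Send + Sync` type works; the closure acts as the factory thanks to the blanket `ClientFactory` impl for `Fn() -> Client`, and plain `fn` items coerce to the function pointers `run` expects). This is a sketch assuming the crate is built with the `benchmark` feature enabled:

```rust
extern crate client_utils;

use client_utils::benchmark::Benchmark;

/// Stand-in for a real contract client; anything Send + Sync works.
struct NoopClient;

fn init(_client: &mut NoopClient, _runs: usize, _threads: usize) {
    println!("init runs once");
}

fn scenario(_client: &mut NoopClient) {
    // One timed iteration; a sleep stands in for a real RPC round trip.
    std::thread::sleep(std::time::Duration::from_millis(5));
}

fn finalize(_client: &mut NoopClient, _runs: usize, _threads: usize) {
    println!("finalize runs once");
}

fn main() {
    // 100 runs per thread across 8 threads; the closure is the client factory.
    let benchmark = Benchmark::new(100, 8, || NoopClient);
    let results = benchmark.run(init, scenario, finalize);
    results.show();
}
```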
+ println!("Initializing benchmark..."); + let mut client = self.client_factory.create(); + init(&mut client, self.runs, self.pool.max_count()); + + println!( + "Running benchmark with {} threads, each doing {} requests...", + self.pool.max_count(), + self.runs + ); + + let (tx, rx) = channel(); + for _ in 0..self.pool.max_count() { + let tx = tx.clone(); + let client_factory = self.client_factory.clone(); + let runs = self.runs; + + self.pool.execute(move || { + let mut result = BenchmarkResult::default(); + + // Create the client. + let mut client = + time_block!(result, client_initialization, { client_factory.create() }); + + // Run the scenario multiple times. + for _ in 0..runs { + let start = time::precise_time_ns(); + scenario(&mut client); + let end = time::precise_time_ns(); + + result.scenario.push((start, end)); + } + + time_block!(result, client_drop, { drop(client) }); + + tx.send(result).unwrap(); + }); + } + + self.pool.join(); + let results = collect_vec(rx.try_iter()); + + // Finalize. + println!("Finalizing benchmark..."); + let mut client = self.client_factory.create(); + finalize(&mut client, self.runs, self.pool.max_count()); + + // Collect benchmark results. + BenchmarkResults { + runs: self.runs, + results: results, + threads: self.pool.max_count(), + } + } +} diff --git a/clients/utils/src/lib.rs b/clients/utils/src/lib.rs new file mode 100644 index 00000000000..c9ba5d5ae92 --- /dev/null +++ b/clients/utils/src/lib.rs @@ -0,0 +1,12 @@ +#[cfg(feature = "benchmark")] +extern crate histogram; +#[cfg(feature = "benchmark")] +extern crate threadpool; +#[cfg(feature = "benchmark")] +extern crate time; + +#[cfg(feature = "benchmark")] +pub mod benchmark; + +#[macro_use] +mod macros; diff --git a/clients/utils/src/macros.rs b/clients/utils/src/macros.rs new file mode 100644 index 00000000000..ad3607f6399 --- /dev/null +++ b/clients/utils/src/macros.rs @@ -0,0 +1,132 @@ +#[macro_export] +macro_rules! default_app { + () => { + App::new(concat!(crate_name!(), " client")) + .about(crate_description!()) + .author(crate_authors!()) + .version(crate_version!()) + .arg(Arg::with_name("host") + .long("host") + .short("h") + .takes_value(true) + .default_value("localhost") + .display_order(1)) + .arg(Arg::with_name("port") + .long("port") + .short("p") + .takes_value(true) + .default_value("9001") + .display_order(2)) + .arg(Arg::with_name("nodes") + .long("nodes") + .help("A list of comma-separated compute node addresses (e.g. host1:9001,host2:9004)") + .takes_value(true)) + .arg(Arg::with_name("mr-enclave") + .long("mr-enclave") + .value_name("MRENCLAVE") + .help("MRENCLAVE in hex format") + .takes_value(true) + .required(true) + .display_order(3)) + }; +} + +#[macro_export] +macro_rules! default_backend { + ($args:ident) => {{ + // Create reactor (event loop) in a separate thread. + let (tx, rx) = std::sync::mpsc::channel(); + std::thread::spawn(move || { + let mut reactor = tokio_core::reactor::Core::new().unwrap(); + tx.send(reactor.remote()).unwrap(); + reactor.run(futures::empty::<(), ()>()).unwrap(); + }); + + let remote = rx.recv().unwrap(); + + if $args.is_present("nodes") { + // Pool of compute nodes. 
+            use std::str::FromStr;
+            use ekiden_rpc_client::backend::web3::ComputeNodeAddress;
+
+            let nodes: Vec<ComputeNodeAddress> = $args
+                .value_of("nodes")
+                .unwrap()
+                .split(",")
+                .map(|address: &str| {
+                    let parts: Vec<&str> = address.split(":").collect();
+
+                    ComputeNodeAddress {
+                        host: parts[0].to_string(),
+                        port: u16::from_str(&parts[1]).unwrap(),
+                    }
+                })
+                .collect();
+
+            ekiden_rpc_client::backend::Web3ContractClientBackend::new_pool(
+                remote,
+                &nodes
+            ).unwrap()
+        } else {
+            ekiden_rpc_client::backend::Web3ContractClientBackend::new(
+                remote,
+                $args.value_of("host").unwrap(),
+                value_t!($args, "port", u16).unwrap_or(9001)
+            ).unwrap()
+        }
+    }};
+}
+
+#[macro_export]
+macro_rules! contract_client {
+    ($contract:ident, $args:ident, $backend:ident) => {
+        $contract::Client::new(
+            $backend,
+            value_t!($args, "mr-enclave", ekiden_core::enclave::quote::MrEnclave)
+                .unwrap_or_else(|e| e.exit())
+        )
+    };
+    ($contract:ident, $args:ident) => {
+        {
+            let backend = default_backend!($args);
+            contract_client!($contract, $args, backend)
+        }
+    };
+    ($contract:ident) => {
+        {
+            let args = default_app!().get_matches();
+            contract_client!($contract, args)
+        }
+    };
+}
+
+#[cfg(feature = "benchmark")]
+#[macro_export]
+macro_rules! benchmark_client {
+    ($contract:ident, $init:expr, $scenario:expr, $finalize:expr) => {{
+        let args = std::sync::Arc::new(
+            default_app!()
+                .arg(Arg::with_name("benchmark-threads")
+                    .long("benchmark-threads")
+                    .help("Number of benchmark threads")
+                    .takes_value(true)
+                    .default_value("4"))
+                .arg(Arg::with_name("benchmark-runs")
+                    .long("benchmark-runs")
+                    .help("Number of scenario runs")
+                    .takes_value(true)
+                    .default_value("1000"))
+                .get_matches()
+        );
+
+        let benchmark = $crate::benchmark::Benchmark::new(
+            value_t!(args, "benchmark-runs", usize).unwrap_or_else(|e| e.exit()),
+            value_t!(args, "benchmark-threads", usize).unwrap_or_else(|e| e.exit()),
+            move || {
+                let args = args.clone();
+                contract_client!($contract, args)
+            }
+        );
+
+        benchmark.run($init, $scenario, $finalize)
+    }}
+}
diff --git a/common/Cargo.toml b/common/Cargo.toml
new file mode 100644
index 00000000000..e29fdf4b672
--- /dev/null
+++ b/common/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "ekiden-common"
+version = "0.1.0-alpha.1"
+authors = ["Ekiden Developers "]
+description = "Ekiden common functionality (available to both trusted and untrusted parts)"
+keywords = ["ekiden"]
+repository = "https://github.com/ekiden/ekiden"
+
+[features]
+default = []
+profiling = []
+
+[dependencies]
+protobuf = "1.4.3"
+byteorder = "1"
+
+[target.'cfg(not(target_env = "sgx"))'.dependencies]
+rand = "0.4.2"
diff --git a/common/Makefile.toml b/common/Makefile.toml
new file mode 100644
index 00000000000..9dcb41fa936
--- /dev/null
+++ b/common/Makefile.toml
@@ -0,0 +1,4 @@
+extend = "../Makefile.toml"
+
+[env]
+BUILD_FOR_SGX_TARGET = "true"
diff --git a/common/src/error.rs b/common/src/error.rs
new file mode 100644
index 00000000000..57cb3f06f9f
--- /dev/null
+++ b/common/src/error.rs
@@ -0,0 +1,39 @@
+//! Error types used in Ekiden.
+use std::{error, fmt, result};
+
+/// A custom result type which uses `Error` to avoid the need to repeat the
+/// error type over and over again.
+pub type Result<T> = result::Result<T, Error>;
+
+/// Error type for use in Ekiden crates.
+#[derive(Debug, Clone)]
+pub struct Error {
+    /// Error message.
+    pub message: String,
+}
+
+impl Error {
+    /// Construct a new error instance.
+    pub fn new<S: Into<String>>(message: S) -> Self {
+        Error {
+            message: message.into(),
+        }
+    }
+
+    /// A short description of the error.
+    pub fn description(&self) -> &str {
+        &self.message
+    }
+}
+
+impl<T: error::Error> From<T> for Error {
+    fn from(error: T) -> Self {
+        Self::new(error.description())
+    }
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.message)
+    }
+}
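+
+// Usage sketch (illustrative test, not part of the public API): `Result<T>`
+// defaults the error type, and the blanket `From` impl lets `?` convert any
+// `std::error::Error` into `Error`.
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn parse(s: &str) -> Result<u32> {
+        // `ParseIntError` converts into `Error` via the blanket `From` impl.
+        Ok(s.parse::<u32>()?)
+    }
+
+    #[test]
+    fn error_conversion() {
+        assert_eq!(parse("42").unwrap(), 42);
+        assert!(parse("not a number").is_err());
+    }
+}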
diff --git a/common/src/hex_encoded.rs b/common/src/hex_encoded.rs
new file mode 100644
index 00000000000..df12ed2aadb
--- /dev/null
+++ b/common/src/hex_encoded.rs
@@ -0,0 +1,86 @@
+use std::marker::Sized;
+
+/// Parse error.
+#[derive(Copy, Clone, Debug)]
+pub enum ParseError {
+    InvalidLength,
+    InvalidCharacter,
+}
+
+/// Type which can be parsed from a hex-encoded string.
+pub trait HexEncoded {
+    const LEN: usize;
+
+    fn inner(&mut self, index: usize) -> &mut u8;
+
+    fn from_hex(s: &str) -> Result<Self, ParseError>
+    where
+        Self: Sized + Default,
+    {
+        let mut result = Self::default();
+
+        if s.len() != 2 * Self::LEN {
+            return Err(ParseError::InvalidLength);
+        }
+
+        let mut modulus = 0;
+        let mut buf = 0;
+        let mut output_idx = 0;
+
+        for byte in s.bytes() {
+            buf <<= 4;
+
+            match byte {
+                b'A'...b'F' => buf |= byte - b'A' + 10,
+                b'a'...b'f' => buf |= byte - b'a' + 10,
+                b'0'...b'9' => buf |= byte - b'0',
+                _ => return Err(ParseError::InvalidCharacter),
+            }
+
+            modulus += 1;
+            if modulus == 2 {
+                modulus = 0;
+                *result.inner(output_idx) = buf;
+                output_idx += 1;
+            }
+        }
+
+        Ok(result)
+    }
+}
+
+#[macro_export]
+macro_rules! hex_encoded_struct {
+    ($type: ident, $length_id: ident, $length: expr) => {
+        pub const $length_id: usize = $length;
+
+        #[derive(Default, Debug, Clone, PartialEq, Eq, Hash)]
+        pub struct $type(pub [u8; $length]);
+
+        impl $crate::hex_encoded::HexEncoded for $type {
+            const LEN: usize = $length_id;
+
+            fn inner(&mut self, index: usize) -> &mut u8 {
+                &mut self.0[index]
+            }
+        }
+
+        impl FromStr for $type {
+            type Err = $crate::hex_encoded::ParseError;
+
+            fn from_str(s: &str) -> ::std::result::Result<Self, Self::Err> {
+                use $crate::hex_encoded::HexEncoded;
+
+                Self::from_hex(&s)
+            }
+        }
+
+        impl Deref for $type {
+            type Target = [u8; $length_id];
+
+            fn deref(&self) -> &Self::Target {
+                &self.0
+            }
+        }
+    }
+}
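+
+// Usage sketch (illustrative; `ExampleId` is hypothetical): expanding the
+// macro for a 16-byte identifier yields a type that parses from a
+// 32-character hex string and derefs to the underlying array.
+//
+//     use std::ops::Deref;
+//     use std::str::FromStr;
+//
+//     hex_encoded_struct!(ExampleId, EXAMPLE_ID_LEN, 16);
+//
+//     let id = ExampleId::from_str("000102030405060708090a0b0c0d0e0f").unwrap();
+//     assert_eq!(id[15], 0x0f);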
diff --git a/common/src/lib.rs b/common/src/lib.rs
new file mode 100644
index 00000000000..a94a033a018
--- /dev/null
+++ b/common/src/lib.rs
@@ -0,0 +1,19 @@
+#[cfg(not(target_env = "sgx"))]
+extern crate rand;
+
+#[cfg(target_env = "sgx")]
+extern crate sgx_trts;
+
+extern crate byteorder;
+extern crate protobuf;
+
+pub mod error;
+pub mod random;
+#[macro_use]
+pub mod serializer;
+
+#[macro_use]
+pub mod hex_encoded;
+
+#[macro_use]
+pub mod profiling;
diff --git a/common/src/profiling.rs b/common/src/profiling.rs
new file mode 100644
index 00000000000..be695daff1b
--- /dev/null
+++ b/common/src/profiling.rs
@@ -0,0 +1,98 @@
+//! Profiling helpers.
+use std::time::Instant;
+
+/// Guard for profiling a block of code.
+pub struct ProfileGuard {
+    crate_name: &'static str,
+    function_name: &'static str,
+    block_name: &'static str,
+    start: Instant,
+}
+
+impl ProfileGuard {
+    /// Create new profile guard.
+    pub fn new(
+        crate_name: &'static str,
+        function_name: &'static str,
+        block_name: &'static str,
+    ) -> Self {
+        ProfileGuard {
+            crate_name: crate_name,
+            function_name: function_name,
+            block_name: block_name,
+            start: Instant::now(),
+        }
+    }
+
+    /// Finalize profile guard and report results.
+    fn finalize(&self) {
+        let now = Instant::now();
+        let duration = now.duration_since(self.start);
+
+        if self.block_name.is_empty() {
+            println!(
+                "ekiden-profile:{}::{}={},{}",
+                self.crate_name,
+                self.function_name,
+                duration.as_secs(),
+                duration.subsec_nanos()
+            );
+        } else {
+            println!(
+                "ekiden-profile:{}::{}::{}={},{}",
+                self.crate_name,
+                self.function_name,
+                self.block_name,
+                duration.as_secs(),
+                duration.subsec_nanos()
+            );
+        }
+    }
+}
+
+impl Drop for ProfileGuard {
+    fn drop(&mut self) {
+        self.finalize();
+    }
+}
+
+/// Profile a given block.
+///
+/// Results of profiling are output to stdout.
+#[cfg(feature = "profiling")]
+#[macro_export]
+macro_rules! profile_block {
+    ($block_name:expr) => {
+        let name = {
+            // Determine current function name.
+            fn f() {}
+            fn type_name_of<T>(_: T) -> &'static str {
+                extern crate core;
+                unsafe { core::intrinsics::type_name::<T>() }
+            }
+            let name = type_name_of(f);
+            &name[6..name.len() - 4]
+        };
+
+        let _guard = $crate::profiling::ProfileGuard::new(
+            env!("CARGO_PKG_NAME"),
+            &name,
+            &$block_name,
+        );
+    };
+
+    () => {
+        profile_block!("");
+    }
+}
+
+/// Profile a given block.
+///
+/// Results of profiling are output to stdout.
+#[cfg(not(feature = "profiling"))]
+#[macro_export]
+macro_rules! profile_block {
+    ($block_name:expr) => ();
+
+    () => ()
+}
diff --git a/common/src/random.rs b/common/src/random.rs
new file mode 100644
index 00000000000..8cfe88ff682
--- /dev/null
+++ b/common/src/random.rs
@@ -0,0 +1,32 @@
+//! Obtain random bytes in and outside enclaves.
+#[cfg(not(target_env = "sgx"))]
+use rand::{OsRng, Rng};
+
+#[cfg(target_env = "sgx")]
+use sgx_trts;
+
+use super::error::{Error, Result};
+
+/// Fill destination type with random bytes.
+#[cfg(target_env = "sgx")]
+pub fn get_random_bytes(destination: &mut [u8]) -> Result<()> {
+    match sgx_trts::trts::rsgx_read_rand(destination) {
+        Ok(_) => {}
+        _ => return Err(Error::new("Random bytes failed")),
+    }
+
+    Ok(())
+}
+
+/// Fill destination type with random bytes.
+#[cfg(not(target_env = "sgx"))]
+pub fn get_random_bytes(destination: &mut [u8]) -> Result<()> {
+    let mut rng = match OsRng::new() {
+        Ok(rng) => rng,
+        _ => return Err(Error::new("Random bytes failed")),
+    };
+
+    rng.fill_bytes(destination);
+
+    Ok(())
+}
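+
+// Usage sketch (illustrative): both variants share the same signature, so
+// callers stay target-agnostic:
+//
+//     let mut key = [0u8; 32];
+//     get_random_bytes(&mut key)?;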
diff --git a/common/src/serializer.rs b/common/src/serializer.rs
new file mode 100644
index 00000000000..508880d20f3
--- /dev/null
+++ b/common/src/serializer.rs
@@ -0,0 +1,200 @@
+//! Serialization and deserialization.
+use std::io::{Cursor, Read, Write};
+use std::str;
+
+use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
+use protobuf::well_known_types::Empty;
+
+use super::error::Result;
+
+/// A serializer for a specific data type.
+pub trait Serializable {
+    /// Serialize message of a given type into raw bytes.
+    fn write(&self) -> Result<Vec<u8>> {
+        // Default implementation just uses `write_to`.
+        let mut dst = Vec::new();
+        self.write_to(&mut dst)?;
+
+        Ok(dst)
+    }
+
+    /// Write the contents of self into given writer.
+    ///
+    /// Returns the number of bytes written.
+    ///
+    /// # Notes
+    ///
+    /// Implementations should only write a single field of the given type into
+    /// the input stream (e.g., they should assume that there may be multiple
+    /// fields in the stream).
+    fn write_to(&self, writer: &mut Write) -> Result<usize>;
+}
+
+/// A deserializer for a specific data type.
+pub trait Deserializable {
+    /// Deserialize message of a given type from raw bytes.
+    fn read(value: &Vec<u8>) -> Result<Self>
+    where
+        Self: Sized,
+    {
+        // Default implementation just uses `read_from`.
+        Self::read_from(&mut Cursor::new(value))
+    }
+
+    /// Deserialize message of a given type from reader.
+    ///
+    /// # Notes
+    ///
+    /// Implementations should only read a single field of the given type from
+    /// the input stream (e.g., they should assume that there may be multiple
+    /// fields in the stream).
+    fn read_from(reader: &mut Read) -> Result<Self>
+    where
+        Self: Sized;
+}
+
+impl Serializable for str {
+    fn write_to(&self, writer: &mut Write) -> Result<usize> {
+        // Encode string as length (little-endian u32) + UTF-8 value.
+        writer.write_u32::<LittleEndian>(self.len() as u32)?;
+        writer.write(self.as_bytes())?;
+        Ok(4 + self.len())
+    }
+}
+
+impl Serializable for String {
+    fn write_to(&self, writer: &mut Write) -> Result<usize> {
+        // Encode string as length (little-endian u32) + UTF-8 value.
+        writer.write_u32::<LittleEndian>(self.len() as u32)?;
+        writer.write(self.as_bytes())?;
+        Ok(4 + self.len())
+    }
+}
+
+impl Deserializable for String {
+    fn read_from(reader: &mut Read) -> Result<Self> {
+        // Decode string as length (little-endian u32) + UTF-8 value.
+        let length = reader.read_u32::<LittleEndian>()?;
+        let mut buffer = vec![0; length as usize];
+        reader.read_exact(&mut buffer)?;
+        Ok(String::from_utf8(buffer)?)
+    }
+}
+
+impl Serializable for Vec<u8> {
+    fn write_to(&self, writer: &mut Write) -> Result<usize> {
+        // Encode bytes as length (little-endian u32) + value.
+        writer.write_u32::<LittleEndian>(self.len() as u32)?;
+        writer.write(self)?;
+        Ok(4 + self.len())
+    }
+}
+
+impl Deserializable for Vec<u8> {
+    fn read_from(reader: &mut Read) -> Result<Self> {
+        // Decode bytes as length (little-endian u32) + value.
+        let length = reader.read_u32::<LittleEndian>()?;
+        let mut buffer = vec![0; length as usize];
+        reader.read_exact(&mut buffer)?;
+        Ok(buffer)
+    }
+}
+
+// Serializability for numeric types.
+macro_rules! impl_serializable_numeric {
+    ($num_type:ty, $reader:ident, $writer:ident, 1) => {
+        impl Serializable for $num_type {
+            fn write_to(&self, writer: &mut Write) -> Result<usize> {
+                writer.$writer(*self)?;
+                Ok(1)
+            }
+        }
+
+        impl Deserializable for $num_type {
+            fn read_from(reader: &mut Read) -> Result<Self> {
+                Ok(reader.$reader()?)
+            }
+        }
+    };
+
+    ($num_type:ty, $reader:ident, $writer:ident, $size:expr) => {
+        impl Serializable for $num_type {
+            fn write_to(&self, writer: &mut Write) -> Result<usize> {
+                writer.$writer::<LittleEndian>(*self)?;
+                Ok($size)
+            }
+        }
+
+        impl Deserializable for $num_type {
+            fn read_from(reader: &mut Read) -> Result<Self> {
+                Ok(reader.$reader::<LittleEndian>()?)
+            }
+        }
+    }
+}
+
+impl_serializable_numeric!(u8, read_u8, write_u8, 1);
+impl_serializable_numeric!(u16, read_u16, write_u16, 2);
+impl_serializable_numeric!(u32, read_u32, write_u32, 4);
+impl_serializable_numeric!(u64, read_u64, write_u64, 8);
+impl_serializable_numeric!(i8, read_i8, write_i8, 1);
+impl_serializable_numeric!(i16, read_i16, write_i16, 2);
+impl_serializable_numeric!(i32, read_i32, write_i32, 4);
+impl_serializable_numeric!(i64, read_i64, write_i64, 8);
+impl_serializable_numeric!(f32, read_f32, write_f32, 4);
+impl_serializable_numeric!(f64, read_f64, write_f64, 8);
+
+impl Serializable for bool {
+    fn write_to(&self, writer: &mut Write) -> Result<usize> {
+        writer.write_u8(*self as u8)?;
+        Ok(1)
+    }
+}
+
+impl Deserializable for bool {
+    fn read_from(reader: &mut Read) -> Result<Self> {
+        // A `true` value is encoded as 1, so any non-zero byte decodes to `true`.
+        Ok(reader.read_u8()? != 0)
+    }
+}
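+
+// Roundtrip sketch (illustrative test): any `Serializable` value can be read
+// back with the matching `Deserializable` impl.
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn roundtrip_string() {
+        let encoded = String::from("hello").write().unwrap();
+        // 4-byte little-endian length prefix followed by the UTF-8 bytes.
+        assert_eq!(encoded.len(), 4 + 5);
+        assert_eq!(String::read(&encoded).unwrap(), "hello");
+    }
+}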
+/// Serializable implementation generator for Protocol Buffers messages. We cannot
+/// just implement this generically for all types satisfying the `protobuf::Message`
+/// bound as Rust currently lacks specialization support.
+#[macro_export]
+macro_rules! impl_serializable_protobuf {
+    ($message:ty) => {
+        impl $crate::serializer::Serializable for $message {
+            /// Serialize message of a given type into raw bytes.
+            fn write(&self) -> $crate::error::Result<Vec<u8>> {
+                use ::protobuf::Message;
+
+                Ok(self.write_to_bytes()?)
+            }
+
+            /// Write the contents of self into given writer.
+            ///
+            /// Returns the number of bytes written.
+            fn write_to(&self, writer: &mut ::std::io::Write) -> $crate::error::Result<usize> {
+                use ::protobuf::Message;
+
+                self.write_to_writer(writer)?;
+
+                Ok(self.compute_size() as usize)
+            }
+        }
+
+        impl $crate::serializer::Deserializable for $message {
+            /// Deserialize message of a given type from raw bytes.
+            fn read(value: &Vec<u8>) -> $crate::error::Result<Self> {
+                Ok(::protobuf::parse_from_bytes(&value)?)
+            }
+
+            /// Deserialize message of a given type from reader.
+            fn read_from(reader: &mut ::std::io::Read) -> $crate::error::Result<Self> {
+                Ok(::protobuf::parse_from_reader(reader)?)
+            }
+        }
+    }
+}
+
+impl_serializable_protobuf!(Empty);
diff --git a/compute/Cargo.toml b/compute/Cargo.toml
new file mode 100644
index 00000000000..3427fd1f71d
--- /dev/null
+++ b/compute/Cargo.toml
@@ -0,0 +1,40 @@
+[package]
+name = "ekiden-compute"
+version = "0.1.0-alpha.1"
+authors = ["Ekiden Developers "]
+description = "Ekiden compute node"
+keywords = ["ekiden"]
+repository = "https://github.com/ekiden/ekiden"
+build = "build.rs"
+
+[features]
+no_cache = []
+no_diffs = []
+
+[dependencies]
+ekiden-core = { path = "../core/common", version = "0.1.0-alpha.1" }
+ekiden-untrusted = { path = "../core/untrusted", version = "0.1.0-alpha.1" }
+ekiden-rpc-client = { path = "../rpc/client", version = "0.1.0-alpha.1" }
+ekiden-compute-api = { path = "./api", version = "0.1.0-alpha.1" }
+ekiden-consensus-api = { path = "../consensus/api", version = "0.1.0-alpha.1" }
+protobuf = "1.4.2"
+grpc = "0.2.1"
+futures = "0.1"
+futures-cpupool = "0.1.*"
+sgx_types = { git = "https://github.com/ekiden/rust-sgx-sdk", tag = "v0.9.7-ekiden1" }
+tls-api = "0.1.12"
+httpbis = "0.4.1"
+thread_local = "0.3.5"
+clap = "2.29.1"
+reqwest = "0.8.2"
+base64 = "0.9.0"
+prometheus = "0.3.10"
+hyper = "0.11"
+time = "0.1"
+tokio-core = "0.1"
+
+[build-dependencies]
+ekiden-tools = { path = "../tools", version = "0.1.0-alpha.1" }
+ekiden-edl = { path = "../core/edl", version = "0.1.0-alpha.1" }
+protoc-rust = "1.4"
+protoc-rust-grpc = "0.2.1"
diff --git a/compute/Makefile.toml b/compute/Makefile.toml
new file mode 100644
index 00000000000..8ee118dc949
--- /dev/null
+++ b/compute/Makefile.toml
@@ -0,0 +1 @@
+extend = "../Makefile.toml"
diff --git a/compute/api/Cargo.toml b/compute/api/Cargo.toml
new file mode 100644
index 00000000000..210a8905190
--- /dev/null
+++ b/compute/api/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "ekiden-compute-api"
+version = "0.1.0-alpha.1"
+authors = ["Ekiden Developers "]
+description = "Ekiden compute node API"
+keywords = ["ekiden"]
+repository = "https://github.com/ekiden/ekiden"
+build = "build.rs"
+
+[dependencies]
+protobuf = "1.4.2"
+grpc = "0.2.1"
+tls-api = "0.1.12"
+
+[build-dependencies]
+ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" }
+protoc-rust = "1.4"
+protoc-rust-grpc = "0.2.1"
diff --git a/compute/api/Makefile.toml b/compute/api/Makefile.toml
new file mode 100644
index 00000000000..8ee118dc949
--- /dev/null
+++ b/compute/api/Makefile.toml
@@ -0,0 +1 @@
+extend = "../Makefile.toml"
diff --git a/compute/api/build.rs b/compute/api/build.rs
new file mode 100644
index 00000000000..d0681925cc2
--- /dev/null
+++ b/compute/api/build.rs
@@ -0,0 +1,17 @@
+extern crate ekiden_tools;
+extern crate protoc_rust_grpc;
+
+fn main() {
+    // Generate module file.
+    // Must be done first to create src/generated directory
+    ekiden_tools::generate_mod("src/generated", &["compute_web3", "compute_web3_grpc"]);
+
+    protoc_rust_grpc::run(protoc_rust_grpc::Args {
+        out_dir: "src/generated/",
+        includes: &["src"],
+        input: &["src/compute_web3.proto"],
+        rust_protobuf: true,
+    }).expect("protoc-rust-grpc");
+
+    println!("cargo:rerun-if-changed={}", "src/compute_web3.proto");
+}
diff --git a/compute/api/src/compute_web3.proto b/compute/api/src/compute_web3.proto
new file mode 100644
index 00000000000..01e1aaf7e92
--- /dev/null
+++ b/compute/api/src/compute_web3.proto
@@ -0,0 +1,19 @@
+syntax = "proto3";
+
+package compute_web3;
+
+service Compute {
+    // Contract interface.
+    rpc CallContract (CallContractRequest) returns (CallContractResponse) {}
+}
+
+message CallContractRequest {
+    // Raw contract request payload that will be passed to the
+    // contract (Protocol Buffers serialized).
+    bytes payload = 1;
+}
+
+message CallContractResponse {
+    // Raw contract dependent response payload (Protocol Buffers serialized).
+    bytes payload = 1;
+}
diff --git a/compute/api/src/lib.rs b/compute/api/src/lib.rs
new file mode 100644
index 00000000000..48061a0a37b
--- /dev/null
+++ b/compute/api/src/lib.rs
@@ -0,0 +1,8 @@
+extern crate grpc;
+extern crate protobuf;
+extern crate tls_api;
+
+mod generated;
+
+pub use generated::compute_web3::*;
+pub use generated::compute_web3_grpc::*;
diff --git a/compute/build.rs b/compute/build.rs
new file mode 100644
index 00000000000..80a809fe871
--- /dev/null
+++ b/compute/build.rs
@@ -0,0 +1,6 @@
+extern crate ekiden_edl;
+extern crate ekiden_tools;
+
+fn main() {
+    ekiden_tools::build_untrusted(ekiden_edl::edl());
+}
diff --git a/compute/src/handlers.rs b/compute/src/handlers.rs
new file mode 100644
index 00000000000..80db2d957cd
--- /dev/null
+++ b/compute/src/handlers.rs
@@ -0,0 +1,53 @@
+//! Handlers for the endpoints available to be called from inside the enclave,
+//! which are registered using RpcRouter.
+
+use futures::Future;
+use tokio_core;
+
+use ekiden_core::error::{Error, Result};
+use ekiden_core::rpc::client::ClientEndpoint;
+use ekiden_untrusted::rpc::router::Handler;
+
+use ekiden_rpc_client::backend::{ContractClientBackend, Web3ContractClientBackend};
+
+/// Generic contract endpoint.
+///
+/// This endpoint can be used to forward requests to an arbitrary destination
+/// contract, defined by the `hostname` and `port` of the compute node that is
+/// running the contract.
+pub struct ContractForwarder {
+    /// Client endpoint identifier.
+    endpoint: ClientEndpoint,
+    /// Client backend.
+    client: Web3ContractClientBackend,
+}
+
+impl ContractForwarder {
+    pub fn new(
+        endpoint: ClientEndpoint,
+        reactor: tokio_core::reactor::Remote,
+        host: String,
+        port: u16,
+    ) -> Self {
+        ContractForwarder {
+            endpoint: endpoint,
+            client: Web3ContractClientBackend::new(reactor, &host, port).unwrap(),
+        }
+    }
+}
+
+impl Handler for ContractForwarder {
+    /// Return a list of endpoints that the handler can handle.
+    fn get_endpoints(&self) -> Vec<ClientEndpoint> {
+        vec![self.endpoint.clone()]
+    }
+
+    /// Handle a request and return a response.
+    fn handle(&self, _endpoint: &ClientEndpoint, request: Vec<u8>) -> Result<Vec<u8>> {
+        // Currently all OCALLs are blocking so this handler is blocking as well.
+        match self.client.call_raw(request).wait() {
+            Ok(response) => Ok(response),
+            _ => Err(Error::new("RPC call failed")),
+        }
+    }
+}
diff --git a/compute/src/ias.rs b/compute/src/ias.rs
new file mode 100644
index 00000000000..d2d3afcf312
--- /dev/null
+++ b/compute/src/ias.rs
@@ -0,0 +1,166 @@
+use sgx_types;
+
+use std::collections::HashMap;
+use std::fs::File;
+use std::io::Read;
+use std::ops::Deref;
+use std::str::FromStr;
+
+use base64;
+use reqwest;
+
+use ekiden_core::enclave::api as identity_api;
+use ekiden_core::error::{Error, Result};
+use ekiden_core::hex_encoded_struct;
+use ekiden_untrusted::enclave;
+
+/// Intel IAS API URL.
+const IAS_API_URL: &'static str = "https://test-as.sgx.trustedservices.intel.com";
+/// Intel IAS report endpoint.
+///
+/// See [https://software.intel.com/sites/default/files/managed/7e/3b/ias-api-spec.pdf].
+const IAS_ENDPOINT_REPORT: &'static str = "/attestation/sgx/v2/report";
+
+// SPID.
+hex_encoded_struct!(SPID, SPID_LEN, 16);
+
+/// IAS configuration.
+///
+/// The `spid` is a valid SPID obtained from Intel, while `pkcs12_archive`
+/// is the path to the PKCS#12 archive (certificate and private key), which
+/// will be used to authenticate to IAS.
+pub struct IASConfiguration {
+    /// SPID assigned by Intel.
+    pub spid: SPID,
+    /// PKCS#12 archive containing the identity for authenticating to IAS.
+    pub pkcs12_archive: String,
+}
+
+/// IAS (Intel Attestation Service) interface.
+#[derive(Clone)]
+pub struct IAS {
+    /// SPID assigned by Intel.
+    spid: sgx_types::sgx_spid_t,
+    /// Client used for IAS requests.
+    client: Option<reqwest::Client>,
+}
+
+impl IAS {
+    /// Construct new IAS interface.
+    pub fn new(config: Option<IASConfiguration>) -> Result<IAS> {
+        match config {
+            Some(config) => {
+                Ok(IAS {
+                    spid: sgx_types::sgx_spid_t {
+                        id: config.spid.clone().0,
+                    },
+                    client: {
+                        // Read and parse PKCS#12 archive.
+                        let mut buffer = Vec::new();
+                        File::open(&config.pkcs12_archive)?.read_to_end(&mut buffer)?;
+                        let identity = match reqwest::Identity::from_pkcs12_der(&buffer, "") {
+                            Ok(identity) => identity,
+                            _ => return Err(Error::new("Failed to load IAS credentials")),
+                        };
+
+                        // Create client with the identity.
+                        match reqwest::ClientBuilder::new().identity(identity).build() {
+                            Ok(client) => Some(client),
+                            _ => return Err(Error::new("Failed to create IAS client")),
+                        }
+                    },
+                })
+            }
+            None => Ok(IAS {
+                spid: sgx_types::sgx_spid_t { id: [0; SPID_LEN] },
+                client: None,
+            }),
+        }
+    }
+
+    /// Make authenticated web request to IAS.
+    fn make_request(
+        &self,
+        endpoint: &str,
+        data: &HashMap<&str, String>,
+    ) -> Result<reqwest::Response> {
+        let endpoint = format!("{}{}", IAS_API_URL, endpoint);
+
+        let client = match self.client {
+            Some(ref client) => client,
+            None => return Err(Error::new("IAS is not configured")),
+        };
+
+        match client.post(&endpoint).json(&data).send() {
+            Ok(response) => Ok(response),
+            _ => return Err(Error::new("Request to IAS failed")),
+        }
+    }
+
+    /// Make authenticated web request to IAS report endpoint.
+    pub fn verify_quote(&self, nonce: &[u8], quote: &[u8]) -> Result<identity_api::AvReport> {
+        // Generate mock report when client is not configured.
+        if self.client.is_none() {
+            let mut av_report = identity_api::AvReport::new();
+            av_report.set_body(
+                // TODO: Generate other mock fields.
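+                // The mock only fills the two fields consumers rely on: a
+                // quote status and the base64-encoded quote body.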
+                format!(
+                    "{{\"isvEnclaveQuoteStatus\": \"OK\", \"isvEnclaveQuoteBody\": \"{}\"}}",
+                    base64::encode(&quote)
+                ).into_bytes(),
+            );
+
+            return Ok(av_report);
+        }
+
+        let mut request = HashMap::new();
+        request.insert("isvEnclaveQuote", base64::encode(&quote));
+        request.insert("nonce", base64::encode(&nonce));
+
+        let mut response = self.make_request(IAS_ENDPOINT_REPORT, &request)?;
+        if !response.status().is_success() {
+            return Err(Error::new("Request to IAS failed"));
+        }
+
+        let mut av_report = identity_api::AvReport::new();
+        av_report.set_body(response.text()?.into_bytes());
+        av_report.set_signature(
+            response
+                .headers()
+                .get_raw("X-IASReport-Signature")
+                .unwrap()
+                .one()
+                .unwrap()
+                .to_vec(),
+        );
+        av_report.set_certificates(
+            response
+                .headers()
+                .get_raw("X-IASReport-Signing-Certificate")
+                .unwrap()
+                .one()
+                .unwrap()
+                .to_vec(),
+        );
+
+        Ok(av_report)
+    }
+}
+
+impl enclave::identity::IAS for IAS {
+    fn get_spid(&self) -> &sgx_types::sgx_spid_t {
+        &self.spid
+    }
+
+    fn get_quote_type(&self) -> sgx_types::sgx_quote_sign_type_t {
+        sgx_types::sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE
+    }
+
+    fn sigrl(&self, _gid: &sgx_types::sgx_epid_group_id_t) -> Vec<u8> {
+        unimplemented!()
+    }
+
+    fn report(&self, quote: &[u8]) -> identity_api::AvReport {
+        self.verify_quote(&[], quote).expect("IAS::verify_quote")
+    }
+}
diff --git a/compute/src/instrumentation.rs b/compute/src/instrumentation.rs
new file mode 100644
index 00000000000..37ed0eeefa8
--- /dev/null
+++ b/compute/src/instrumentation.rs
@@ -0,0 +1,105 @@
+use std;
+
+use futures;
+use hyper;
+use prometheus;
+use prometheus::Encoder;
+
+/// Worker thread metrics.
+pub struct WorkerMetrics {
+    /// Incremented in each batch of requests.
+    pub reqs_batches_started: prometheus::Counter,
+    /// Time spent by worker thread in an entire batch of requests.
+    pub req_time_batch: prometheus::Histogram,
+    /// Time spent by worker thread in a single request.
+    pub req_time_enclave: prometheus::Histogram,
+    /// Time spent getting state from consensus.
+    pub consensus_get_time: prometheus::Histogram,
+    /// Time spent setting state in consensus.
+    pub consensus_set_time: prometheus::Histogram,
+}
+
+impl WorkerMetrics {
+    pub fn new() -> Self {
+        WorkerMetrics {
+            reqs_batches_started: register_counter!(
+                "reqs_batches_started",
+                "Incremented in each batch of requests."
+            ).unwrap(),
+            req_time_batch: register_histogram!(
+                "req_time_batch",
+                "Time spent by worker thread in an entire batch of requests."
+            ).unwrap(),
+            req_time_enclave: register_histogram!(
+                "req_time_enclave",
+                "Time spent by worker thread in a single request."
+            ).unwrap(),
+            consensus_get_time: register_histogram!(
+                "consensus_get_time",
+                "Time spent getting state from consensus."
+            ).unwrap(),
+            consensus_set_time: register_histogram!(
+                "consensus_set_time",
+                "Time spent setting state in consensus."
+            ).unwrap(),
+        }
+    }
+}
+
+/// GRPC handler metrics.
+pub struct HandlerMetrics {
+    /// Incremented in each request.
+    pub reqs_received: prometheus::Counter,
+    /// Time spent by grpc thread handling a request.
+    pub req_time_client: prometheus::Histogram,
+}
+
+impl HandlerMetrics {
+    pub fn new() -> Self {
+        HandlerMetrics {
+            reqs_received: register_counter!("reqs_received", "Incremented in each request.")
+                .unwrap(),
+            req_time_client: register_histogram!(
+                "req_time_client",
+                "Time spent by grpc thread handling a request."
+            ).unwrap(),
+        }
+    }
+}
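+
+// Illustrative sample of what the metrics service below serves (Prometheus
+// text exposition format; names match the metrics registered above):
+//
+//     # TYPE reqs_received counter
+//     reqs_received 42
+//     # TYPE req_time_client histogram
+//     req_time_client_sum 1.234
+//     req_time_client_count 42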
+struct MetricsService;
+
+impl hyper::server::Service for MetricsService {
+    // boilerplate hooking up hyper's server types
+    type Request = hyper::server::Request;
+    type Response = hyper::server::Response;
+    type Error = hyper::Error;
+    // The future representing the eventual Response your call will
+    // resolve to. This can change to whatever Future you need.
+    type Future = Box<futures::Future<Item = Self::Response, Error = Self::Error>>;
+
+    fn call(&self, _req: Self::Request) -> Self::Future {
+        let enc = prometheus::TextEncoder::new();
+        let type_mime = enc.format_type().parse().unwrap();
+        let mut buf = Vec::new();
+        // If this can practically fail, forward the error to the response.
+        enc.encode(&prometheus::gather(), &mut buf).unwrap();
+        Box::new(futures::future::ok(
+            Self::Response::new()
+                .with_header(hyper::header::ContentType(type_mime))
+                .with_body(buf),
+        ))
+    }
+}
+
+/// Start an HTTP server for Prometheus metrics in a thread.
+pub fn start_http_server(addr: std::net::SocketAddr) {
+    std::thread::spawn(move || {
+        // move addr
+        hyper::server::Http::new()
+            .bind(&addr, || Ok(MetricsService))
+            .unwrap()
+            .run()
+            .unwrap();
+    });
+}
diff --git a/compute/src/main.rs b/compute/src/main.rs
new file mode 100644
index 00000000000..6b3762a2962
--- /dev/null
+++ b/compute/src/main.rs
@@ -0,0 +1,205 @@
+#![feature(use_extern_macros)]
+
+extern crate sgx_types;
+
+extern crate base64;
+extern crate futures;
+extern crate futures_cpupool;
+extern crate grpc;
+extern crate protobuf;
+extern crate reqwest;
+extern crate thread_local;
+extern crate time;
+extern crate tls_api;
+extern crate tokio_core;
+
+#[macro_use]
+extern crate clap;
+extern crate hyper;
+#[macro_use]
+extern crate prometheus;
+
+extern crate ekiden_compute_api;
+extern crate ekiden_consensus_api;
+extern crate ekiden_core;
+extern crate ekiden_rpc_client;
+extern crate ekiden_untrusted;
+
+mod ias;
+mod instrumentation;
+mod handlers;
+mod server;
+
+use std::path::Path;
+use std::thread;
+
+use ekiden_compute_api::ComputeServer;
+use ekiden_core::rpc::client::ClientEndpoint;
+use ekiden_untrusted::rpc::router::RpcRouter;
+
+use clap::{App, Arg};
+use server::ComputeServerImpl;
+
+fn main() {
+    let matches = App::new("Ekiden Compute Node")
+        .version("0.1.0")
+        .author("Jernej Kos ")
+        .about("Ekiden compute node server")
+        .arg(
+            Arg::with_name("contract")
+                .index(1)
+                .value_name("CONTRACT")
+                .help("Signed contract filename")
+                .takes_value(true)
+                .required(true)
+                .display_order(1),
+        )
+        .arg(
+            Arg::with_name("port")
+                .long("port")
+                .short("p")
+                .takes_value(true)
+                .default_value("9001")
+                .display_order(2),
+        )
+        .arg(
+            Arg::with_name("ias-spid")
+                .long("ias-spid")
+                .value_name("SPID")
+                .help("IAS SPID in hex format")
+                .takes_value(true)
+                .requires("ias-pkcs12"),
+        )
+        .arg(
+            Arg::with_name("ias-pkcs12")
+                .long("ias-pkcs12")
+                .help("Path to IAS client certificate and private key PKCS#12 archive")
+                .takes_value(true)
+                .requires("ias-spid"),
+        )
+        .arg(
+            Arg::with_name("key-manager-host")
+                .long("key-manager-host")
+                .takes_value(true)
+                .default_value("localhost"),
+        )
+        .arg(
+            Arg::with_name("key-manager-port")
+                .long("key-manager-port")
+                .takes_value(true)
+                .default_value("9003"),
+        )
+        .arg(
+            Arg::with_name("consensus-host")
+                .long("consensus-host")
+                .takes_value(true)
+                .default_value("localhost"),
+        )
+        .arg(
+            Arg::with_name("consensus-port")
+                .long("consensus-port")
+                .takes_value(true)
+                .default_value("9002"),
+        )
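+        // The defaults above assume the standard local port layout: compute
+        // on 9001, consensus on 9002 and the key manager on 9003.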
.arg(Arg::with_name("disable-key-manager").long("disable-key-manager")) + .arg( + Arg::with_name("grpc-threads") + .long("grpc-threads") + .help("Number of threads to use in the GRPC server's HTTP server. Multiple threads only allow requests to be batched up. Requests will not be processed concurrently.") + .default_value("1") + .takes_value(true), + ) + .arg( + Arg::with_name("metrics-addr") + .long("metrics-addr") + .help("A SocketAddr (as a string) from which to serve metrics to Prometheus.") + .takes_value(true) + ) + .arg( + Arg::with_name("max-batch-size") + .long("max-batch-size") + .help("Maximum size of a batch of requests") + .default_value("1000") + .takes_value(true), + ) + .arg( + Arg::with_name("max-batch-timeout") + .long("max-batch-timeout") + .help("Maximum timeout when waiting for a batch (in ms)") + .default_value("1000") + .takes_value(true), + ) + .arg( + Arg::with_name("identity-file") + .long("identity-file") + .help("Path for saving persistent enclave identity") + .default_value("identity.pb") + .takes_value(true), + ) + .get_matches(); + + let port = value_t!(matches, "port", u16).unwrap_or(9001); + + // Create reactor (event loop). + let reactor = tokio_core::reactor::Core::new().unwrap(); + + // Setup IAS. + let ias = ias::IAS::new(if matches.is_present("ias-spid") { + Some(ias::IASConfiguration { + spid: value_t!(matches, "ias-spid", ias::SPID).unwrap_or_else(|e| e.exit()), + pkcs12_archive: matches.value_of("ias-pkcs12").unwrap().to_string(), + }) + } else { + eprintln!("WARNING: IAS is not configured, validation will always return an error."); + + None + }).unwrap(); + + // Setup enclave RPC routing. + { + let mut router = RpcRouter::get_mut(); + + // Key manager endpoint. + if !matches.is_present("disable-key-manager") { + router.add_handler(handlers::ContractForwarder::new( + ClientEndpoint::KeyManager, + reactor.remote(), + matches.value_of("key-manager-host").unwrap().to_string(), + value_t!(matches, "key-manager-port", u16).unwrap_or(9003), + )); + } + } + + // Start the gRPC server. + let mut server = grpc::ServerBuilder::new_plain(); + server.http.set_port(port); + let contract_filename = matches.value_of("contract").unwrap(); + if !Path::new(contract_filename).exists() { + panic!(format!("Could not find contract: {}", contract_filename)) + } + server.add_service(ComputeServer::new_service_def(ComputeServerImpl::new( + &contract_filename, + matches.value_of("consensus-host").unwrap(), + value_t!(matches, "consensus-port", u16).unwrap_or(9002), + value_t!(matches, "max-batch-size", usize).unwrap_or(1000), + value_t!(matches, "max-batch-timeout", u64).unwrap_or(1000) * 1_000_000, + ias, + matches.value_of("identity-file").unwrap_or("identity.pb"), + ))); + let num_threads = value_t!(matches, "grpc-threads", usize).unwrap(); + server.http.set_cpu_pool_threads(num_threads); + // TODO: Reuse the same event loop in gRPC once this is exposed. + let _server = server.build().expect("server"); + + println!("Compute node listening at {}", port); + + // Start the Prometheus metrics endpoint. 
+    if let Ok(metrics_addr) = value_t!(matches, "metrics-addr", std::net::SocketAddr) {
+        instrumentation::start_http_server(metrics_addr);
+    }
+
+    loop {
+        thread::park();
+    }
+}
diff --git a/compute/src/server.rs b/compute/src/server.rs
new file mode 100644
index 00000000000..1d1768c23f0
--- /dev/null
+++ b/compute/src/server.rs
@@ -0,0 +1,468 @@
+use grpc;
+
+use protobuf;
+use protobuf::Message;
+
+use thread_local::ThreadLocal;
+
+use futures::Future;
+use futures::sync::oneshot;
+
+use time;
+
+use std;
+use std::error::Error as StdError;
+use std::fmt::Write;
+use std::sync::Mutex;
+use std::sync::mpsc::{channel, Receiver, Sender};
+
+use ekiden_compute_api::{CallContractRequest, CallContractResponse, Compute};
+use ekiden_consensus_api::{self, Consensus, ConsensusClient};
+use ekiden_core::enclave::api::IdentityProof;
+use ekiden_core::enclave::quote;
+use ekiden_core::error::{Error, Result};
+use ekiden_core::rpc::api;
+use ekiden_untrusted::{Enclave, EnclaveDb, EnclaveIdentity, EnclaveRpc};
+
+use super::ias::IAS;
+use super::instrumentation;
+
+/// This struct describes a call sent to the worker thread.
+struct QueuedRequest {
+    /// This is the request from the client.
+    rpc_request: CallContractRequest,
+    /// This is a channel where the worker should send the response. The channel is only
+    /// available until it has been used for sending a response and is None afterwards.
+    response_sender: Option<oneshot::Sender<Result<CallContractResponse>>>,
+}
+
+/// This struct associates a response with a request.
+struct QueuedResponse<'a> {
+    /// This is the request. Notably, it owns the channel where we
+    /// will be sending the response.
+    queued_request: &'a mut QueuedRequest,
+    /// This is the response.
+    response: Result<CallContractResponse>,
+}
+
+struct CachedStateInitialized {
+    encrypted_state: Vec<u8>,
+    height: u64,
+}
+
+struct ComputeServerWorker {
+    /// Consensus client.
+    consensus: Option<ConsensusClient>,
+    /// Contract running in an enclave.
+    contract: Enclave,
+    /// Enclave identity proof.
+    #[allow(dead_code)]
+    identity_proof: IdentityProof,
+    /// Cached state reconstituted from checkpoint and diffs. None if
+    /// cache or state is uninitialized.
+    cached_state: Option<CachedStateInitialized>,
+    /// Instrumentation objects.
+    ins: instrumentation::WorkerMetrics,
+    /// Maximum batch size.
+    max_batch_size: usize,
+    /// Maximum batch timeout.
+    max_batch_timeout: u64,
+}
+
+impl ComputeServerWorker {
+    fn new(
+        contract_filename: &str,
+        consensus_host: &str,
+        consensus_port: u16,
+        max_batch_size: usize,
+        max_batch_timeout: u64,
+        ias: &IAS,
+        saved_identity_path: &str,
+    ) -> Self {
+        let (contract, identity_proof) =
+            Self::create_contract(contract_filename, ias, saved_identity_path);
+        ComputeServerWorker {
+            contract,
+            identity_proof,
+            cached_state: None,
+            ins: instrumentation::WorkerMetrics::new(),
+            max_batch_size: max_batch_size,
+            max_batch_timeout: max_batch_timeout,
+            // Connect to consensus node
+            // TODO: Use TLS client.
+            consensus: match ConsensusClient::new_plain(
+                &consensus_host,
+                consensus_port,
+                Default::default(),
+            ) {
+                Ok(client) => Some(client),
+                _ => {
+                    eprintln!(
+                        "WARNING: Failed to create consensus client. No state will be fetched."
+                    );
+
+                    None
+                }
+            },
+        }
+    }
+
+    /// Create an instance of the contract.
+    fn create_contract(
+        contract_filename: &str,
+        ias: &IAS,
+        saved_identity_path: &str,
+    ) -> (Enclave, IdentityProof) {
+        // TODO: Handle contract initialization errors.
+        let contract = Enclave::new(contract_filename).unwrap();
+
+        // Initialize contract.
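+        // `identity_init` loads the persistent enclave identity from
+        // `saved_identity_path` (creating it on first run, per the
+        // `--identity-file` help text) and returns a proof that is verified
+        // below via `quote::verify`.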
+        let identity_proof = contract
+            .identity_init(ias, saved_identity_path.as_ref())
+            .expect("EnclaveIdentity::identity_init");
+
+        // Show contract MRENCLAVE in hex format.
+        let iai = quote::verify(&identity_proof).expect("Enclave identity proof invalid");
+        let mut mr_enclave = String::new();
+        for &byte in &iai.mr_enclave[..] {
+            write!(&mut mr_enclave, "{:02x}", byte).unwrap();
+        }
+
+        println!("Loaded contract with MRENCLAVE: {}", mr_enclave);
+
+        (contract, identity_proof)
+    }
+
+    #[cfg(not(feature = "no_cache"))]
+    fn get_cached_state_height(&self) -> Option<u64> {
+        match self.cached_state.as_ref() {
+            Some(csi) => Some(csi.height),
+            None => None,
+        }
+    }
+
+    fn set_cached_state(&mut self, checkpoint: &ekiden_consensus_api::Checkpoint) -> Result<()> {
+        self.cached_state = Some(CachedStateInitialized {
+            encrypted_state: checkpoint.get_payload().to_vec(),
+            height: checkpoint.get_height(),
+        });
+        Ok(())
+    }
+
+    fn advance_cached_state(&mut self, diffs: &[Vec<u8>]) -> Result<Vec<u8>> {
+        #[cfg(feature = "no_diffs")]
+        assert!(
+            diffs.is_empty(),
+            "attempted to apply diffs in a no_diffs build"
+        );
+
+        let csi = self.cached_state.as_mut().ok_or(Error::new(
+            "advance_cached_state called with uninitialized cached state",
+        ))?;
+
+        for diff in diffs {
+            csi.encrypted_state = self.contract.db_state_apply(&csi.encrypted_state, &diff)?;
+            csi.height += 1;
+        }
+
+        Ok(csi.encrypted_state.clone())
+    }
+
+    fn call_contract_batch_fallible<'a>(
+        &mut self,
+        request_batch: &'a mut [QueuedRequest],
+    ) -> Result<Vec<QueuedResponse<'a>>> {
+        // Get state updates from consensus
+        let encrypted_state_opt = if self.consensus.is_some() {
+            let _consensus_get_timer = self.ins.consensus_get_time.start_timer();
+
+            #[cfg(not(feature = "no_cache"))]
+            let cached_state_height = self.get_cached_state_height();
+            #[cfg(feature = "no_cache")]
+            let cached_state_height = None;
+
+            match cached_state_height {
+                Some(height) => {
+                    let (_, consensus_response, _) = self.consensus
+                        .as_ref()
+                        .unwrap()
+                        .get_diffs(grpc::RequestOptions::new(), {
+                            let mut consensus_request =
+                                ekiden_consensus_api::GetDiffsRequest::new();
+                            consensus_request.set_since_height(height);
+                            consensus_request
+                        })
+                        .wait()?;
+                    if consensus_response.has_checkpoint() {
+                        self.set_cached_state(consensus_response.get_checkpoint())?;
+                    }
+                    Some(self.advance_cached_state(consensus_response.get_diffs())?)
+                }
+                None => {
+                    if let Ok((_, consensus_response, _)) = self.consensus
+                        .as_ref()
+                        .unwrap()
+                        .get(
+                            grpc::RequestOptions::new(),
+                            ekiden_consensus_api::GetRequest::new(),
+                        )
+                        .wait()
+                    {
+                        self.set_cached_state(consensus_response.get_checkpoint())?;
+                        Some(self.advance_cached_state(consensus_response.get_diffs())?)
+                    } else {
+                        // We should bail if there was an error other
+                        // than the state not being initialized. But
+                        // don't go fixing this. There's another
+                        // resolution planned in #95.
+                        None
+                    }
+                }
+            }
+        } else {
+            None
+        };
+
+        #[cfg(not(feature = "no_diffs"))]
+        let orig_encrypted_state_opt = encrypted_state_opt.clone();
+        #[cfg(feature = "no_diffs")]
+        let orig_encrypted_state_opt = None;
+
+        // Call contract with batch of requests.
+        let mut enclave_request = api::EnclaveRequest::new();
+
+        // Prepare batch of requests.
+        {
+            let client_requests = enclave_request.mut_client_request();
+            for ref queued_request in request_batch.iter() {
+                // TODO: Why doesn't enclave request contain bytes directly?
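+                // Each client payload is itself a serialized protobuf message,
+                // so it is re-parsed here and appended to the batched enclave
+                // request.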
+                let client_request =
+                    protobuf::parse_from_bytes(queued_request.rpc_request.get_payload())?;
+                client_requests.push(client_request);
+            }
+        }
+
+        // Add state if it is available.
+        if let Some(encrypted_state) = encrypted_state_opt {
+            self.contract.db_state_set(&encrypted_state)?;
+        }
+
+        let enclave_request_bytes = enclave_request.write_to_bytes()?;
+        let enclave_response_bytes = {
+            let _enclave_timer = self.ins.req_time_enclave.start_timer();
+            self.contract.call_raw(enclave_request_bytes)
+        }?;
+
+        let enclave_response: api::EnclaveResponse =
+            protobuf::parse_from_bytes(&enclave_response_bytes)?;
+
+        // Assert equal number of responses, fail otherwise (corrupted response).
+        if enclave_response.get_client_response().len() != request_batch.len() {
+            return Err(Error::new(
+                "Corrupted response (response count != request count)",
+            ));
+        }
+
+        let mut response_batch = vec![];
+        for (index, queued_request) in request_batch.iter_mut().enumerate() {
+            let mut response = CallContractResponse::new();
+            // TODO: Why doesn't enclave response contain bytes directly?
+            response
+                .set_payload((&enclave_response.get_client_response()[index]).write_to_bytes()?);
+
+            response_batch.push(QueuedResponse {
+                queued_request,
+                response: Ok(response),
+            });
+        }
+
+        // Check if any state was produced. In case no state was produced, this means that
+        // no request caused a state update and thus no state update is required.
+        let encrypted_state = self.contract.db_state_get()?;
+        if !encrypted_state.is_empty() {
+            let _consensus_set_timer = self.ins.consensus_set_time.start_timer();
+            match orig_encrypted_state_opt {
+                Some(orig_encrypted_state) => {
+                    let diff_res = self.contract
+                        .db_state_diff(&orig_encrypted_state, &encrypted_state)?;
+
+                    self.consensus
+                        .as_ref()
+                        .unwrap()
+                        .add_diff(grpc::RequestOptions::new(), {
+                            let mut add_diff_req = ekiden_consensus_api::AddDiffRequest::new();
+                            add_diff_req.set_payload(diff_res);
+                            add_diff_req
+                        })
+                        .wait()?;
+                }
+                None => {
+                    let mut consensus_replace_request = ekiden_consensus_api::ReplaceRequest::new();
+                    consensus_replace_request.set_payload(encrypted_state);
+
+                    self.consensus
+                        .as_ref()
+                        .unwrap()
+                        .replace(grpc::RequestOptions::new(), consensus_replace_request)
+                        .wait()?;
+                }
+            }
+        }
+
+        Ok(response_batch)
+    }
+
+    fn call_contract_batch(&mut self, mut request_batch: Vec<QueuedRequest>) {
+        // Contains a batch-wide error if one has occurred.
+        let batch_error: Option<Error>;
+
+        {
+            match self.call_contract_batch_fallible(&mut request_batch) {
+                Ok(response_batch) => {
+                    // No batch-wide errors. Send out per-call responses.
+                    for queued_response in response_batch {
+                        let sender = queued_response
+                            .queued_request
+                            .response_sender
+                            .take()
+                            .unwrap();
+                        sender.send(queued_response.response).unwrap();
+                    }
+
+                    return;
+                }
+                Err(error) => {
+                    // Batch-wide error has occurred. We cannot handle the error here as we
+                    // must first drop the mutable request_batch reference.
+                    eprintln!("compute: batch-wide error {:?}", error);
+
+                    batch_error = Some(error);
+                }
+            }
+        }
+
+        // Send batch-wide error to all clients.
+        let batch_error = batch_error.as_ref().unwrap();
+        for mut queued_request in request_batch {
+            let sender = queued_request.response_sender.take().unwrap();
+            sender.send(Err(batch_error.clone())).unwrap();
+        }
+    }
+
+    /// Process requests from a receiver until the channel closes.
+    fn work(&mut self, request_receiver: Receiver<QueuedRequest>) {
+        // Block for the next call.
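+        // `recv` parks the worker until the first request arrives; the inner
+        // loop below then drains the channel to grow the batch.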
+        while let Ok(queued_request) = request_receiver.recv() {
+            self.ins.reqs_batches_started.inc();
+            let _batch_timer = self.ins.req_time_batch.start_timer();
+
+            let mut request_batch = Vec::new();
+            request_batch.push(queued_request);
+
+            // Queue up requests up to MAX_BATCH_SIZE, but for at most MAX_BATCH_TIMEOUT.
+            let batch_start = time::precise_time_ns();
+            while request_batch.len() < self.max_batch_size
+                && time::precise_time_ns() - batch_start < self.max_batch_timeout
+            {
+                while request_batch.len() < self.max_batch_size {
+                    if let Ok(queued_request) = request_receiver.try_recv() {
+                        request_batch.push(queued_request);
+                    } else {
+                        break;
+                    }
+                }
+
+                // Yield thread for 10 ms while we wait.
+                std::thread::sleep(std::time::Duration::from_millis(10));
+            }
+
+            // Process the requests.
+            self.call_contract_batch(request_batch);
+        }
+    }
+}
+
+pub struct ComputeServerImpl {
+    /// Channel for submitting requests to the worker. This is only used to
+    /// initialize a thread-local clone of the sender handle, so that there
+    /// is no need for locking during request processing.
+    request_sender: Mutex<Sender<QueuedRequest>>,
+    /// Thread-local channel for submitting requests to the worker.
+    tl_request_sender: ThreadLocal<Sender<QueuedRequest>>,
+    /// Instrumentation objects.
+    ins: instrumentation::HandlerMetrics,
+}
+
+impl ComputeServerImpl {
+    /// Create new compute server instance.
+    pub fn new(
+        contract_filename: &str,
+        consensus_host: &str,
+        consensus_port: u16,
+        max_batch_size: usize,
+        max_batch_timeout: u64,
+        ias: IAS,
+        saved_identity_path: &str,
+    ) -> Self {
+        let contract_filename_owned = String::from(contract_filename);
+        let consensus_host_owned = String::from(consensus_host);
+        let saved_identity_path_owned = String::from(saved_identity_path);
+
+        let (request_sender, request_receiver) = channel();
+        // move request_receiver
+        std::thread::spawn(move || {
+            ComputeServerWorker::new(
+                &contract_filename_owned,
+                &consensus_host_owned,
+                consensus_port,
+                max_batch_size,
+                max_batch_timeout,
+                &ias,
+                &saved_identity_path_owned,
+            ).work(request_receiver);
+        });
+
+        ComputeServerImpl {
+            request_sender: Mutex::new(request_sender),
+            tl_request_sender: ThreadLocal::new(),
+            ins: instrumentation::HandlerMetrics::new(),
+        }
+    }
+
+    /// Get thread-local request sender.
+    fn get_request_sender(&self) -> &Sender<QueuedRequest> {
+        self.tl_request_sender.get_or(|| {
+            // Only take the lock when we need to clone the sender for a new thread.
+            let request_sender = self.request_sender.lock().unwrap();
+            Box::new(request_sender.clone())
+        })
+    }
+}
+
+impl Compute for ComputeServerImpl {
+    fn call_contract(
+        &self,
+        _options: grpc::RequestOptions,
+        rpc_request: CallContractRequest,
+    ) -> grpc::SingleResponse<CallContractResponse> {
+        // Instrumentation.
+        self.ins.reqs_received.inc();
+        let _client_timer = self.ins.req_time_client.start_timer();
+
+        // Send request to worker thread.
+        let (response_sender, response_receiver) = oneshot::channel();
+        self.get_request_sender()
+            .send(QueuedRequest {
+                rpc_request,
+                response_sender: Some(response_sender),
+            })
+            .unwrap();
+
+        // Prepare response future.
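+        // Three outcomes are folded into the gRPC response: a successful
+        // enclave call, an enclave-side error, and a cancelled oneshot channel.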
+ grpc::SingleResponse::no_metadata(response_receiver.then(|result| match result { + Ok(Ok(response)) => Ok(response), + Ok(Err(error)) => Err(grpc::Error::Panic(error.description().to_owned())), + Err(error) => Err(grpc::Error::Panic(error.description().to_owned())), + })) + } +} diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml new file mode 100644 index 00000000000..2f11e3deaac --- /dev/null +++ b/consensus/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "ekiden-consensus" +version = "0.1.0-alpha.1" +authors = ["Ekiden Developers "] +description = "Ekiden consensus node" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" +build = "build.rs" + +[dependencies] +ekiden-consensus-api = { path = "./api", version = "0.1.0-alpha.1" } +abci = { git = "https://github.com/ekiden/tendermint-abci" } +clap = "2.29.1" +futures = "0.1" +grpc = "0.2.1" +hex = "0.3.1" +hyper = "0.11" +protobuf = "1.4.2" +tls-api = "0.1.12" +tokio-core = "0.1" +tokio-proto = "0.1" + +[build-dependencies] +ekiden-tools = { path = "../tools", version = "0.1.0-alpha.1" } +protoc-rust = "1.4" +protoc-rust-grpc = "0.2.1" + +[target.'cfg(not(target_env = "sgx"))'.build-dependencies] +rand = "0.4.2" + +[lib] +path = "src/lib.rs" + +[[bin]] +name = "ekiden-consensus" +path = "src/main.rs" diff --git a/consensus/Makefile.toml b/consensus/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/consensus/Makefile.toml @@ -0,0 +1 @@ +extend = "../Makefile.toml" diff --git a/consensus/api/Cargo.toml b/consensus/api/Cargo.toml new file mode 100644 index 00000000000..210c3d3bee1 --- /dev/null +++ b/consensus/api/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "ekiden-consensus-api" +version = "0.1.0-alpha.1" +authors = ["Ekiden Developers "] +description = "Ekiden consensus node API" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" +build = "build.rs" + +[dependencies] +protobuf = "1.4.2" +grpc = "0.2.1" +tls-api = "0.1.12" + +[build-dependencies] +ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" } +protoc-rust = "1.4" +protoc-rust-grpc = "0.2.1" diff --git a/consensus/api/Makefile.toml b/consensus/api/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/consensus/api/Makefile.toml @@ -0,0 +1 @@ +extend = "../Makefile.toml" diff --git a/consensus/api/build.rs b/consensus/api/build.rs new file mode 100644 index 00000000000..d9c9448a8df --- /dev/null +++ b/consensus/api/build.rs @@ -0,0 +1,15 @@ +extern crate ekiden_tools; +extern crate protoc_rust_grpc; + +fn main() { + // Generate module file. 
+ // Must be done first to create src/generated directory + ekiden_tools::generate_mod("src/generated", &["consensus", "consensus_grpc"]); + + protoc_rust_grpc::run(protoc_rust_grpc::Args { + out_dir: "src/generated/", + includes: &[], + input: &["src/consensus.proto"], + rust_protobuf: true, + }).expect("protoc-rust-grpc"); +} diff --git a/consensus/api/src/consensus.proto b/consensus/api/src/consensus.proto new file mode 100644 index 00000000000..e29e1ab0e6a --- /dev/null +++ b/consensus/api/src/consensus.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package consensus; + +message StoredTx { + oneof stored { + bytes replace = 1; + bytes diff = 2; + bytes checkpoint = 3; + } +} + +service Consensus { + rpc Get (GetRequest) returns (GetResponse) {} + rpc GetDiffs (GetDiffsRequest) returns (GetDiffsResponse) {} + rpc Replace (ReplaceRequest) returns (ReplaceResponse) {} + rpc AddDiff (AddDiffRequest) returns (AddDiffResponse) {} +} + +message Checkpoint { + bytes payload = 1; + uint64 height = 2; +} + +message GetRequest { +} + +message GetResponse { + Checkpoint checkpoint = 1; + repeated bytes diffs = 2; +} + +message GetDiffsRequest { + uint64 since_height = 1; +} + +message GetDiffsResponse { + Checkpoint checkpoint = 1; + repeated bytes diffs = 2; +} + +message ReplaceRequest { + bytes payload = 1; +} + +message ReplaceResponse { +} + +message AddDiffRequest { + bytes payload = 1; +} + +message AddDiffResponse { +} diff --git a/consensus/api/src/lib.rs b/consensus/api/src/lib.rs new file mode 100644 index 00000000000..e94c609df7a --- /dev/null +++ b/consensus/api/src/lib.rs @@ -0,0 +1,8 @@ +extern crate grpc; +extern crate protobuf; +extern crate tls_api; + +mod generated; + +pub use generated::consensus::*; +pub use generated::consensus_grpc::*; diff --git a/consensus/benches/benchmarks.rs b/consensus/benches/benchmarks.rs new file mode 100644 index 00000000000..d325882f2be --- /dev/null +++ b/consensus/benches/benchmarks.rs @@ -0,0 +1,83 @@ +#![feature(test)] + +extern crate consensus as lib; +extern crate grpc; +extern crate rand; +extern crate test; + +use rand::Rng; +use std::{thread, time}; +use test::Bencher; + +use lib::generated::consensus; +use lib::generated::consensus_grpc; +use lib::generated::consensus_grpc::Consensus; + +fn spawn_client_server() -> consensus_grpc::ConsensusClient { + let config = lib::Config { + tendermint_host: String::from("localhost"), + tendermint_port: 46657, + tendermint_abci_port: 46658, + grpc_port: 9002, + no_tendermint: true, + artificial_delay: 100, + }; + let client_port = config.grpc_port; + let _server_handle = thread::spawn(move || { + lib::run(&config).unwrap(); + }); + // Give time for Tendermint to connect + thread::sleep(time::Duration::from_millis(3000)); + + consensus_grpc::ConsensusClient::new_plain("localhost", client_port, Default::default()) + .unwrap() +} + +#[bench] +fn benchmark_get(b: &mut Bencher) { + let client = spawn_client_server(); + + // Set state to `helloworld` + let mut req = consensus::ReplaceRequest::new(); + req.set_payload(String::from("helloworld").into_bytes()); + client + .replace(grpc::RequestOptions::new(), req) + .wait() + .unwrap(); + + b.iter(move || { + let req = consensus::GetRequest::new(); + let (_, resp, _) = client.get(grpc::RequestOptions::new(), req).wait().unwrap(); + assert_eq!( + resp.get_checkpoint().get_payload(), + String::from("helloworld").as_bytes() + ); + }); + + // See https://github.com/sunblaze-ucb/ekiden/issues/223 + // We can't gracefully shut down the server yet. 
+ panic!("Test passed, just need to panic to get out"); + //server_handle.join(); +} + +#[bench] +fn benchmark_replace(b: &mut Bencher) { + let client = spawn_client_server(); + b.iter(move || { + let s = rand::thread_rng() + .gen_ascii_chars() + .take(10) + .collect::(); + let mut req = consensus::ReplaceRequest::new(); + req.set_payload(s.into_bytes()); + client + .replace(grpc::RequestOptions::new(), req) + .wait() + .unwrap(); + }); + + // See https://github.com/sunblaze-ucb/ekiden/issues/223 + // We can't gracefully shut down the server yet. + panic!("Test passed, just need to panic to get out"); + //server_handle.join(); +} diff --git a/consensus/build.rs b/consensus/build.rs new file mode 100644 index 00000000000..a95098d253f --- /dev/null +++ b/consensus/build.rs @@ -0,0 +1,15 @@ +extern crate ekiden_tools; +extern crate protoc_rust_grpc; + +fn main() { + // Generate module file. + // Must be done first to create src/generated directory + ekiden_tools::generate_mod("src/generated", &["tendermint", "tendermint_grpc"]); + + protoc_rust_grpc::run(protoc_rust_grpc::Args { + out_dir: "src/generated/", + includes: &[], + input: &["src/tendermint.proto"], + rust_protobuf: true, + }).expect("protoc-rust-grpc"); +} diff --git a/consensus/src/ekidenmint.rs b/consensus/src/ekidenmint.rs new file mode 100644 index 00000000000..bb2bc3c106d --- /dev/null +++ b/consensus/src/ekidenmint.rs @@ -0,0 +1,159 @@ +// Tendermint ABCI Application for Ekiden +// This is a short-lived facade object, so all state needs to be protected by Arc/Mutex +// For reference on how to use the ABCI +// https://github.com/tendermint/abci +// https://github.com/tendermint/basecoin/ +use abci::application::Application; +use abci::types; +use protobuf; +use std; +use std::sync::{Arc, Mutex}; + +use ekiden_consensus_api::StoredTx; + +use state; + +//#[derive(Copy, Clone)] +#[derive(Clone)] +pub struct Ekidenmint { + state: Arc>, +} + +impl Ekidenmint { + pub fn new(state: Arc>) -> Ekidenmint { + Ekidenmint { state: state } + } + + pub fn deliver_tx_fallible(&self, tx: &[u8]) -> Result<(), Box> { + state::State::check_tx(tx)?; + let mut stored: StoredTx = protobuf::parse_from_bytes(tx)?; + // Set the state + let mut s = self.state.lock().unwrap(); + if stored.has_replace() { + let current_height = match s.everything.as_ref() { + Some(si) => si.checkpoint_height + si.diffs.len() as u64, + None => 0, + }; + s.everything = Some(state::StateInitialized { + checkpoint: stored.take_replace(), + checkpoint_height: current_height + 1, + diffs: Vec::new(), + }); + Ok(()) + } else if stored.has_diff() { + let si = s.everything + .as_mut() + .ok_or::>(From::from( + "Can't add diff to uninitialized state.", + ))?; + si.diffs.push(stored.take_diff()); + Ok(()) + } else if stored.has_checkpoint() { + let si = s.everything + .as_mut() + .ok_or::>(From::from( + "Can't checkpoint uninitialized state.", + ))?; + si.checkpoint = stored.take_checkpoint(); + si.checkpoint_height += si.diffs.len() as u64; + si.diffs.clear(); + Ok(()) + } else { + Err(From::from("Unrecognized StoredTx variant")) + } + } +} + +impl Application for Ekidenmint { + fn info(&self, _req: &types::RequestInfo) -> types::ResponseInfo { + // @todo - supposed to return information about app state + // https://github.com/tendermint/abci + println!("info"); + types::ResponseInfo::new() + } + + fn set_option(&self, req: &types::RequestSetOption) -> types::ResponseSetOption { + // @todo - Set application options + // https://github.com/tendermint/abci + println!("set_option 
{}:{}", req.get_key(), req.get_value()); + types::ResponseSetOption::new() + } + + fn query(&self, _p: &types::RequestQuery) -> types::ResponseQuery { + // @todo - handle query requests + // https://github.com/tendermint/abci + println!("query"); + types::ResponseQuery::new() + } + + fn check_tx(&self, p: &types::RequestCheckTx) -> types::ResponseCheckTx { + let mut resp = types::ResponseCheckTx::new(); + match state::State::check_tx(p.get_tx()) { + Ok(_) => { + resp.set_code(types::CodeType::OK); + } + Err(error) => { + resp.set_code(types::CodeType::BaseInvalidInput); + resp.set_log(error); + } + } + return resp; + } + + fn init_chain(&self, _p: &types::RequestInitChain) -> types::ResponseInitChain { + // Plugin support in https://github.com/tendermint/basecoin/blob/master/app/app.go + //println!("init_chain"); + types::ResponseInitChain::new() + } + + fn begin_block(&self, _p: &types::RequestBeginBlock) -> types::ResponseBeginBlock { + // Plugin support in https://github.com/tendermint/basecoin/blob/master/app/app.go + //println!("begin_block"); + types::ResponseBeginBlock::new() + } + + fn deliver_tx(&self, p: &types::RequestDeliverTx) -> types::ResponseDeliverTx { + println!("deliver_tx"); + let mut resp = types::ResponseDeliverTx::new(); + let tx = p.get_tx(); + match self.deliver_tx_fallible(tx) { + Ok(_) => { + resp.set_code(types::CodeType::OK); + } + Err(e) => { + resp.set_code(types::CodeType::BaseEncodingError); + resp.set_log(e.description().to_owned()); + } + } + return resp; + } + + fn end_block(&self, _p: &types::RequestEndBlock) -> types::ResponseEndBlock { + // Plugin support in https://github.com/tendermint/basecoin/blob/master/app/app.go + //println!("end_block"); + types::ResponseEndBlock::new() + } + + fn commit(&self, _p: &types::RequestCommit) -> types::ResponseCommit { + // RequestCommit is empty + println!("commit"); + let resp = types::ResponseCommit::new(); + // @todo - respond with Merkle root hash of the application state in `data` + //resp.set_code(types::CodeType::OK); + //resp.set_data(String::from("test data").into_bytes()); + //resp.set_log(String::from("test log")); + return resp; + } + + fn echo(&self, p: &types::RequestEcho) -> types::ResponseEcho { + let mut response = types::ResponseEcho::new(); + response.set_message(p.get_message().to_owned()); + return response; + } + + fn flush(&self, _p: &types::RequestFlush) -> types::ResponseFlush { + // Appears to be unused in https://github.com/tendermint/basecoin/blob/master/app/app.go + //println!("flush"); + types::ResponseFlush::new() + } +} diff --git a/consensus/src/errors.rs b/consensus/src/errors.rs new file mode 100644 index 00000000000..e49941d5e3b --- /dev/null +++ b/consensus/src/errors.rs @@ -0,0 +1,52 @@ +use std; +use std::string; + +use hyper; + +#[derive(Debug)] +pub enum Error { + HyperError(hyper::Error), + HyperUriError(hyper::error::UriError), + StringError(string::FromUtf8Error), +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + std::fmt::Debug::fmt(self, f) + } +} + +impl std::error::Error for Error { + fn description(&self) -> &str { + match self { + &Error::HyperError(ref e) => e.description(), + &Error::HyperUriError(ref e) => e.description(), + &Error::StringError(ref e) => e.description(), + } + } + fn cause(&self) -> Option<&std::error::Error> { + match self { + &Error::HyperError(ref e) => Some(e), + &Error::HyperUriError(ref e) => Some(e), + &Error::StringError(ref e) => Some(e), + } + } +} + +impl From<hyper::Error> for Error { + 
fn from(error: hyper::Error) -> Self { + Error::HyperError(error) + } +} + +impl From<hyper::error::UriError> for Error { + fn from(error: hyper::error::UriError) -> Self { + Error::HyperUriError(error) + } +} + +impl From<string::FromUtf8Error> for Error { + fn from(error: string::FromUtf8Error) -> Self { + Error::StringError(error) + } +} diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs new file mode 100644 index 00000000000..498deb25c21 --- /dev/null +++ b/consensus/src/lib.rs @@ -0,0 +1,102 @@ +extern crate abci; +extern crate futures; +extern crate grpc; +extern crate hyper; +extern crate protobuf; +extern crate tls_api; +extern crate tokio_core; +extern crate tokio_proto; + +extern crate ekiden_consensus_api; + +mod ekidenmint; +mod errors; +mod tendermint; +pub mod generated; +mod rpc; +mod state; + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::sync::{Arc, Mutex}; +use std::sync::mpsc; +use std::thread; +use std::time; + +use abci::server::{AbciProto, AbciService}; +use tokio_proto::TcpServer; + +use ekiden_consensus_api::ConsensusServer; +use errors::Error; +use generated::tendermint::ResponseBroadcastTx; +use rpc::ConsensusServerImpl; +use state::State; +use tendermint::TendermintProxy; + +#[derive(Debug)] +pub struct Config { + pub tendermint_host: String, + pub tendermint_port: u16, + pub tendermint_abci_port: u16, + pub grpc_port: u16, + pub no_tendermint: bool, + pub artificial_delay: u64, +} + +pub fn run(config: &Config) -> Result<(), Box<std::error::Error>> { + // Create a shared State object and ekidenmint + let state = Arc::new(Mutex::new(State::new())); + let delay = time::Duration::from_millis(config.artificial_delay); + + // Create new channel (gRPC broadcast => Tendermint/Ekidenmint). + let (sender, receiver) = mpsc::channel(); + + // Start the Ekiden consensus gRPC server. + let mut rpc_server = grpc::ServerBuilder::new_plain(); + rpc_server.http.set_port(config.grpc_port); + rpc_server.http.set_cpu_pool_threads(1); + rpc_server.add_service(ConsensusServer::new_service_def(ConsensusServerImpl::new( + Arc::clone(&state), + sender, + ))); + let _server = rpc_server.build().expect("rpc_server"); + + // Short circuit Tendermint if `-x` is enabled + if config.no_tendermint { + let app = ekidenmint::Ekidenmint::new(Arc::clone(&state)); + // Setup short circuit + for req in receiver { + thread::sleep(delay); + app.deliver_tx_fallible(&req.payload).unwrap(); + req.response.send(Ok(ResponseBroadcastTx::new())).unwrap(); + } + return Ok(()); + } + + // Create Tendermint proxy/app. 
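+ // Handing `receiver` to the proxy moves it onto the proxy's worker thread, so + // every broadcast submitted through the gRPC service from here on is forwarded + // to Tendermint.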
+ let _tendermint = + TendermintProxy::new(&config.tendermint_host, config.tendermint_port, receiver); + + // Start the Tendermint ABCI listener + let abci_listen_addr = SocketAddr::new( + IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), + config.tendermint_abci_port, + ); + let mut app_server = TcpServer::new(AbciProto, abci_listen_addr); + app_server.threads(1); + app_server.serve(move || { + Ok(AbciService { + app: Box::new(ekidenmint::Ekidenmint::new(Arc::clone(&state))), + }) + }); + Ok(()) +} + +#[cfg(test)] +mod tests { + //use super::generated::consensus; + + #[test] + fn empty() { + assert_eq!(8, 8) + } +} diff --git a/consensus/src/main.rs b/consensus/src/main.rs new file mode 100644 index 00000000000..5fb74da5ea3 --- /dev/null +++ b/consensus/src/main.rs @@ -0,0 +1,67 @@ +#[macro_use] +extern crate clap; +extern crate ekiden_consensus; + +use clap::{App, Arg}; + +fn main() { + let matches = App::new("Ekiden Consensus Node") + .version("0.1.0") + .about("Ekiden consensus node") + .arg( + Arg::with_name("tendermint-host") + .long("tendermint-host") + .takes_value(true) + .default_value("localhost"), + ) + .arg( + Arg::with_name("tendermint-port") + .long("tendermint-port") + .takes_value(true) + .default_value("46657"), + ) + .arg( + Arg::with_name("tendermint-abci-port") + .long("tendermint-abci-port") + .takes_value(true) + .default_value("46658"), + ) + .arg( + Arg::with_name("grpc-port") + .long("grpc-port") + .takes_value(true) + .default_value("9002"), + ) + .arg( + Arg::with_name("no-tendermint") + .long("no-tendermint") + .short("x"), + ) + .arg( + Arg::with_name("artificial-delay") + .long("artificial-delay") + .help("Artificial delay injected before delivering transactions (ms)") + .takes_value(true) + .default_value("0"), + ) + .get_matches(); + + let config = ekiden_consensus::Config { + tendermint_host: matches.value_of("tendermint-host").unwrap().to_string(), + tendermint_port: value_t!(matches, "tendermint-port", u16).unwrap_or_else(|e| e.exit()), + tendermint_abci_port: value_t!(matches, "tendermint-abci-port", u16) + .unwrap_or_else(|e| e.exit()), + grpc_port: value_t!(matches, "grpc-port", u16).unwrap_or_else(|e| e.exit()), + no_tendermint: { matches.occurrences_of("no-tendermint") > 0 }, + artificial_delay: value_t!(matches, "artificial-delay", u64).unwrap_or_else(|e| e.exit()), + }; + + println!( + "Ekiden Consensus Node starting on port {} ... ", + config.grpc_port + ); + if let Err(e) = ekiden_consensus::run(&config) { + eprintln!("Application error: {}", e); + std::process::exit(1); + } +} diff --git a/consensus/src/rpc.rs b/consensus/src/rpc.rs new file mode 100644 index 00000000000..8f9f58f95ea --- /dev/null +++ b/consensus/src/rpc.rs @@ -0,0 +1,153 @@ +use std; +use std::sync::{mpsc, Arc, Mutex}; + +use grpc; +use protobuf::{self, Message}; + +use ekiden_consensus_api::{self, Consensus}; + +use super::state; + +use super::tendermint::BroadcastRequest; + +pub struct ConsensusServerImpl { + state: Arc<Mutex<state::State>>, + // TODO: Clone the sender for each thread and store it in thread-local storage. 
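+ // Until then, note that `mpsc::Sender` is `Send` but not `Sync`, while gRPC + // method handlers can run concurrently, hence the `Mutex` around the sender.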
+ broadcast_channel: Mutex<mpsc::Sender<BroadcastRequest>>, +} + +impl ConsensusServerImpl { + pub fn new( + state: Arc<Mutex<state::State>>, + broadcast_channel: mpsc::Sender<BroadcastRequest>, + ) -> ConsensusServerImpl { + ConsensusServerImpl { + state: state, + broadcast_channel: Mutex::new(broadcast_channel), + } + } + + fn replace_fallible( + &self, + payload: Vec<u8>, + ) -> Result<ekiden_consensus_api::ReplaceResponse, Box<std::error::Error>> { + let mut stored = ekiden_consensus_api::StoredTx::new(); + stored.set_replace(payload); + let stored_bytes = stored.write_to_bytes()?; + + // check attestation - early reject + state::State::check_tx(&stored_bytes)?; + + // Create a one-shot channel for response. + let (tx, rx) = mpsc::channel(); + let req = BroadcastRequest { + response: tx, + payload: stored_bytes, + }; + + let broadcast_channel = self.broadcast_channel.lock().unwrap(); + broadcast_channel.send(req).unwrap(); + rx.recv().unwrap()?; + + Ok(ekiden_consensus_api::ReplaceResponse::new()) + } + + fn add_diff_fallible( + &self, + payload: Vec<u8>, + ) -> Result<ekiden_consensus_api::AddDiffResponse, Box<std::error::Error>> { + let mut stored = ekiden_consensus_api::StoredTx::new(); + stored.set_diff(payload); + let stored_bytes = stored.write_to_bytes()?; + + // check attestation - early reject + state::State::check_tx(&stored_bytes)?; + + // Create a one-shot channel for response. + let (tx, rx) = mpsc::channel(); + let req = BroadcastRequest { + response: tx, + payload: stored_bytes, + }; + + let broadcast_channel = self.broadcast_channel.lock().unwrap(); + broadcast_channel.send(req).unwrap(); + rx.recv().unwrap()?; + + Ok(ekiden_consensus_api::AddDiffResponse::new()) + } +} + +impl Consensus for ConsensusServerImpl { + fn get( + &self, + _options: grpc::RequestOptions, + _req: ekiden_consensus_api::GetRequest, + ) -> grpc::SingleResponse<ekiden_consensus_api::GetResponse> { + let s = self.state.lock().unwrap(); + match s.everything { + Some(ref si) => { + let mut response = ekiden_consensus_api::GetResponse::new(); + { + let mut checkpoint = response.mut_checkpoint(); + checkpoint.set_payload(si.checkpoint.clone()); + checkpoint.set_height(si.checkpoint_height); + } + response.set_diffs(protobuf::RepeatedField::from_vec(si.diffs.clone())); + grpc::SingleResponse::completed(response) + } + None => grpc::SingleResponse::err(grpc::Error::Other("State not initialized.")), + } + } + + fn get_diffs( + &self, + _options: grpc::RequestOptions, + req: ekiden_consensus_api::GetDiffsRequest, + ) -> grpc::SingleResponse<ekiden_consensus_api::GetDiffsResponse> { + let s = self.state.lock().unwrap(); + match s.everything { + Some(ref si) => { + let mut response = ekiden_consensus_api::GetDiffsResponse::new(); + if si.checkpoint_height > req.get_since_height() { + // We don't have diffs going back far enough. 
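+ // Worked example (hypothetical numbers): with checkpoint_height = 5, a + // client asking for diffs since height 3 predates the checkpoint, so it + // is sent the full checkpoint plus all stored diffs instead of a delta.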
+ { + let mut checkpoint = response.mut_checkpoint(); + checkpoint.set_payload(si.checkpoint.clone()); + checkpoint.set_height(si.checkpoint_height); + } + response.set_diffs(protobuf::RepeatedField::from_vec(si.diffs.clone())); + } else { + let num_known = req.get_since_height() - si.checkpoint_height; + response.set_diffs(protobuf::RepeatedField::from_vec( + si.diffs[num_known as usize..].to_vec(), + )); + } + grpc::SingleResponse::completed(response) + } + None => grpc::SingleResponse::err(grpc::Error::Other("State not initialized.")), + } + } + + fn replace( + &self, + _options: grpc::RequestOptions, + req: ekiden_consensus_api::ReplaceRequest, + ) -> grpc::SingleResponse<ekiden_consensus_api::ReplaceResponse> { + match self.replace_fallible(req.get_payload().to_vec()) { + Ok(res) => grpc::SingleResponse::completed(res), + Err(e) => grpc::SingleResponse::err(grpc::Error::Panic(e.description().to_owned())), + } + } + + fn add_diff( + &self, + _options: grpc::RequestOptions, + req: ekiden_consensus_api::AddDiffRequest, + ) -> grpc::SingleResponse<ekiden_consensus_api::AddDiffResponse> { + match self.add_diff_fallible(req.get_payload().to_vec()) { + Ok(res) => grpc::SingleResponse::completed(res), + Err(e) => grpc::SingleResponse::err(grpc::Error::Panic(e.description().to_owned())), + } + } +} diff --git a/consensus/src/state.rs b/consensus/src/state.rs new file mode 100644 index 00000000000..0c08e2bfaac --- /dev/null +++ b/consensus/src/state.rs @@ -0,0 +1,21 @@ +pub struct StateInitialized { + pub checkpoint: Vec<u8>, + pub checkpoint_height: u64, + pub diffs: Vec<Vec<u8>>, +} + +pub struct State { + pub everything: Option<StateInitialized>, +} + +impl State { + pub fn new() -> Self { + State { everything: None } + } + + pub fn check_tx(_tx: &[u8]) -> Result<(), String> { + // @todo - check attestations + // @todo - check that this was based off latest + Ok(()) + } +} diff --git a/consensus/src/tendermint.proto b/consensus/src/tendermint.proto new file mode 100644 index 00000000000..a5c46b5c59d --- /dev/null +++ b/consensus/src/tendermint.proto @@ -0,0 +1,66 @@ +syntax = "proto3"; + +package core_grpc; + +// Based on https://github.com/tendermint/tendermint/blob/master/rpc/grpc/types.proto. 
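+// Only the small BroadcastAPI surface that the proxy uses is mirrored below.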
+ +//---------------------------------------- +// Message types + +//---------------------------------------- +// Abstract types + +message KVPair { + string key = 1; + enum Type { + STRING = 0; + INT = 1; + } + Type value_type = 2; + string value_string = 3; + int64 value_int = 4; +} + +//---------------------------------------- +// Request types + +message RequestPing { +} + +message RequestBroadcastTx { + bytes tx = 1; +} + +//---------------------------------------- +// Response types + +message ResponseCheckTx { + uint32 code = 1; + bytes data = 2; + string log = 3; + int64 gas = 4; + int64 fee = 5; +} + +message ResponseDeliverTx { + uint32 code = 1; + bytes data = 2; + string log = 3; + repeated KVPair tags = 4; +} + +message ResponsePing { +} + +message ResponseBroadcastTx { + ResponseCheckTx check_tx = 1; + ResponseDeliverTx deliver_tx = 2; +} + +//---------------------------------------- +// Service Definition + +service BroadcastAPI { + rpc Ping(RequestPing) returns (ResponsePing) ; + rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx) ; +} diff --git a/consensus/src/tendermint.rs b/consensus/src/tendermint.rs new file mode 100644 index 00000000000..b06394515bd --- /dev/null +++ b/consensus/src/tendermint.rs @@ -0,0 +1,51 @@ +use std::sync::mpsc::{Receiver, Sender}; +use std::thread; + +use grpc; + +use super::generated::tendermint::{RequestBroadcastTx, ResponseBroadcastTx}; +use super::generated::tendermint_grpc::{BroadcastAPI, BroadcastAPIClient}; + +/// Broadcast request that can be sent via the proxy. +#[derive(Debug)] +pub struct BroadcastRequest { + /// Raw broadcast payload. + pub payload: Vec<u8>, + /// Channel for sending the response. + pub response: Sender<Result<ResponseBroadcastTx, grpc::Error>>, +} + +/// Proxy that runs the tendermint client in a separate thread. +pub struct TendermintProxy {} + +impl TendermintProxy { + /// Create a new Tendermint proxy instance. + pub fn new(host: &str, port: u16, queue: Receiver<BroadcastRequest>) -> Self { + let proxy = TendermintProxy {}; + proxy.start(host, port, queue); + proxy + } + + /// Start the proxy worker thread. + fn start(&self, host: &str, port: u16, queue: Receiver<BroadcastRequest>) { + let client = BroadcastAPIClient::new_plain(&host, port, Default::default()).unwrap(); + + thread::spawn(move || { + // Process requests in queue. 
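+ // The loop blocks on the underlying `recv` and exits once every sender has + // been dropped, which also ends this worker thread.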
+ for request in queue { + let mut broadcast_request = RequestBroadcastTx::new(); + broadcast_request.set_tx(request.payload); + + let response = match client + .broadcast_tx(grpc::RequestOptions::new(), broadcast_request) + .wait() + { + Ok((_, response, _)) => Ok(response), + Err(error) => Err(error), + }; + + request.response.send(response).unwrap(); + } + }); + } +} diff --git a/consensus/tests/integration_test.rs b/consensus/tests/integration_test.rs new file mode 100644 index 00000000000..0e13dd6e03e --- /dev/null +++ b/consensus/tests/integration_test.rs @@ -0,0 +1,138 @@ +extern crate consensus as lib; +extern crate grpc; + +use std::{thread, time}; + +use lib::generated::consensus; +use lib::generated::consensus_grpc; +use lib::generated::consensus_grpc::Consensus; + +#[test] +fn processes_requests() { + let config = lib::Config { + tendermint_host: String::from("localhost"), + tendermint_port: 46657, + tendermint_abci_port: 46658, + grpc_port: 9002, + no_tendermint: true, + artificial_delay: 0, + }; + let client_port = config.grpc_port; + + let _server_handle = thread::spawn(move || { + lib::run(&config).unwrap(); + }); + + // Give time for Tendermint to connect + thread::sleep(time::Duration::from_millis(3000)); + + let client = + consensus_grpc::ConsensusClient::new_plain("localhost", client_port, Default::default()) + .unwrap(); + + // Get latest state - should be empty + let req = consensus::GetRequest::new(); + match client.get(grpc::RequestOptions::new(), req).wait() { + Ok(_resp) => { + panic!("First `get` should return an error"); + } + Err(_err) => { + assert!(true); + } + } + + // Get diffs - should be empty + let mut req = consensus::GetDiffsRequest::new(); + req.set_since_height(0); + match client.get_diffs(grpc::RequestOptions::new(), req).wait() { + Ok(_resp) => { + panic!("First `get` should return an error"); + } + Err(_err) => { + assert!(true); + } + } + + // Set state to `helloworld` + let mut req = consensus::ReplaceRequest::new(); + req.set_payload(String::from("helloworld").into_bytes()); + client + .replace(grpc::RequestOptions::new(), req) + .wait() + .unwrap(); + + let req = consensus::GetRequest::new(); + let (_, resp, _) = client.get(grpc::RequestOptions::new(), req).wait().unwrap(); + assert_eq!( + resp.get_checkpoint().get_payload(), + String::from("helloworld").as_bytes() + ); + + // Set state to `successor` + let mut req = consensus::ReplaceRequest::new(); + req.set_payload(String::from("successor").into_bytes()); + client + .replace(grpc::RequestOptions::new(), req) + .wait() + .unwrap(); + + // Add `diff1` + let mut req = consensus::AddDiffRequest::new(); + req.set_payload(String::from("diff1").into_bytes()); + client + .add_diff(grpc::RequestOptions::new(), req) + .wait() + .unwrap(); + + // Add `diff2` + let mut req = consensus::AddDiffRequest::new(); + req.set_payload(String::from("diff2").into_bytes()); + client + .add_diff(grpc::RequestOptions::new(), req) + .wait() + .unwrap(); + + // Call get, check checkpoint, diffs + let req = consensus::GetRequest::new(); + let (_, resp, _) = client.get(grpc::RequestOptions::new(), req).wait().unwrap(); + assert_eq!( + resp.get_checkpoint().get_payload(), + String::from("successor").as_bytes() + ); + assert_eq!(resp.get_checkpoint().get_height(), 2); + assert_eq!(resp.get_diffs().len(), 2); + assert_eq!(resp.get_diffs()[0], String::from("diff1").as_bytes()); + assert_eq!(resp.get_diffs()[1], String::from("diff2").as_bytes()); + + // Call get_diffs + let mut req = consensus::GetDiffsRequest::new(); + 
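+ // The checkpoint is now at height 2 and the two diffs occupy heights 3 and 4, + // so a client that is already at height 3 should be sent only `diff2`.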
req.set_since_height(3); + let (_, resp, _) = client + .get_diffs(grpc::RequestOptions::new(), req) + .wait() + .unwrap(); + assert_eq!(resp.get_diffs().len(), 1); + assert_eq!(resp.get_diffs()[0], String::from("diff2").as_bytes()); + + // Set state to a sequence of all byte values + let mut scale: Vec<u8> = vec![0; 256]; + for i in 0..256 { + scale[i] = i as u8; + } + + let mut req = consensus::ReplaceRequest::new(); + req.set_payload(scale.clone()); + client + .replace(grpc::RequestOptions::new(), req) + .wait() + .unwrap(); + + let req = consensus::GetRequest::new(); + let (_, resp, _) = client.get(grpc::RequestOptions::new(), req).wait().unwrap(); + assert_eq!(resp.get_checkpoint().get_payload(), &scale[..]); + + // See https://github.com/sunblaze-ucb/ekiden/issues/223 + // We can't gracefully shut down the server yet. + panic!("Test passed, just need to panic to get out"); + //server_handle.join(); +} diff --git a/contracts/key-manager/Cargo.toml b/contracts/key-manager/Cargo.toml new file mode 100644 index 00000000000..54507f89fc0 --- /dev/null +++ b/contracts/key-manager/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "ekiden-key-manager" +version = "0.1.0-alpha.1" +authors = ["Ekiden Developers "] +description = "Ekiden key manager" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" +build = "build.rs" + +[features] +default = [] + +[dependencies] +ekiden-core = { path = "../../core/common", version = "0.1.0-alpha.1" } +ekiden-trusted = { path = "../../core/trusted", version = "0.1.0-alpha.1" } +ekiden-key-manager-api = { path = "./api", version = "0.1.0-alpha.1" } +lazy_static = { version = "1.0", features = ["spin_no_std"] } +protobuf = "1.4.3" + +[build-dependencies] +ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" } +ekiden-edl = { path = "../../core/edl", version = "0.1.0-alpha.1" } diff --git a/contracts/key-manager/Makefile.toml b/contracts/key-manager/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/contracts/key-manager/Makefile.toml @@ -0,0 +1 @@ +extend = "../Makefile.toml" diff --git a/contracts/key-manager/api/Cargo.toml b/contracts/key-manager/api/Cargo.toml new file mode 100644 index 00000000000..4d9b8e637f2 --- /dev/null +++ b/contracts/key-manager/api/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "ekiden-key-manager-api" +version = "0.1.0-alpha.1" +authors = ["Ekiden Developers "] +description = "Ekiden key manager API" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" +build = "build.rs" + +[dependencies] +ekiden-core = { path = "../../../core/common", version = "0.1.0-alpha.1" } +protobuf = "1.4.3" + +[build-dependencies] +ekiden-tools = { path = "../../../tools", version = "0.1.0-alpha.1" } diff --git a/contracts/key-manager/api/build.rs b/contracts/key-manager/api/build.rs new file mode 100644 index 00000000000..4921aeb2825 --- /dev/null +++ b/contracts/key-manager/api/build.rs @@ -0,0 +1,6 @@ +extern crate ekiden_tools; + +fn main() { + ekiden_tools::generate_mod("src/generated", &["api"]); + ekiden_tools::build_api(); +} diff --git a/contracts/key-manager/api/src/api.proto b/contracts/key-manager/api/src/api.proto new file mode 100644 index 00000000000..4ae56f2f883 --- /dev/null +++ b/contracts/key-manager/api/src/api.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package key_manager; + +message GetOrCreateKeyRequest { + // Key name. + string name = 1; + // Key size. + uint32 size = 2; +} + +message GetOrCreateKeyResponse { + // Key in clear text. 
Note that keys can be sent like this as the + // key manager requires a mutually authenticated secure channel. + bytes key = 1; +} diff --git a/contracts/key-manager/api/src/api.rs b/contracts/key-manager/api/src/api.rs new file mode 100644 index 00000000000..5975038fab6 --- /dev/null +++ b/contracts/key-manager/api/src/api.rs @@ -0,0 +1,11 @@ +use ekiden_core::rpc::rpc_api; + +rpc_api! { + metadata { + name = key_manager; + version = "0.1.0"; + client_attestation_required = true; + } + + rpc get_or_create_key(GetOrCreateKeyRequest) -> GetOrCreateKeyResponse; +} diff --git a/contracts/key-manager/api/src/lib.rs b/contracts/key-manager/api/src/lib.rs new file mode 100644 index 00000000000..6fc046da40d --- /dev/null +++ b/contracts/key-manager/api/src/lib.rs @@ -0,0 +1,12 @@ +#![feature(use_extern_macros)] + +extern crate protobuf; + +#[macro_use] +extern crate ekiden_core; + +#[macro_use] +mod api; +mod generated; + +pub use generated::api::{GetOrCreateKeyRequest, GetOrCreateKeyResponse}; diff --git a/contracts/key-manager/build.rs b/contracts/key-manager/build.rs new file mode 100644 index 00000000000..0025f429cef --- /dev/null +++ b/contracts/key-manager/build.rs @@ -0,0 +1,6 @@ +extern crate ekiden_edl; +extern crate ekiden_tools; + +fn main() { + ekiden_tools::build_trusted(ekiden_edl::edl()); +} diff --git a/contracts/key-manager/client/Cargo.toml b/contracts/key-manager/client/Cargo.toml new file mode 100644 index 00000000000..9ce51c4366d --- /dev/null +++ b/contracts/key-manager/client/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "ekiden-key-manager-client" +version = "0.1.0-alpha.1" +authors = ["Ekiden Developers "] +description = "Ekiden key manager client" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" + +[dependencies] +ekiden-common = { path = "../../../common", version = "0.1.0-alpha.1" } +ekiden-enclave-common = { path = "../../../enclave/common", version = "0.1.0-alpha.1" } +ekiden-rpc-client = { path = "../../../rpc/client", version = "0.1.0-alpha.1" } +ekiden-rpc-common = { path = "../../../rpc/common", version = "0.1.0-alpha.1" } +ekiden-rpc-trusted = { path = "../../../rpc/trusted", version = "0.1.0-alpha.1" } +ekiden-key-manager-api = { path = "../api", version = "0.1.0-alpha.1" } +lazy_static = { version = "1.0", features = ["spin_no_std"] } diff --git a/contracts/key-manager/client/src/client.rs b/contracts/key-manager/client/src/client.rs new file mode 100644 index 00000000000..b9aecd5b41d --- /dev/null +++ b/contracts/key-manager/client/src/client.rs @@ -0,0 +1,136 @@ +use std::collections::HashMap; +use std::collections::hash_map::Entry; +#[cfg(not(target_env = "sgx"))] +use std::sync::{Mutex, MutexGuard}; +#[cfg(target_env = "sgx")] +use std::sync::SgxMutex as Mutex; +#[cfg(target_env = "sgx")] +use std::sync::SgxMutexGuard as MutexGuard; + +use ekiden_common::error::{Error, Result}; +use ekiden_enclave_common::quote::MrEnclave; +use ekiden_key_manager_api::with_api; +use ekiden_rpc_client::{create_client_rpc, FutureExtra}; +use ekiden_rpc_common::client::ClientEndpoint; +use ekiden_rpc_trusted::client::OcallContractClientBackend; + +// Create API client for the key manager. +with_api! { + create_client_rpc!(key_manager, ekiden_key_manager_api, api); +} + +/// Key manager client interface. +pub struct KeyManager { + /// Key manager contract MRENCLAVE. + mr_enclave: Option<MrEnclave>, + /// Internal API client. + client: Option<key_manager::Client<OcallContractClientBackend>>, + /// Local key cache. + cache: HashMap<String, Vec<u8>>, +} + +lazy_static! { + // Global key manager client object. 
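+ // Usage sketch (assumes the manager identity was configured first, e.g. via + // the `use_key_manager_contract!` macro): + // let key = KeyManager::get()?.get_or_create_key("state", 32)?;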
+ static ref KEY_MANAGER: Mutex<KeyManager> = Mutex::new(KeyManager::new()); +} + +impl KeyManager { + /// Construct new key manager interface. + fn new() -> Self { + KeyManager { + mr_enclave: None, + client: None, + cache: HashMap::new(), + } + } + + /// Establish a connection with the key manager contract. + /// + /// This will establish a mutually authenticated secure channel with the key manager + /// contract, so this operation may fail due to the key manager being unavailable or + /// issues with establishing a mutually authenticated secure channel. + fn connect(&mut self) -> Result<()> { + let mr_enclave = match self.mr_enclave { + Some(ref mr_enclave) => mr_enclave.clone(), + None => { + return Err(Error::new( + "Tried to call key manager without known manager identity", + )) + } + }; + + if self.client.is_some() { + return Ok(()); + } + + let backend = match OcallContractClientBackend::new(ClientEndpoint::KeyManager) { + Ok(backend) => backend, + _ => return Err(Error::new("Failed to create key manager client backend")), + }; + + let client = key_manager::Client::new(backend, mr_enclave); + self.client.get_or_insert(client); + + Ok(()) + } + + /// Configures identity of key manager contract. + /// + /// **This method must be called before the key manager client can be used.** + pub fn set_contract(&mut self, mr_enclave: MrEnclave) { + self.mr_enclave.get_or_insert(mr_enclave); + } + + /// Get global key manager client instance. + /// + /// Calling this method will take a lock on the global instance, which will + /// be released once the value goes out of scope. + pub fn get<'a>() -> Result<MutexGuard<'a, KeyManager>> { + Ok(KEY_MANAGER.lock().unwrap()) + } + + /// Clear local key cache. + /// + /// This will make the client re-fetch the keys from the key manager. + pub fn clear_cache(&mut self) { + self.cache.clear(); + } + + /// Get or create named key. + /// + /// If the key does not yet exist, the key manager will generate one. If + /// the key has already been cached locally, it will be retrieved from + /// cache. + pub fn get_or_create_key(&mut self, name: &str, size: usize) -> Result<Vec<u8>> { + // Ensure manager is connected. + self.connect()?; + + // Check cache first. + match self.cache.entry(name.to_string()) { + Entry::Occupied(entry) => Ok(entry.get().clone()), + Entry::Vacant(entry) => { + // No entry in cache, fetch from key manager. + let mut request = key_manager::GetOrCreateKeyRequest::new(); + request.set_name(name.to_string()); + request.set_size(size as u32); + + let mut response = match self.client + .as_mut() + .unwrap() + .get_or_create_key(request) + .wait() + { + Ok(response) => response, + Err(error) => { + return Err(Error::new(format!( + "Failed to call key manager: {}", + error.message + ))) + } + }; + + Ok(entry.insert(response.take_key()).clone()) + } + } + } +} diff --git a/contracts/key-manager/client/src/lib.rs b/contracts/key-manager/client/src/lib.rs new file mode 100644 index 00000000000..eae9bd36be9 --- /dev/null +++ b/contracts/key-manager/client/src/lib.rs @@ -0,0 +1,41 @@ +#![feature(use_extern_macros)] + +#[macro_use] +extern crate lazy_static; + +extern crate ekiden_common; +extern crate ekiden_enclave_common; +extern crate ekiden_key_manager_api; +extern crate ekiden_rpc_client; +extern crate ekiden_rpc_common; +extern crate ekiden_rpc_trusted; + +mod client; + +pub use client::KeyManager; + +/// Helper macro to configure key manager contract identity from a generated file. 
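+/// +/// Invocation sketch (the token contract in this patch uses exactly this form): +/// +/// use_key_manager_contract!("generated/key-manager.identity");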
+/// +/// Before a contract can use the key manager client to perform any operations, it +/// needs to configure the identity of the key manager contract that it will be +/// using. +/// +/// This can be done by generating an identity of the contract in a build script and +/// then calling this macro to configure this identity with the key manager client. +/// +/// The macro takes one argument, a filename of the generated identity file. +#[macro_export] +macro_rules! use_key_manager_contract { + ($identity:expr) => { + #[cfg(target_env = "sgx")] + global_ctors_object! { + KEY_MANAGER_INIT, key_manager_init = { + use ekiden_core::enclave::quote::MrEnclave; + use ekiden_trusted::key_manager::KeyManager; + + // Setup the key manager contract identity. + KeyManager::get().unwrap().set_contract(MrEnclave(*include_bytes!($identity))); + } + } + } +} diff --git a/contracts/key-manager/src/key_store.rs b/contracts/key-manager/src/key_store.rs new file mode 100644 index 00000000000..862a9109501 --- /dev/null +++ b/contracts/key-manager/src/key_store.rs @@ -0,0 +1,97 @@ +use std::collections::HashMap; +use std::collections::hash_map::Entry; +#[cfg(not(target_env = "sgx"))] +use std::sync::{Mutex, MutexGuard}; +#[cfg(target_env = "sgx")] +use std::sync::SgxMutex as Mutex; +#[cfg(target_env = "sgx")] +use std::sync::SgxMutexGuard as MutexGuard; + +use ekiden_core::enclave::quote::MrEnclave; +use ekiden_core::error::{Error, Result}; +use ekiden_core::random; + +/// Key store, which actually stores the key manager keys. +pub struct KeyStore { + /// Key store map. + keys: HashMap<MrEnclave, HashMap<String, Vec<u8>>>, +} + +lazy_static! { + // Global key store object. + static ref KEY_STORE: Mutex<KeyStore> = Mutex::new(KeyStore::new()); +} + +impl KeyStore { + const MAX_KEY_SIZE: usize = 128; + + fn new() -> Self { + KeyStore { + keys: HashMap::new(), + } + } + + /// Get global key store instance. + /// + /// Calling this method will take a lock on the global instance, which will + /// be released once the value goes out of scope. + pub fn get<'a>() -> MutexGuard<'a, KeyStore> { + KEY_STORE.lock().unwrap() + } + + /// Generate a new random key. + pub fn generate_key(size: usize) -> Result<Vec<u8>> { + if size > KeyStore::MAX_KEY_SIZE { + return Err(Error::new("Key too large")); + } + + let mut key = vec![0; size]; + random::get_random_bytes(&mut key)?; + + Ok(key) + } + + /// Get or create a named key. + /// + /// Each contract (identified by its MRENCLAVE) can store multiple keys in the + /// key store, each is identified by its name string. The key size must be + /// specified and is checked when retrieving an existing key. + pub fn get_or_create_key( + &mut self, + mr_enclave: &MrEnclave, + name: &str, + size: usize, + ) -> Result<Vec<u8>> { + let key = match self.keys.entry(mr_enclave.clone()) { + Entry::Occupied(mut entry) => { + // This enclave already has some keys stored. Check if it also stores + // the target named key. + match entry.get_mut().entry(name.to_string()) { + Entry::Occupied(entry) => entry.get().clone(), + Entry::Vacant(entry) => { + let key = KeyStore::generate_key(size)?; + entry.insert(key).clone() + } + } + } + Entry::Vacant(entry) => { + // This enclave has nothing stored yet. Create an empty hashmap for it + // and create the given key. + let key = KeyStore::generate_key(size)?; + let new_key = key.clone(); + let mut map = HashMap::new(); + map.insert(name.to_string(), key); + entry.insert(map); + + new_key + } + }; + + // Check key length. 
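+ // e.g. a key created with size 32 cannot later be requested with size 16.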
+ if key.len() != size { + return Err(Error::new("Existing key with incompatible length")); + } + + Ok(key) + } +} diff --git a/contracts/key-manager/src/lib.rs b/contracts/key-manager/src/lib.rs new file mode 100644 index 00000000000..49945632dcf --- /dev/null +++ b/contracts/key-manager/src/lib.rs @@ -0,0 +1,43 @@ +#![feature(use_extern_macros)] + +#[macro_use] +extern crate lazy_static; +extern crate protobuf; + +extern crate ekiden_core; +extern crate ekiden_key_manager_api; +extern crate ekiden_trusted; + +mod key_store; + +use ekiden_core::error::Result; +use ekiden_key_manager_api::{with_api, GetOrCreateKeyRequest, GetOrCreateKeyResponse}; +use ekiden_trusted::enclave::enclave_init; +use ekiden_trusted::rpc::create_enclave_rpc; +use ekiden_trusted::rpc::request::Request; + +use key_store::KeyStore; + +enclave_init!(); + +// Create enclave RPC handlers. +with_api! { + create_enclave_rpc!(api); +} + +fn get_or_create_key(request: &Request<GetOrCreateKeyRequest>) -> Result<GetOrCreateKeyResponse> { + let mut response = GetOrCreateKeyResponse::new(); + + // Query the key store. + { + let mut key_store = KeyStore::get(); + response.set_key(key_store.get_or_create_key( + // Unwrap here is safe as this contract requires mutual authentication. + &request.get_client_mr_enclave().as_ref().unwrap(), + request.get_name(), + request.get_size() as usize, + )?); + } + + Ok(response) +} diff --git a/contracts/token/Cargo.toml b/contracts/token/Cargo.toml new file mode 100644 index 00000000000..131c79e3b54 --- /dev/null +++ b/contracts/token/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "token" +version = "0.1.0-alpha.1" +authors = ["Raymond Cheng "] +build = "build.rs" + +[features] +default = [] + +[dependencies] +ekiden-core = { path = "../../core/common" } +ekiden-trusted = { path = "../../core/trusted" } +protobuf = "1.4.3" +token-api = { path = "./api" } + +[build-dependencies] +ekiden-tools = { path = "../../tools" } +ekiden-edl = { path = "../../core/edl" } diff --git a/contracts/token/Makefile.toml b/contracts/token/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/contracts/token/Makefile.toml @@ -0,0 +1 @@ +extend = "../Makefile.toml" diff --git a/contracts/token/api/Cargo.toml b/contracts/token/api/Cargo.toml new file mode 100644 index 00000000000..0139e340c40 --- /dev/null +++ b/contracts/token/api/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "token-api" +version = "0.1.0-alpha.1" +authors = [ + "Raymond Cheng ", + "Jernej Kos " +] +build = "build.rs" + +[dependencies] +ekiden-core = { path = "../../../core/common" } +protobuf = "1.4.3" + +[build-dependencies] +ekiden-tools = { path = "../../../tools" } diff --git a/contracts/token/api/build.rs b/contracts/token/api/build.rs new file mode 100644 index 00000000000..4921aeb2825 --- /dev/null +++ b/contracts/token/api/build.rs @@ -0,0 +1,6 @@ +extern crate ekiden_tools; + +fn main() { + ekiden_tools::generate_mod("src/generated", &["api"]); + ekiden_tools::build_api(); +} diff --git a/contracts/token/api/src/api.proto b/contracts/token/api/src/api.proto new file mode 100644 index 00000000000..3d3afe03adb --- /dev/null +++ b/contracts/token/api/src/api.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package token; + +message TransferRequest { + string sender = 2; + string destination = 3; + uint64 value = 4; +} + +message TransferResponse { +} + +message CreateRequest { + string sender = 1; + uint64 initial_supply = 2; + string token_name = 3; + string token_symbol = 4; +} + +message CreateResponse { +} + +message GetBalanceRequest { + string account 
= 1; +} + +message GetBalanceResponse { + uint64 balance = 1; +} diff --git a/contracts/token/api/src/api.rs b/contracts/token/api/src/api.rs new file mode 100644 index 00000000000..ee2e462a6fb --- /dev/null +++ b/contracts/token/api/src/api.rs @@ -0,0 +1,15 @@ +use ekiden_core::rpc::rpc_api; + +rpc_api! { + metadata { + name = token; + version = "0.1.0"; + client_attestation_required = false; + } + + rpc create(CreateRequest) -> CreateResponse; + + rpc transfer(TransferRequest) -> TransferResponse; + + rpc get_balance(GetBalanceRequest) -> GetBalanceResponse; +} diff --git a/contracts/token/api/src/lib.rs b/contracts/token/api/src/lib.rs new file mode 100644 index 00000000000..1e72abb9ef6 --- /dev/null +++ b/contracts/token/api/src/lib.rs @@ -0,0 +1,12 @@ +#![feature(use_extern_macros)] + +extern crate protobuf; + +#[macro_use] +extern crate ekiden_core; + +#[macro_use] +mod api; +mod generated; + +pub use generated::api::*; diff --git a/contracts/token/build.rs b/contracts/token/build.rs new file mode 100644 index 00000000000..9b1efd5797b --- /dev/null +++ b/contracts/token/build.rs @@ -0,0 +1,14 @@ +extern crate ekiden_edl; +extern crate ekiden_tools; + +fn main() { + ekiden_tools::build_trusted(ekiden_edl::edl()); + + // Generate key manager contract identity. This determines what key manager the + // contract will be using. + ekiden_tools::generate_mod("src/generated", &[]); + ekiden_tools::generate_contract_identity( + "src/generated/key-manager.identity", + "../../target/enclave/ekiden-key-manager.so", + ); +} diff --git a/contracts/token/src/lib.rs b/contracts/token/src/lib.rs new file mode 100644 index 00000000000..d95d8fb9e78 --- /dev/null +++ b/contracts/token/src/lib.rs @@ -0,0 +1,68 @@ +#![feature(use_extern_macros)] + +extern crate protobuf; + +extern crate ekiden_core; +extern crate ekiden_trusted; + +extern crate token_api; + +mod token_contract; + +use token_api::{with_api, CreateRequest, CreateResponse, GetBalanceRequest, GetBalanceResponse, + TransferRequest, TransferResponse}; +use token_contract::TokenContract; + +use ekiden_core::error::Result; +use ekiden_trusted::enclave::enclave_init; +use ekiden_trusted::key_manager::use_key_manager_contract; +use ekiden_trusted::rpc::create_enclave_rpc; + +enclave_init!(); + +// Configure the key manager contract to use. +use_key_manager_contract!("generated/key-manager.identity"); + +// Create enclave RPC handlers. +with_api! { + create_enclave_rpc!(api); +} + +fn create(request: &CreateRequest) -> Result<CreateResponse> { + let token = TokenContract::new(); + + // TODO: Get sender from authenticated request. + token.create( + request.get_sender().to_owned(), + request.get_token_name().to_owned(), + request.get_token_symbol().to_owned(), + request.get_initial_supply(), + )?; + + Ok(CreateResponse::new()) +} + +fn transfer(request: &TransferRequest) -> Result<TransferResponse> { + let token = TokenContract::new(); + + // TODO: Get sender from authenticated request. + token.transfer( + request.get_sender().to_owned(), + request.get_destination().to_owned(), + request.get_value(), + )?; + + Ok(TransferResponse::new()) +} + +fn get_balance(request: &GetBalanceRequest) -> Result<GetBalanceResponse> { + let token = TokenContract::new(); + + // TODO: Get sender from authenticated request. 
+ let balance = token.get_balance(&request.get_account().to_owned())?; + + let mut response = GetBalanceResponse::new(); + response.set_balance(balance); + + Ok(response) +} diff --git a/contracts/token/src/token_contract.rs b/contracts/token/src/token_contract.rs new file mode 100644 index 00000000000..7e37cf54b41 --- /dev/null +++ b/contracts/token/src/token_contract.rs @@ -0,0 +1,149 @@ +use ekiden_core::error::{Error, Result}; +use ekiden_trusted::db::database_schema; + +database_schema! { + pub struct TokenDb { + pub name: String, + pub symbol: String, + pub total_supply: u64, + pub balance_of: Map<String, u64>, + } +} + +pub struct TokenContract { + /// Token database. + db: TokenDb, +} + +impl TokenContract { + pub fn new() -> Self { + TokenContract { db: TokenDb::new() } + } + + pub fn create( + &self, + sender: String, + name: String, + symbol: String, + initial_supply: u64, + ) -> Result<()> { + // TODO: Ensure that the contract has not yet been initialized. + + let decimals = 18; + let total_supply = initial_supply * 10u64.pow(decimals); + + // Initialize contract, overwriting any previous state. + self.db.name.insert(&name); + self.db.symbol.insert(&symbol); + self.db.total_supply.insert(&total_supply); + self.db.balance_of.insert(&sender, &total_supply); + + Ok(()) + } + + fn get_from_balance(&self, addr: &String, value: u64) -> Result<u64> { + match self.db.balance_of.get(addr) { + None => Err(Error::new("Nonexistent `from` account")), + Some(b) if b < value => Err(Error::new("Insufficient `from` balance")), + Some(b) => Ok(b), + } + } + + fn get_to_balance(&self, addr: &String) -> Result<u64> { + match self.db.balance_of.get(addr) { + Some(b) => Ok(b), + None => Ok(0), + } + } + + fn do_transfer(&self, from: String, to: String, value: u64) -> Result<()> { + let from_balance = self.get_from_balance(&from, value)?; + let to_balance = self.get_to_balance(&to)?; + // Use a checked add so that the overflow is reported as an error instead of + // wrapping (or panicking in debug builds). + if to_balance.checked_add(value).is_none() { + return Err(Error::new( + "Transfer value too large, overflow `to` account", + )); + } + + // Set new balances. 
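+ // Example: to_balance = u64::MAX and value = 1 would wrap around; the + // checked_add guard above rejects exactly that case.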
+ let previous_balances = from_balance + to_balance; + let from_balance = from_balance - value; + let to_balance = to_balance + value; + self.db.balance_of.insert(&from, &from_balance); + self.db.balance_of.insert(&to, &to_balance); + // Transfers conserve the combined balance of the two accounts. + debug_assert_eq!(from_balance + to_balance, previous_balances); + + Ok(()) + } + + // PUBLIC METHODS + // - callable over RPC + pub fn get_name(&self) -> Result<String> { + match self.db.name.get() { + Some(name) => Ok(name), + None => Err(Error::new("Contract not yet initialized")), + } + } + + pub fn get_symbol(&self) -> Result<String> { + match self.db.symbol.get() { + Some(symbol) => Ok(symbol), + None => Err(Error::new("Contract not yet initialized")), + } + } + + pub fn get_balance(&self, msg_sender: &String) -> Result<u64> { + self.get_to_balance(msg_sender) + } + + pub fn transfer(&self, msg_sender: String, to: String, value: u64) -> Result<()> { + self.do_transfer(msg_sender, to, value) + } + + pub fn burn(&self, msg_sender: String, value: u64) -> Result<()> { + let total_supply = match self.db.total_supply.get() { + Some(supply) => supply, + None => return Err(Error::new("Contract not yet initialized")), + }; + + let from_balance = self.get_from_balance(&msg_sender, value)?; + self.db + .balance_of + .insert(&msg_sender, &(from_balance - value)); + self.db.total_supply.insert(&(total_supply - value)); + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_contract() { + let name = "Ekiden Token"; + let symbol = "EKI"; + let a1 = "testaddr"; + + let contract = TokenContract::new(); + contract + .create(a1.to_owned(), name.to_owned(), symbol.to_owned(), 8) + .expect("create should succeed"); + + assert_eq!(name, contract.get_name().unwrap(), "name should be set"); + assert_eq!( + symbol, + contract.get_symbol().unwrap(), + "symbol should be set" + ); + + assert_eq!( + contract.get_balance(&a1.to_owned()).unwrap(), + 8_000_000_000_000_000_000, + "creator should get all the tokens" + ); + } +} diff --git a/contracts/token/tests/integration_test.rs b/contracts/token/tests/integration_test.rs new file mode 100644 index 00000000000..c6ba7dcadc0 --- /dev/null +++ b/contracts/token/tests/integration_test.rs @@ -0,0 +1,6 @@ +//extern crate token; + +#[test] +fn it_adds_two() { + assert_eq!(4, 4); +} diff --git a/core/Makefile.toml b/core/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/core/Makefile.toml @@ -0,0 +1 @@ +extend = "../Makefile.toml" diff --git a/core/common/Cargo.toml b/core/common/Cargo.toml new file mode 100644 index 00000000000..ac7b5d7a6c9 --- /dev/null +++ b/core/common/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "ekiden-core" +version = "0.1.0-alpha.1" +authors = ["Ekiden Developers "] +description = "Ekiden common functionality (available to both trusted and untrusted parts)" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" + +[dependencies] +ekiden-common = { path = "../../common", version = "0.1.0-alpha.1" } +ekiden-enclave-common = { path = "../../enclave/common", version = "0.1.0-alpha.1" } +ekiden-rpc-common = { path = "../../rpc/common", version = "0.1.0-alpha.1" } diff --git a/core/common/Makefile.toml b/core/common/Makefile.toml new file mode 100644 index 00000000000..9dcb41fa936 --- /dev/null +++ b/core/common/Makefile.toml @@ -0,0 +1,4 @@ +extend = "../Makefile.toml" + +[env] +BUILD_FOR_SGX_TARGET = "true" diff --git a/core/common/src/lib.rs b/core/common/src/lib.rs new file mode 100644 index 00000000000..0bb504e2ee1 --- /dev/null +++ b/core/common/src/lib.rs @@ -0,0 +1,15 @@ +#![feature(use_extern_macros)] + +extern crate 
ekiden_common; +extern crate ekiden_enclave_common; +extern crate ekiden_rpc_common; + +pub use ekiden_common::*; + +pub mod enclave { + pub use ekiden_enclave_common::*; +} + +pub mod rpc { + pub use ekiden_rpc_common::*; +} diff --git a/core/edl/Cargo.toml b/core/edl/Cargo.toml new file mode 100644 index 00000000000..dbe9dfe6a14 --- /dev/null +++ b/core/edl/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "ekiden-edl" +version = "0.1.0-alpha.1" +authors = ["Ekiden Developers "] +description = "Ekiden EDL" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" + +[dependencies] +ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" } +ekiden-enclave-edl = { path = "../../enclave/edl", version = "0.1.0-alpha.1" } +ekiden-rpc-edl = { path = "../../rpc/edl", version = "0.1.0-alpha.1" } +ekiden-db-edl = { path = "../../db/edl", version = "0.1.0-alpha.1" } +sgx_edl = { git = "https://github.com/ekiden/rust-sgx-sdk", tag = "v0.9.7-ekiden1" } diff --git a/core/edl/Makefile.toml b/core/edl/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/core/edl/Makefile.toml @@ -0,0 +1 @@ +extend = "../Makefile.toml" diff --git a/core/edl/src/core.edl b/core/edl/src/core.edl new file mode 100644 index 00000000000..0a7dc34ffc4 --- /dev/null +++ b/core/edl/src/core.edl @@ -0,0 +1,4 @@ +enclave { + // Import from Intel SGX SDK. + from "sgx_tstdc.edl" import *; +}; diff --git a/core/edl/src/enclave.lds b/core/edl/src/enclave.lds new file mode 100644 index 00000000000..e3d9d0ee0d9 --- /dev/null +++ b/core/edl/src/enclave.lds @@ -0,0 +1,9 @@ +enclave.so +{ + global: + g_global_data_sim; + g_global_data; + enclave_entry; + local: + *; +}; diff --git a/core/edl/src/enclave.xml b/core/edl/src/enclave.xml new file mode 100644 index 00000000000..132c4616a52 --- /dev/null +++ b/core/edl/src/enclave.xml @@ -0,0 +1,12 @@ +<EnclaveConfiguration> + <ProdID>0</ProdID> + <ISVSVN>0</ISVSVN> + <StackMaxSize>0x40000</StackMaxSize> + <HeapMaxSize>0x10000000</HeapMaxSize> + + <TCSNum>1</TCSNum> + <TCSPolicy>0</TCSPolicy> + <DisableDebug>0</DisableDebug> + <MiscSelect>0</MiscSelect> + <MiscMask>0xFFFFFFFF</MiscMask> +</EnclaveConfiguration> diff --git a/core/edl/src/lib.rs b/core/edl/src/lib.rs new file mode 100644 index 00000000000..fca8756be08 --- /dev/null +++ b/core/edl/src/lib.rs @@ -0,0 +1,16 @@ +#[macro_use] +extern crate ekiden_tools; + +extern crate ekiden_db_edl; +extern crate ekiden_enclave_edl; +extern crate ekiden_rpc_edl; +extern crate sgx_edl; + +define_edl! 
{ + use sgx_edl; + use ekiden_enclave_edl; + use ekiden_rpc_edl; + use ekiden_db_edl; + + "core.edl", +} diff --git a/core/trusted/Cargo.toml b/core/trusted/Cargo.toml new file mode 100644 index 00000000000..8fb2a5fe974 --- /dev/null +++ b/core/trusted/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "ekiden-trusted" +version = "0.1.0-alpha.1" +authors = ["Ekiden Developers "] +description = "Ekiden trusted library" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" + +[dependencies] +ekiden-enclave-trusted = { path = "../../enclave/trusted", version = "0.1.0-alpha.1" } +ekiden-rpc-trusted = { path = "../../rpc/trusted", version = "0.1.0-alpha.1" } +ekiden-db-trusted = { path = "../../db/trusted", version = "0.1.0-alpha.1" } +ekiden-key-manager-client = { path = "../../contracts/key-manager/client", version = "0.1.0-alpha.1" } diff --git a/core/trusted/Makefile.toml b/core/trusted/Makefile.toml new file mode 100644 index 00000000000..0914bd0f4f2 --- /dev/null +++ b/core/trusted/Makefile.toml @@ -0,0 +1,5 @@ +extend = "../Makefile.toml" + +[env] +BUILD_FOR_HOST_TARGET = "false" +BUILD_FOR_SGX_TARGET = "true" diff --git a/core/trusted/src/lib.rs b/core/trusted/src/lib.rs new file mode 100644 index 00000000000..df08b19c9dd --- /dev/null +++ b/core/trusted/src/lib.rs @@ -0,0 +1,22 @@ +#![feature(use_extern_macros)] + +extern crate ekiden_db_trusted; +extern crate ekiden_enclave_trusted; +extern crate ekiden_key_manager_client; +extern crate ekiden_rpc_trusted; + +pub mod enclave { + pub use ekiden_enclave_trusted::*; +} + +pub mod rpc { + pub use ekiden_rpc_trusted::*; +} + +pub mod db { + pub use ekiden_db_trusted::*; +} + +pub mod key_manager { + pub use ekiden_key_manager_client::*; +} diff --git a/core/untrusted/Cargo.toml b/core/untrusted/Cargo.toml new file mode 100644 index 00000000000..f63899baf05 --- /dev/null +++ b/core/untrusted/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "ekiden-untrusted" +version = "0.1.0-alpha.1" +authors = ["Ekiden Developers "] +description = "Ekiden untrusted library" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" + +[dependencies] +ekiden-enclave-untrusted = { path = "../../enclave/untrusted", version = "0.1.0-alpha.1" } +ekiden-rpc-untrusted = { path = "../../rpc/untrusted", version = "0.1.0-alpha.1" } +ekiden-db-untrusted = { path = "../../db/untrusted", version = "0.1.0-alpha.1" } + +[build-dependencies] +ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" } diff --git a/core/untrusted/Makefile.toml b/core/untrusted/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/core/untrusted/Makefile.toml @@ -0,0 +1 @@ +extend = "../Makefile.toml" diff --git a/core/untrusted/src/lib.rs b/core/untrusted/src/lib.rs new file mode 100644 index 00000000000..c77bcf26879 --- /dev/null +++ b/core/untrusted/src/lib.rs @@ -0,0 +1,22 @@ +#![feature(use_extern_macros)] + +extern crate ekiden_db_untrusted; +extern crate ekiden_enclave_untrusted; +extern crate ekiden_rpc_untrusted; + +pub use ekiden_db_untrusted::EnclaveDb; +pub use ekiden_enclave_untrusted::Enclave; +pub use ekiden_enclave_untrusted::identity::EnclaveIdentity; +pub use ekiden_rpc_untrusted::EnclaveRpc; + +pub mod enclave { + pub use ekiden_enclave_untrusted::*; +} + +pub mod rpc { + pub use ekiden_rpc_untrusted::*; +} + +pub mod db { + pub use ekiden_db_untrusted::*; +} diff --git a/db/Makefile.toml b/db/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/db/Makefile.toml @@ -0,0 +1 @@ 
+extend = "../Makefile.toml" diff --git a/db/edl/Cargo.toml b/db/edl/Cargo.toml new file mode 100644 index 00000000000..405831ad608 --- /dev/null +++ b/db/edl/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "ekiden-db-edl" +version = "0.1.0-alpha.1" +authors = ["Ekiden Developers "] +description = "Ekiden database EDL" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" + +[dependencies] +ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" } diff --git a/db/edl/Makefile.toml b/db/edl/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/db/edl/Makefile.toml @@ -0,0 +1 @@ +extend = "../Makefile.toml" diff --git a/db/edl/src/db.edl b/db/edl/src/db.edl new file mode 100644 index 00000000000..f075233dade --- /dev/null +++ b/db/edl/src/db.edl @@ -0,0 +1,34 @@ +enclave { + trusted { + public void db_state_diff( + [user_check] const uint8_t *old, + size_t old_length, + [user_check] const uint8_t *new, + size_t new_length, + [user_check] uint8_t *diff, + size_t diff_capacity, + [out] size_t *diff_length + ); + + public void db_state_apply( + [user_check] const uint8_t *old, + size_t old_length, + [user_check] const uint8_t *diff, + size_t diff_length, + [user_check] uint8_t *new, + size_t new_capacity, + [out] size_t *new_length + ); + + public void db_state_set( + [user_check] const uint8_t *state, + size_t state_length + ); + + public void db_state_get( + [user_check] uint8_t *state, + size_t state_capacity, + [out] size_t *state_length + ); + }; +}; diff --git a/db/edl/src/lib.rs b/db/edl/src/lib.rs new file mode 100644 index 00000000000..c8d5e6bb2df --- /dev/null +++ b/db/edl/src/lib.rs @@ -0,0 +1,6 @@ +#[macro_use] +extern crate ekiden_tools; + +define_edl! { + "db.edl" +} diff --git a/db/trusted/Cargo.toml b/db/trusted/Cargo.toml new file mode 100644 index 00000000000..d54d2da1652 --- /dev/null +++ b/db/trusted/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "ekiden-db-trusted" +version = "0.1.0-alpha.1" +authors = ["Ekiden Developers "] +description = "Ekiden database (trusted part)" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" + +[dependencies] +bsdiff = "0.1.3" +bzip2 = "0.3.2" +ekiden-common = { path = "../../common", version = "0.1.0-alpha.1" } +ekiden-enclave-trusted = { path = "../../enclave/trusted", version = "0.1.0-alpha.1" } +ekiden-key-manager-client = { path = "../../contracts/key-manager/client", version = "0.1.0-alpha.1" } +lazy_static = { version = "1.0", features = ["spin_no_std"] } +protobuf = "1.4.3" +sodalite = "0.3.0" + +[build-dependencies] +ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" } diff --git a/db/trusted/Makefile.toml b/db/trusted/Makefile.toml new file mode 100644 index 00000000000..0914bd0f4f2 --- /dev/null +++ b/db/trusted/Makefile.toml @@ -0,0 +1,5 @@ +extend = "../Makefile.toml" + +[env] +BUILD_FOR_HOST_TARGET = "false" +BUILD_FOR_SGX_TARGET = "true" diff --git a/db/trusted/benches/benchmarks.rs b/db/trusted/benches/benchmarks.rs new file mode 100644 index 00000000000..3cc1a726227 --- /dev/null +++ b/db/trusted/benches/benchmarks.rs @@ -0,0 +1,152 @@ +#![feature(test)] + +extern crate test; + +extern crate ekiden_db_trusted; + +use test::Bencher; + +use ekiden_db_trusted::{Database, DatabaseHandle}; +use ekiden_db_trusted::ecalls::{db_state_apply, db_state_diff, db_state_get, db_state_set}; + +/// Populate the database with some dummy state. 
+fn generate_dummy_state() { + let mut db = DatabaseHandle::instance(); + db.insert(b"example_key1", &vec![42; 128]); + db.insert(b"example_key2", &vec![21; 128]); +} + +/// Export current database state. +fn export_db_state() -> Vec<u8> { + let mut state: Vec<u8> = Vec::with_capacity(64 * 1024); + let mut state_length = 0; + + db_state_get(state.as_mut_ptr(), state.capacity(), &mut state_length); + + unsafe { + state.set_len(state_length); + } + assert!(!state.is_empty()); + + state +} + +/// Benchmark raw database set with a 128-byte value. +#[bench] +fn benchmark_insert_raw128(b: &mut Bencher) { + b.iter(|| { + let mut db = DatabaseHandle::instance(); + db.insert(b"example_key", &vec![42; 128]); + }); +} + +/// Benchmark raw database get with a 128-byte value. +#[bench] +fn benchmark_get_raw128(b: &mut Bencher) { + generate_dummy_state(); + + b.iter(|| { + let db = DatabaseHandle::instance(); + assert_eq!(db.get(b"example_key1"), Some(vec![42; 128])); + assert_eq!(db.get(b"example_key2"), Some(vec![21; 128])); + }); +} + +/// Benchmark database export. +#[bench] +fn benchmark_export(b: &mut Bencher) { + generate_dummy_state(); + + b.iter(|| { + export_db_state(); + }); +} + +/// Benchmark database import. +#[bench] +fn benchmark_import(b: &mut Bencher) { + generate_dummy_state(); + let state = export_db_state(); + + b.iter(|| { + db_state_set(state.as_ptr(), state.len()); + }); +} + +/// Benchmark database diff. +#[bench] +fn benchmark_diff(b: &mut Bencher) { + generate_dummy_state(); + let old_state = export_db_state(); + + { + let mut db = DatabaseHandle::instance(); + db.insert(b"example_key1", &vec![21; 128]); + } + + let new_state = export_db_state(); + + b.iter(|| { + let mut diff: Vec<u8> = Vec::with_capacity(64 * 1024); + let mut diff_length = 0; + + db_state_diff( + old_state.as_ptr(), + old_state.len(), + new_state.as_ptr(), + new_state.len(), + diff.as_mut_ptr(), + diff.capacity(), + &mut diff_length, + ); + + assert!(diff_length > 0); + }); +} + +/// Benchmark database diff apply. +#[bench] +fn benchmark_apply(b: &mut Bencher) { + generate_dummy_state(); + let old_state = export_db_state(); + + { + let mut db = DatabaseHandle::instance(); + db.insert(b"example_key1", &vec![21; 128]); + } + + let new_state = export_db_state(); + + // Generate diff. 
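+ // The ecall writes at most `capacity` bytes through the raw pointer and reports + // the number actually written, which is what makes the `set_len` below sound.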
+ let mut diff: Vec<u8> = Vec::with_capacity(64 * 1024); + let mut diff_length = 0; + + db_state_diff( + old_state.as_ptr(), + old_state.len(), + new_state.as_ptr(), + new_state.len(), + diff.as_mut_ptr(), + diff.capacity(), + &mut diff_length, + ); + + unsafe { + diff.set_len(diff_length); + } + + b.iter(|| { + let mut output: Vec<u8> = Vec::with_capacity(64 * 1024); + let mut output_length = 0; + + db_state_apply( + old_state.as_ptr(), + old_state.len(), + diff.as_ptr(), + diff.len(), + output.as_mut_ptr(), + output.capacity(), + &mut output_length, + ); + }); +} diff --git a/db/trusted/build.rs b/db/trusted/build.rs new file mode 100644 index 00000000000..e3fe80ac0a3 --- /dev/null +++ b/db/trusted/build.rs @@ -0,0 +1,11 @@ +extern crate ekiden_tools; + +fn main() { + ekiden_tools::generate_mod("src/generated", &["database"]); + + ekiden_tools::protoc(ekiden_tools::ProtocArgs { + out_dir: "src/generated/", + input: &["src/database.proto"], + includes: &["src/"], + }); +} diff --git a/db/trusted/src/crypto.rs b/db/trusted/src/crypto.rs new file mode 100644 index 00000000000..04332bc4d6c --- /dev/null +++ b/db/trusted/src/crypto.rs @@ -0,0 +1,79 @@ +use sodalite; + +use ekiden_common::error::{Error, Result}; +use ekiden_common::random; + +#[cfg(target_env = "sgx")] +use ekiden_key_manager_client::KeyManager; + +use super::generated::database::CryptoSecretbox; + +const SECRETBOX_ZEROBYTES: usize = 32; + +/// Retrieve or generate state secret key. +#[cfg(target_env = "sgx")] +fn get_state_key() -> Result<sodalite::SecretboxKey> { + let key = KeyManager::get()?.get_or_create_key("state", sodalite::SECRETBOX_KEY_LEN)?; + let mut state_key = [0; sodalite::SECRETBOX_KEY_LEN]; + state_key.copy_from_slice(key.as_slice()); + + Ok(state_key) +} + +#[cfg(not(target_env = "sgx"))] +fn get_state_key() -> Result<sodalite::SecretboxKey> { + // This implementation is used in unit tests (on non-SGX). + Ok([42; sodalite::SECRETBOX_KEY_LEN]) +} + +/// Open encrypted state box. +pub fn decrypt_state(encrypted_state: &CryptoSecretbox) -> Result<Vec<u8>> { + let state_key = get_state_key()?; + let encrypted_state_ciphertext = encrypted_state.get_ciphertext(); + + let mut encrypted_state_nonce: sodalite::SecretboxNonce = [0; sodalite::SECRETBOX_NONCE_LEN]; + encrypted_state_nonce.copy_from_slice(encrypted_state.get_nonce()); + + let mut state_raw_padded = vec![0; encrypted_state_ciphertext.len()]; + + match sodalite::secretbox_open( + &mut state_raw_padded, + encrypted_state_ciphertext, + &encrypted_state_nonce, + &state_key, + ) { + Ok(_) => {} + _ => return Err(Error::new("Failed to open state box")), + } + + Ok(state_raw_padded[SECRETBOX_ZEROBYTES..].to_vec()) +} + +/// Generate encrypted state box.
+pub fn encrypt_state(mut state: Vec<u8>) -> Result<CryptoSecretbox> { + let state_key = get_state_key()?; + + let mut state_raw_padded = vec![0; SECRETBOX_ZEROBYTES]; + state_raw_padded.append(&mut state); + + let mut encrypted_state_nonce = [0; sodalite::SECRETBOX_NONCE_LEN]; + random::get_random_bytes(&mut encrypted_state_nonce)?; + + let mut encrypted_state_ciphertext = vec![0; state_raw_padded.len()]; + + match sodalite::secretbox( + &mut encrypted_state_ciphertext, + &state_raw_padded, + &encrypted_state_nonce, + &state_key, + ) { + Ok(_) => {} + _ => return Err(Error::new("Failed to create state box")), + } + + let mut encrypted_state = CryptoSecretbox::new(); + encrypted_state.set_ciphertext(encrypted_state_ciphertext); + encrypted_state.set_nonce(encrypted_state_nonce.to_vec()); + + Ok(encrypted_state) +} diff --git a/db/trusted/src/database.proto b/db/trusted/src/database.proto new file mode 100644 index 00000000000..3e24a2c7eda --- /dev/null +++ b/db/trusted/src/database.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package database; + +// Cryptographic secretbox (encrypted and authenticated). +message CryptoSecretbox { + // Padded ciphertext. + bytes ciphertext = 1; + // Nonce. + bytes nonce = 2; +} + +// Structure used to store diffs. +message BsdiffPatch { + // Length of new blob. + uint64 new_length = 1; + // Compressed patch stream from bsdiff. + bytes patch_bz2 = 2; +} + +// Serialized state. +message State { + message KeyValue { + bytes key = 1; + bytes value = 2; + } + + repeated KeyValue state = 1; +} diff --git a/db/trusted/src/diffs.rs b/db/trusted/src/diffs.rs new file mode 100644 index 00000000000..93aae7e1ef6 --- /dev/null +++ b/db/trusted/src/diffs.rs @@ -0,0 +1,51 @@ +use std; + +use bsdiff; +use bzip2; +use protobuf; +use protobuf::Message; + +use ekiden_common::error::Result; + +use super::crypto; +use super::generated::database::{BsdiffPatch, CryptoSecretbox}; + +/// Diff: create a summary of changes that can be applied to `old` to recreate `new`. +/// This is the actual diffing algorithm implementation. +fn diff_internal(old: &[u8], new: &[u8]) -> Result<Vec<u8>> { + let mut enc = bzip2::write::BzEncoder::new( + std::io::Cursor::new(Vec::new()), + bzip2::Compression::Default, + ); + bsdiff::diff::diff(old, new, &mut enc)?; + let mut m = BsdiffPatch::new(); + m.set_new_length(new.len() as u64); + m.set_patch_bz2(enc.finish()?.into_inner()); + Ok(m.write_to_bytes()?) +} + +/// Apply: change `old` as specified by `diff`. +/// `apply_internal(&old, &diff_internal(&old, &new))` should be the same as `new`. +fn apply_internal(old: &[u8], diff: &[u8]) -> Result<Vec<u8>> { + let m: BsdiffPatch = protobuf::parse_from_bytes(diff)?; + let mut dec = bzip2::read::BzDecoder::new(std::io::Cursor::new(m.get_patch_bz2())); + let mut new = vec![0; m.get_new_length() as usize]; + bsdiff::patch::patch(old, &mut dec, &mut new)?; + Ok(new) +} + +pub fn diff(old: &CryptoSecretbox, new: &CryptoSecretbox) -> Result<CryptoSecretbox> { + let old = crypto::decrypt_state(&old)?; + let new = crypto::decrypt_state(&new)?; + let diff = diff_internal(&old, &new)?; + + Ok(crypto::encrypt_state(diff)?) +} + +pub fn apply(old: &CryptoSecretbox, diff: &CryptoSecretbox) -> Result<CryptoSecretbox> { + let old = crypto::decrypt_state(&old)?; + let diff = crypto::decrypt_state(&diff)?; + let new = apply_internal(&old, &diff)?; + + Ok(crypto::encrypt_state(new)?)
+} diff --git a/db/trusted/src/ecalls.rs b/db/trusted/src/ecalls.rs new file mode 100644 index 00000000000..eb54be8f288 --- /dev/null +++ b/db/trusted/src/ecalls.rs @@ -0,0 +1,80 @@ +use ekiden_common::profile_block; +use ekiden_enclave_trusted::utils::{read_enclave_request, write_enclave_response}; + +use super::diffs; +use super::handle::DatabaseHandle; + +#[no_mangle] +pub extern "C" fn db_state_diff( + old: *const u8, + old_length: usize, + new: *const u8, + new_length: usize, + diff: *mut u8, + diff_capacity: usize, + diff_length: *mut usize, +) { + profile_block!(); + + let old = read_enclave_request(old, old_length); + let new = read_enclave_request(new, new_length); + + // TODO: Error handling. + let result = match diffs::diff(&old, &new) { + Ok(result) => result, + _ => panic!("Error while computing difference"), + }; + + // Copy back response. + write_enclave_response(&result, diff, diff_capacity, diff_length); +} + +#[no_mangle] +pub extern "C" fn db_state_apply( + old: *const u8, + old_length: usize, + diff: *const u8, + diff_length: usize, + new: *mut u8, + new_capacity: usize, + new_length: *mut usize, +) { + profile_block!(); + + let old = read_enclave_request(old, old_length); + let diff = read_enclave_request(diff, diff_length); + + // TODO: Error handling. + let result = match diffs::apply(&old, &diff) { + Ok(result) => result, + _ => panic!("Error while applying diff"), + }; + + // Copy back response. + write_enclave_response(&result, new, new_capacity, new_length); +} + +#[no_mangle] +pub extern "C" fn db_state_set(state: *const u8, state_length: usize) { + profile_block!(); + + let state = read_enclave_request(state, state_length); + + // TODO: Propagate errors. + DatabaseHandle::instance() + .import(&state) + .expect("Error importing state"); +} + +#[no_mangle] +pub extern "C" fn db_state_get(state: *mut u8, state_capacity: usize, state_length: *mut usize) { + profile_block!(); + + // TODO: Propagate errors. + let result = DatabaseHandle::instance() + .export() + .expect("Error exporting state"); + + // Copy back response. + write_enclave_response(&result, state, state_capacity, state_length); +} diff --git a/db/trusted/src/handle.rs b/db/trusted/src/handle.rs new file mode 100644 index 00000000000..5bcf66cfc7a --- /dev/null +++ b/db/trusted/src/handle.rs @@ -0,0 +1,137 @@ +//! Low-level key-value database interface. +use std::collections::HashMap; +#[cfg(not(target_env = "sgx"))] +use std::sync::{Mutex, MutexGuard}; +#[cfg(target_env = "sgx")] +use std::sync::SgxMutex as Mutex; +#[cfg(target_env = "sgx")] +use std::sync::SgxMutexGuard as MutexGuard; + +use protobuf::{self, Message}; + +use ekiden_common::error::Result; + +use super::Database; +use super::crypto; +use super::generated::database::{CryptoSecretbox, State, State_KeyValue}; + +/// Database handle. +/// +/// This is a concrete implementation of the [`Database`] interface. +/// +/// [`Database`]: super::Database +pub struct DatabaseHandle { + /// Current database state. + state: HashMap<Vec<u8>, Vec<u8>>, + /// Dirtiness flag. + dirty: bool, +} + +lazy_static! { + // Global database object. + static ref DB: Mutex<DatabaseHandle> = Mutex::new(DatabaseHandle::new()); +} + +impl DatabaseHandle { + /// Construct new database interface. + fn new() -> Self { + DatabaseHandle { + state: HashMap::new(), + dirty: false, + } + } + + /// Get global database interface instance. + /// + /// Calling this method will take a lock on the global instance, which will + /// be released once the value goes out of scope.
+ pub fn instance<'a>() -> MutexGuard<'a, DatabaseHandle> { + DB.lock().unwrap() + } + + /// Import database. + pub(crate) fn import(&mut self, state: &CryptoSecretbox) -> Result<()> { + let mut state: State = protobuf::parse_from_bytes(&crypto::decrypt_state(&state)?)?; + + self.state.clear(); + for kv in state.take_state().iter_mut() { + self.state.insert(kv.take_key(), kv.take_value()); + } + + self.dirty = false; + + Ok(()) + } + + /// Export database. + /// + /// If nothing was modified since the last import, this method will return an + /// uninitialized CryptoSecretbox. + pub(crate) fn export(&mut self) -> Result<CryptoSecretbox> { + if !self.dirty { + // Database has not changed, we don't need to export anything. + return Ok(CryptoSecretbox::new()); + } + + let mut state = State::new(); + { + let items = state.mut_state(); + for (key, value) in &self.state { + let mut item = State_KeyValue::new(); + item.set_key(key.clone()); + item.set_value(value.clone()); + + items.push(item); + } + } + + Ok(crypto::encrypt_state(state.write_to_bytes()?)?) + } +} + +impl Database for DatabaseHandle { + fn contains_key(&self, key: &[u8]) -> bool { + self.state.contains_key(key) + } + + fn get(&self, key: &[u8]) -> Option<Vec<u8>> { + self.state.get(key).cloned() + } + + fn insert(&mut self, key: &[u8], value: &[u8]) -> Option<Vec<u8>> { + self.dirty = true; + self.state.insert(key.to_owned(), value.to_owned()) + } + + fn remove(&mut self, key: &[u8]) -> Option<Vec<u8>> { + self.dirty = true; + self.state.remove(key) + } + + /// Clear database state. + fn clear(&mut self) { + self.dirty = true; + self.state.clear(); + } +} + +#[cfg(test)] +mod tests { + use super::{Database, DatabaseHandle}; + + #[test] + fn test_basic_operations() { + let mut db = DatabaseHandle::instance(); + + db.clear(); + db.insert(b"foo", b"hello world"); + db.insert(b"bar", b"another data value"); + + assert_eq!(db.get(b"foo"), Some(b"hello world".to_vec())); + assert_eq!(db.get(b"another"), None); + + db.remove(b"foo"); + + assert_eq!(db.get(b"foo"), None); + } +} diff --git a/db/trusted/src/lib.rs b/db/trusted/src/lib.rs new file mode 100644 index 00000000000..80c0d335bb4 --- /dev/null +++ b/db/trusted/src/lib.rs @@ -0,0 +1,53 @@ +#![feature(core_intrinsics)] +#![feature(use_extern_macros)] + +extern crate bsdiff; +extern crate bzip2; +#[macro_use] +extern crate lazy_static; +extern crate protobuf; +extern crate sodalite; + +#[macro_use] +extern crate ekiden_common; +extern crate ekiden_enclave_trusted; +extern crate ekiden_key_manager_client; + +mod generated; + +mod crypto; +mod diffs; +#[doc(hidden)] +pub mod ecalls; + +pub mod handle; +pub use handle::DatabaseHandle; + +#[macro_use] +pub mod schema; + +/// Database interface exposed to contracts. +pub trait Database { + /// Returns true if the database contains a value for the specified key. + fn contains_key(&self, key: &[u8]) -> bool; + + /// Fetch entry with given key. + fn get(&self, key: &[u8]) -> Option<Vec<u8>>; + + /// Update entry with given key. + /// + /// If the database did not have this key present, [`None`] is returned. + /// + /// If the database did have this key present, the value is updated, and the old value is + /// returned. + /// + /// [`None`]: std::option::Option + fn insert(&mut self, key: &[u8], value: &[u8]) -> Option<Vec<u8>>; + + /// Remove entry with given key, returning the value at the key if the key was previously + /// in the database. + fn remove(&mut self, key: &[u8]) -> Option<Vec<u8>>; + + /// Clear database state.
+ fn clear(&mut self); +} diff --git a/db/trusted/src/schema/descriptor.rs b/db/trusted/src/schema/descriptor.rs new file mode 100644 index 00000000000..7abb439d752 --- /dev/null +++ b/db/trusted/src/schema/descriptor.rs @@ -0,0 +1,212 @@ +//! Field descriptors used in the schema-based interface. +use std::borrow::Borrow; +use std::marker::PhantomData; + +use ekiden_common::serializer::{Deserializable, Serializable}; + +use super::super::{Database, DatabaseHandle}; + +/// Descriptor for scalar fields. +pub struct ScalarDescriptor<T> { + namespace: &'static str, + name: &'static str, + value_type: PhantomData<T>, +} + +/// Descriptor for map fields. +pub struct MapDescriptor<K, V> { + namespace: &'static str, + name: &'static str, + key_type: PhantomData<K>, + value_type: PhantomData<V>, +} + +impl<T> ScalarDescriptor<T> +where + T: Serializable + Deserializable, +{ + /// Create new scalar descriptor. + pub fn new(namespace: &'static str, name: &'static str) -> Self { + Self { + namespace, + name, + value_type: PhantomData, + } + } + + /// Derive the key for storing this field in the underlying database. + fn get_key(&self) -> Vec<u8> { + let mut key = vec![]; + self.namespace.write_to(&mut key).unwrap(); + self.name.write_to(&mut key).unwrap(); + + key + } + + /// Insert a value for this field. + /// + /// If the database did not have this key present, [`None`] is returned. + /// + /// If the database did have this key present, the value is updated, and the old value is + /// returned. + /// + /// The value may be any borrowed form of the descriptor's value type, but [`Serializable`] + /// on the borrowed form must match those for the value type. + /// + /// [`None`]: std::option::Option + /// [`Serializable`]: ekiden_common::serializer::Serializable + pub fn insert<Q>(&self, value: &Q) -> Option<T> + where + T: Borrow<Q>, + Q: ?Sized + Serializable, + { + let mut db = DatabaseHandle::instance(); + let value = Serializable::write(value.borrow()).expect("Failed to serialize state"); + match db.insert(&self.get_key(), &value) { + Some(value) => Some(Deserializable::read(&value).expect("Corrupted state")), + None => None, + } + } + + /// Fetch a value for this field. + pub fn get(&self) -> Option<T> { + let db = DatabaseHandle::instance(); + match db.get(&self.get_key()) { + Some(value) => Some(Deserializable::read(&value).expect("Corrupted state")), + None => None, + } + } + + /// Remove a value for this field, returning the value at the key if the key was previously + /// in the database. + pub fn remove(&self) -> Option<T> { + let mut db = DatabaseHandle::instance(); + match db.remove(&self.get_key()) { + Some(value) => Some(Deserializable::read(&value).expect("Corrupted state")), + None => None, + } + } + + /// Check if a field is present in the underlying database. + pub fn is_present(&self) -> bool { + let db = DatabaseHandle::instance(); + db.contains_key(&self.get_key()) + } +} + +impl<K, V> MapDescriptor<K, V> +where + K: Serializable, + V: Serializable + Deserializable, +{ + /// Create new map descriptor. + pub fn new(namespace: &'static str, name: &'static str) -> Self { + Self { + namespace, + name, + key_type: PhantomData, + value_type: PhantomData, + } + } + + /// Derive the key for storing this field in the underlying database. + /// + /// The key may be any borrowed form of the descriptor's key type, but [`Serializable`] + /// on the borrowed form must match those for the key type.
+ /// + /// [`Serializable`]: ekiden_common::serializer::Serializable + fn get_key_for_subkey<Q>(&self, subkey: &Q) -> Vec<u8> + where + K: Borrow<Q>, + Q: ?Sized + Serializable, + { + let mut key = vec![]; + self.namespace.write_to(&mut key).unwrap(); + self.name.write_to(&mut key).unwrap(); + subkey.write_to(&mut key).unwrap(); + + key + } + + /// Insert a value for this field. + /// + /// If the database did not have this key present, [`None`] is returned. + /// + /// If the database did have this key present, the value is updated, and the old value is + /// returned. + /// + /// The key may be any borrowed form of the descriptor's key type, but [`Serializable`] + /// on the borrowed form must match those for the key type. + /// + /// The value may be any borrowed form of the descriptor's value type, but [`Serializable`] + /// on the borrowed form must match those for the value type. + /// + /// [`None`]: std::option::Option + /// [`Serializable`]: ekiden_common::serializer::Serializable + pub fn insert<Q, P>(&self, key: &Q, value: &P) -> Option<V> + where + K: Borrow<Q>, + V: Borrow<P>, + Q: ?Sized + Serializable, + P: ?Sized + Serializable, + { + let mut db = DatabaseHandle::instance(); + let value = Serializable::write(value.borrow()).expect("Failed to serialize value"); + match db.insert(&self.get_key_for_subkey(key), &value) { + Some(value) => Some(Deserializable::read(&value).expect("Corrupted state")), + None => None, + } + } + + /// Fetch a value for this field. + /// + /// The key may be any borrowed form of the descriptor's key type, but [`Serializable`] + /// on the borrowed form must match those for the key type. + /// + /// [`Serializable`]: ekiden_common::serializer::Serializable + pub fn get<Q>(&self, key: &Q) -> Option<V> + where + K: Borrow<Q>, + Q: ?Sized + Serializable, + { + let db = DatabaseHandle::instance(); + match db.get(&self.get_key_for_subkey(key)) { + Some(value) => Some(Deserializable::read(&value).expect("Corrupted state")), + None => None, + } + } + + /// Remove a value for this field, returning the value at the key if the key was previously + /// in the database. + /// + /// The key may be any borrowed form of the descriptor's key type, but [`Serializable`] + /// on the borrowed form must match those for the key type. + /// + /// [`Serializable`]: ekiden_common::serializer::Serializable + pub fn remove<Q>(&self, key: &Q) -> Option<V> + where + K: Borrow<Q>, + Q: ?Sized + Serializable, + { + let mut db = DatabaseHandle::instance(); + match db.remove(&self.get_key_for_subkey(key)) { + Some(value) => Some(Deserializable::read(&value).expect("Corrupted state")), + None => None, + } + } + + /// Check if a field is present in the underlying database. + /// + /// The key may be any borrowed form of the descriptor's key type, but [`Serializable`] + /// on the borrowed form must match those for the key type. + /// + /// [`Serializable`]: ekiden_common::serializer::Serializable + pub fn contains_key<Q>(&self, key: &Q) -> bool + where + K: Borrow<Q>, + Q: ?Sized + Serializable, + { + let db = DatabaseHandle::instance(); + db.contains_key(&self.get_key_for_subkey(key)) + } +} diff --git a/db/trusted/src/schema/macros.rs b/db/trusted/src/schema/macros.rs new file mode 100644 index 00000000000..6f62fee8f03 --- /dev/null +++ b/db/trusted/src/schema/macros.rs @@ -0,0 +1,116 @@ +/// Define a database schema structure. +/// +/// Each field in the defined structure is replaced by a descriptor for the given +/// field type. A [`ScalarDescriptor`] is generated for each scalar field and a +/// [`MapDescriptor`] is generated for each `Map` field. +/// +/// Any type that implements the [`Serializable`] and [`Deserializable`] traits can +/// be used in the schema struct as a value (either as a scalar field or in a map). +/// +/// Any type that implements the [`Serializable`] trait can be used in the schema +/// struct as a key in mappings. +/// +/// [`ScalarDescriptor`]: super::descriptor::ScalarDescriptor +/// [`MapDescriptor`]: super::descriptor::MapDescriptor +/// [`Serializable`]: ekiden_common::serializer::Serializable +/// [`Deserializable`]: ekiden_common::serializer::Deserializable +/// +/// # Examples +/// +/// ```ignore +/// database_schema! { +/// pub struct MySchema { +/// pub foo: String, +/// pub bar: u64, +/// pub mapping: Map<String, u64>, +/// } +/// } +/// ``` +#[macro_export] +macro_rules! database_schema { + () => {}; + + // Entry point, parse struct(s). + ( + $( + pub struct $schema_name:ident { + $($body:tt)* + } + )* + ) => { + $( + database_schema!(@parse_body($schema_name) -> ($($body)*)); + )* + }; + + // Internal pattern: parse map field.
+ ( + @parse_body($($args:tt)*) -> ( + pub $field_name:ident : Map<$key_type:ty, $value_type:ty>, + $($tail:tt)* + ) + ) => { + database_schema!( + @parse_body( + $($args)*, + ( + $field_name, + $crate::schema::descriptor::MapDescriptor<$key_type, $value_type>, + $crate::schema::descriptor::MapDescriptor::new + ) + ) -> ( + $($tail)* + ) + ); + }; + + // Internal pattern: parse scalar field. + ( + @parse_body($($args:tt)*) -> ( + pub $field_name:ident : $field_type:ty, + $($tail:tt)* + ) + ) => { + database_schema!( + @parse_body( + $($args)*, + ( + $field_name, + $crate::schema::descriptor::ScalarDescriptor<$field_type>, + $crate::schema::descriptor::ScalarDescriptor::new + ) + ) -> ( + $($tail)* + ) + ); + }; + + // Internal pattern: emit final struct. + ( + @parse_body( + $schema_name:ident, + $( + ($field_name:ident, $field_def:ty, $field_new:expr) + ),* + ) -> () + ) => { + pub struct $schema_name { + $( + pub $field_name: $field_def, + )* + } + + impl $schema_name { + pub fn new() -> Self { + Self { + $( + $field_name: $field_new( + stringify!($schema_name), + stringify!($field_name) + ), + )* + } + } + } + } +} diff --git a/db/trusted/src/schema/mod.rs b/db/trusted/src/schema/mod.rs new file mode 100644 index 00000000000..5babf0d27b4 --- /dev/null +++ b/db/trusted/src/schema/mod.rs @@ -0,0 +1,88 @@ +//! Higher-level schema-based database interface. +pub mod descriptor; + +#[doc(hidden)] +#[macro_use] +pub mod macros; + +#[cfg(test)] +mod tests { + use super::super::{Database, DatabaseHandle}; + + database_schema! { + pub struct TestSchema { + pub foo: String, + pub bar: String, + pub moo: u64, + pub balance_of: Map<String, u64>, + } + + pub struct AnotherSchema { + pub foo: String, + pub bar: String, + } + } + + #[test] + fn test_operations() { + { + let mut db = DatabaseHandle::instance(); + db.clear(); + } + + let schema = TestSchema::new(); + + // Test scalars. + assert!(!schema.foo.is_present()); + assert!(!schema.bar.is_present()); + assert!(!schema.moo.is_present()); + + assert_eq!(schema.foo.insert("hello world"), None); + assert_eq!(schema.moo.insert(&42), None); + + assert!(schema.foo.is_present()); + assert!(!schema.bar.is_present()); + assert!(schema.moo.is_present()); + + assert_eq!(schema.foo.get(), Some("hello world".to_owned())); + assert_eq!(schema.moo.get(), Some(42)); + + assert_eq!(schema.moo.remove(), Some(42)); + assert!(!schema.moo.is_present()); + + assert_eq!(schema.foo.insert("another"), Some("hello world".to_owned())); + + // Test map.
+ assert_eq!(schema.balance_of.insert("inner_key", &42), None); + assert!(schema.balance_of.contains_key("inner_key")); + assert!(!schema.balance_of.contains_key("foo")); + + assert_eq!(schema.balance_of.insert("inner_key", &100), Some(42)); + } + + #[test] + fn test_namespaces() { + { + let mut db = DatabaseHandle::instance(); + db.clear(); + } + + let schema1 = TestSchema::new(); + let schema2 = AnotherSchema::new(); + + assert!(!schema1.foo.is_present()); + assert!(!schema1.bar.is_present()); + assert!(!schema2.foo.is_present()); + assert!(!schema2.bar.is_present()); + + assert_eq!(schema1.foo.insert("hello"), None); + + assert!(schema1.foo.is_present()); + assert!(!schema2.foo.is_present()); + + assert_eq!(schema2.foo.insert("world"), None); + + assert_eq!(schema1.foo.get(), Some("hello".to_owned())); + assert_eq!(schema2.foo.get(), Some("world".to_owned())); + } +} diff --git a/db/untrusted/Cargo.toml b/db/untrusted/Cargo.toml new file mode 100644 index 00000000000..44aa9bdf970 --- /dev/null +++ b/db/untrusted/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "ekiden-db-untrusted" +version = "0.1.0-alpha.1" +authors = ["Ekiden Developers "] +description = "Ekiden database (untrusted part)" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" + +[features] +sgx-simulation = [] + +[dependencies] +sgx_types = { git = "https://github.com/ekiden/rust-sgx-sdk", tag = "v0.9.7-ekiden1" } +ekiden-common = { path = "../../common", version = "0.1.0-alpha.1" } +ekiden-enclave-untrusted = { path = "../../enclave/untrusted", version = "0.1.0-alpha.1" } + +[build-dependencies] +ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" } diff --git a/db/untrusted/Makefile.toml b/db/untrusted/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/db/untrusted/Makefile.toml @@ -0,0 +1 @@ +extend = "../Makefile.toml" diff --git a/db/untrusted/src/ecall_proxy.rs b/db/untrusted/src/ecall_proxy.rs new file mode 100644 index 00000000000..26b1935b97e --- /dev/null +++ b/db/untrusted/src/ecall_proxy.rs @@ -0,0 +1,38 @@ +use sgx_types::*; + +extern "C" { + pub fn db_state_diff( + eid: sgx_enclave_id_t, + old: *const u8, + old_length: usize, + new: *const u8, + new_length: usize, + diff: *mut u8, + diff_capacity: usize, + diff_length: *mut usize, + ) -> sgx_status_t; + + pub fn db_state_apply( + eid: sgx_enclave_id_t, + old: *const u8, + old_length: usize, + diff: *const u8, + diff_length: usize, + new: *mut u8, + new_capacity: usize, + new_length: *mut usize, + ) -> sgx_status_t; + + pub fn db_state_set( + eid: sgx_enclave_id_t, + state: *const u8, + state_length: usize, + ) -> sgx_status_t; + + pub fn db_state_get( + eid: sgx_enclave_id_t, + state: *mut u8, + state_capacity: usize, + state_length: *mut usize, + ) -> sgx_status_t; +} diff --git a/db/untrusted/src/enclave.rs b/db/untrusted/src/enclave.rs new file mode 100644 index 00000000000..87d38e11fe6 --- /dev/null +++ b/db/untrusted/src/enclave.rs @@ -0,0 +1,129 @@ +//! Enclave database interface. +use sgx_types::*; + +use ekiden_common::error::{Error, Result}; +use ekiden_enclave_untrusted::Enclave; + +use super::ecall_proxy; + +/// Enclave database interface. +pub trait EnclaveDb { + /// Maximum response size (in kilobytes). + const MAX_RESPONSE_SIZE: usize = 1024; + + /// Compute difference between states. + fn db_state_diff(&self, old: &Vec<u8>, new: &Vec<u8>) -> Result<Vec<u8>>; + + /// Apply difference between states to an existing state.
+ fn db_state_apply(&self, old: &Vec<u8>, diff: &Vec<u8>) -> Result<Vec<u8>>; + + /// Set enclave state. + fn db_state_set(&self, state: &Vec<u8>) -> Result<()>; + + /// Retrieve enclave state. + /// + /// If nothing was modified since the last import, this method will return an empty + /// vector. + fn db_state_get(&self) -> Result<Vec<u8>>; +} + +impl EnclaveDb for Enclave { + /// Compute difference between states. + fn db_state_diff(&self, old: &Vec<u8>, new: &Vec<u8>) -> Result<Vec<u8>> { + // Reserve space up to the maximum size of serialized response. + let mut diff: Vec<u8> = Vec::with_capacity(Self::MAX_RESPONSE_SIZE * 1024); + let mut diff_length = 0; + + let status = unsafe { + ecall_proxy::db_state_diff( + self.get_id(), + old.as_ptr() as *const u8, + old.len(), + new.as_ptr() as *const u8, + new.len(), + diff.as_mut_ptr() as *mut u8, + diff.capacity(), + &mut diff_length, + ) + }; + + if status != sgx_status_t::SGX_SUCCESS { + return Err(Error::new("Failed to call enclave state diff")); + } + + unsafe { + diff.set_len(diff_length); + } + + Ok(diff) + } + + /// Apply difference between states to an existing state. + fn db_state_apply(&self, old: &Vec<u8>, diff: &Vec<u8>) -> Result<Vec<u8>> { + // Reserve space up to the maximum size of serialized response. + let mut new: Vec<u8> = Vec::with_capacity(Self::MAX_RESPONSE_SIZE * 1024); + let mut new_length = 0; + + let status = unsafe { + ecall_proxy::db_state_apply( + self.get_id(), + old.as_ptr() as *const u8, + old.len(), + diff.as_ptr() as *const u8, + diff.len(), + new.as_mut_ptr() as *mut u8, + new.capacity(), + &mut new_length, + ) + }; + + if status != sgx_status_t::SGX_SUCCESS { + return Err(Error::new("Failed to call enclave state apply")); + } + + unsafe { + new.set_len(new_length); + } + + Ok(new) + } + + /// Set enclave state. + fn db_state_set(&self, state: &Vec<u8>) -> Result<()> { + let status = unsafe { + ecall_proxy::db_state_set(self.get_id(), state.as_ptr() as *const u8, state.len()) + }; + + if status != sgx_status_t::SGX_SUCCESS { + return Err(Error::new("Failed to call enclave state set")); + } + + Ok(()) + } + + /// Retrieve enclave state. + fn db_state_get(&self) -> Result<Vec<u8>> { + // Reserve space up to the maximum size of serialized response. + let mut state: Vec<u8> = Vec::with_capacity(Self::MAX_RESPONSE_SIZE * 1024); + let mut state_length = 0; + + let status = unsafe { + ecall_proxy::db_state_get( + self.get_id(), + state.as_mut_ptr() as *mut u8, + state.capacity(), + &mut state_length, + ) + }; + + if status != sgx_status_t::SGX_SUCCESS { + return Err(Error::new("Failed to call enclave state get")); + } + + unsafe { + state.set_len(state_length); + } + + Ok(state) + } +} diff --git a/db/untrusted/src/lib.rs b/db/untrusted/src/lib.rs new file mode 100644 index 00000000000..bffbb8fa823 --- /dev/null +++ b/db/untrusted/src/lib.rs @@ -0,0 +1,11 @@ +extern crate sgx_types; + +extern crate ekiden_common; +extern crate ekiden_enclave_untrusted; + +pub mod enclave; +#[doc(hidden)] +pub mod ecall_proxy; + +// Exports. +pub use enclave::EnclaveDb; diff --git a/docker/deployment/Dockerfile.build b/docker/deployment/Dockerfile.build new file mode 100644 index 00000000000..31236c0e32f --- /dev/null +++ b/docker/deployment/Dockerfile.build @@ -0,0 +1,35 @@ +FROM docker.io/ekiden/development:0.1.0-alpha.0 + +ENV HOME="/root" +ENV PATH="${HOME}/.cargo/bin:${PATH}" +ENV INTEL_SGX_SDK="/opt/sgxsdk" +ENV SGX_MODE="SIM" +ENV SGX_ARCH="x64" + +# We need a newer version of python-protobuf than is available in the distro. +# Installing this stuff through pip also sidesteps the need to upgrade gcc.
+RUN apt-get update # 20180204 +RUN apt-get install -y --no-install-recommends python-pip +# Benchmarks don't run the evaluation, so no scipy or sklearn +RUN pip install numpy pandas xlrd protobuf + +# Copy code. +ADD . /code + +# Build all Ekiden binaries and resources. +RUN cd /code && \ + cargo make build-release-flow + +# Package all binaries and resources. +RUN mkdir -p /package/bin /package/lib /package/res && \ + cp /code/target/enclave/*.signed.so /package/lib && \ + cp /code/target/enclave/*.mrenclave /package/lib && \ + cp /code/target/release/ekiden-compute /package/bin && \ + cp /code/target/release/ekiden-consensus /package/bin && \ + cp /code/docker/deployment/Dockerfile.runtime /package/Dockerfile + +# This is a builder container which outputs the contents of the package +# on standard output. This enables the runtime and the builder container +# to be different and reduces the image size considerably. +WORKDIR /package +CMD tar cvzhf - . diff --git a/docker/deployment/Dockerfile.runtime b/docker/deployment/Dockerfile.runtime new file mode 100644 index 00000000000..6fa71fd75b0 --- /dev/null +++ b/docker/deployment/Dockerfile.runtime @@ -0,0 +1,9 @@ +FROM tozd/sgx:ubuntu-xenial + +ENV PATH="/ekiden/bin:${PATH}" + +# install dependencies needed by learner contracts +RUN apt-get install -y python-pip +RUN pip install numpy pandas protobuf xlrd + +ADD . /ekiden diff --git a/docker/deployment/build-images.sh b/docker/deployment/build-images.sh new file mode 100755 index 00000000000..dbbb0ed55b4 --- /dev/null +++ b/docker/deployment/build-images.sh @@ -0,0 +1,21 @@ +#!/bin/bash -e + +base_dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd ) + +pushd ${base_dir} + +# Build the builder Docker image first. +docker build \ + --force-rm \ + -t ekiden/core-builder \ + -f docker/deployment/Dockerfile.build . + +# Build the deployable image from the builder image. +docker run \ + --rm ekiden/core-builder \ + | docker build \ + --rm --force-rm \ + -t ekiden/core \ + - + +popd diff --git a/docker/development/Dockerfile b/docker/development/Dockerfile new file mode 100644 index 00000000000..0360c636ba7 --- /dev/null +++ b/docker/development/Dockerfile @@ -0,0 +1,10 @@ +FROM docker.io/ekiden/rust-sgx-sdk:0.9.7 + +ENV HOME="/root" +ENV PATH="${HOME}/.cargo/bin:${PATH}" + +RUN apt-get update -q -q && \ + apt-get install -y pkg-config python-pyelftools && \ + rustup update nightly && \ + cargo +nightly install rustfmt-nightly --version 0.3.6 --force && \ + cargo install cargo-make diff --git a/docker/testing/Dockerfile b/docker/testing/Dockerfile new file mode 100644 index 00000000000..8b68e956b0b --- /dev/null +++ b/docker/testing/Dockerfile @@ -0,0 +1,34 @@ +FROM docker.io/ekiden/development:0.1.0-alpha.0 + +# This is the release of tendermint to pull in. +ENV TM_VERSION 0.13.0 +ENV TM_SHA256SUM 36d773d4c2890addc61cc87a72c1e9c21c89516921b0defb0edfebde719b4b85 + +# Tendermint will be looking for genesis file in /tendermint (unless you change +# `genesis_file` in config.toml). You can put your config.toml and private +# validator file into /tendermint. +# +# The /tendermint/data dir is used by tendermint to store state. 
+ENV DATA_ROOT /tendermint +ENV TMHOME $DATA_ROOT + +# Set user right away for determinism +RUN adduser --system --group tmuser + +# Create directory for persistence and give our user ownership +RUN mkdir -p $DATA_ROOT && \ + chown -R tmuser:tmuser $DATA_ROOT + +# jq and curl are used for extracting `pub_key` from the private validator while +# deploying tendermint with Kubernetes. It is nice to have bash so that users +# can execute bash commands. +RUN apt-get install -y jq + +RUN wget https://s3-us-west-2.amazonaws.com/tendermint/binaries/tendermint/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \ + echo "${TM_SHA256SUM} tendermint_${TM_VERSION}_linux_amd64.zip" | sha256sum -c && \ + unzip -d /bin tendermint_${TM_VERSION}_linux_amd64.zip && \ + rm -f tendermint_${TM_VERSION}_linux_amd64.zip + +# Leave $DATA_ROOT in the ephemeral filesystem so that we start tests with a clean state. + +# Don't expose services from testing instance. diff --git a/docs/contributing.md b/docs/contributing.md new file mode 100644 index 00000000000..0b838341b5c --- /dev/null +++ b/docs/contributing.md @@ -0,0 +1,17 @@ +# Contributing + +TODO + +## Making a release + +Once everything is ready for a release, you can use the `./scripts/make-release.py` script to prepare a release. This script covers the following steps of the release process: + +* Bumps all crate versions and the versions of all internal dependencies. +* Optionally (default: no) bumps and builds Docker images. +* Commits the version bumps. +* Creates a tag for the version. +* Optionally (default: yes) calls `cargo publish` on crates. +* Optionally (default: no) bumps the repository (master) to a new pre-release version and commits this change. +* Optionally (default: yes) pushes changes to Git. + +See `./scripts/make-release.py --help` for more information on usage. diff --git a/docs/database.md b/docs/database.md new file mode 100644 index 00000000000..c7f65fa4e30 --- /dev/null +++ b/docs/database.md @@ -0,0 +1,104 @@ +# Database + +The Ekiden database serves as a persistent state store for contracts. + +## Interfaces + +Only the trusted API exposed to contracts should be considered relatively stable. **How state is serialized and stored outside the contract and the EDL interface is currently an unstable implementation detail which will change in future versions.** + +### Trusted API exposed to contracts + +#### Low-level interface + +```rust +/// Database interface exposed to contracts. +pub trait Database { + /// Returns true if the database contains a value for the specified key. + fn contains_key(&self, key: &[u8]) -> bool; + + /// Fetch entry with given key. + fn get(&self, key: &[u8]) -> Option<Vec<u8>>; + + /// Update entry with given key. + /// + /// If the database did not have this key present, [`None`] is returned. + /// + /// If the database did have this key present, the value is updated, and the old value is + /// returned. + fn insert(&mut self, key: &[u8], value: &[u8]) -> Option<Vec<u8>>; + + /// Remove entry with given key, returning the value at the key if the key was previously + /// in the database. + fn remove(&mut self, key: &[u8]) -> Option<Vec<u8>>; + + /// Clear database state. + fn clear(&mut self); +} +``` + +#### Schema-based interface + +Since the low-level database interface can be tedious to use, the database also exposes a schema-based interface. Using this interface, you first define a database schema and then database manipulation functions will be generated automatically.
+ +Schema definition looks as follows (see the `database_schema!` macro documentation for more information): +```rust +database_schema! { + pub struct TestSchema { + pub foo: String, + pub bar: String, + pub moo: u64, + pub balance_of: Map<String, u64>, + } + + // Schema structs are namespaced, so defining a struct with a different name + // will cause its fields to be in a different namespace. + pub struct AnotherSchema { + pub foo: String, + pub bar: String, + } +} +``` + +Example use: +```rust +let schema = TestSchema::new(); + +// Test scalars. +assert!(!schema.foo.is_present()); +assert!(!schema.bar.is_present()); +assert!(!schema.moo.is_present()); + +assert_eq!(schema.foo.insert("hello world"), None); +assert_eq!(schema.moo.insert(&42), None); + +assert!(schema.foo.is_present()); +assert!(!schema.bar.is_present()); +assert!(schema.moo.is_present()); + +assert_eq!(schema.foo.get(), Some("hello world".to_owned())); +assert_eq!(schema.moo.get(), Some(42)); + +assert_eq!(schema.moo.remove(), Some(42)); +assert!(!schema.moo.is_present()); + +assert_eq!(schema.foo.insert("another"), Some("hello world".to_owned())); + +// Test map. +assert_eq!(schema.balance_of.insert("inner_key", &42), None); +assert!(schema.balance_of.contains_key("inner_key")); +assert!(!schema.balance_of.contains_key("foo")); + +assert_eq!(schema.balance_of.insert("inner_key", &100), Some(42)); +``` + +For scalar fields, field names are translated to underlying database keys as follows: +``` +db_key := namespace field_name +``` + +For map fields, keys are translated to underlying database keys as follows: +``` +db_key := namespace field_name key +``` + +In both cases `namespace` is the name of the structure that defines the schema (e.g., `TestSchema` and `AnotherSchema` in the above example). All strings are encoded as UTF-8. diff --git a/docs/enclave-backtrace.md b/docs/enclave-backtrace.md new file mode 100644 index 00000000000..8e907cf473f --- /dev/null +++ b/docs/enclave-backtrace.md @@ -0,0 +1,6 @@ +# Enabling backtrace in an enclave +```rust +std::backtrace::enable_backtrace("xxx", std::backtrace::PrintFormat::Short).expect("Failed to enable backtrace"); +``` + +Call [`enable_backtrace`](https://github.com/baidu/rust-sgx-sdk/blob/master/sgx_tstd/src/backtrace.rs#L40-L48) with the path of your enclave's file image (`xxx` above). diff --git a/docs/enclave-identity.md b/docs/enclave-identity.md new file mode 100644 index 00000000000..723eb3c5335 --- /dev/null +++ b/docs/enclave-identity.md @@ -0,0 +1,88 @@ +# Enclave identity +In this module, an enclave persistence maintains an identity for itself. + +## State +* An immutable identity for an enclave persistence. Lives as sealed data. Consists of: + * an asymmetric key pair used to bootstrap secure communications + * if we need monotonic counters, the IDs of those counters + +Keys are generated inside the secure enclave and only leave the enclave in a sealed form (for persistence). + +### Current implementation +The identity contains: + +* RPC long-term contract key `E` + +## Interfaces + +### Enclave edge interfaces +These are not high-fidelity method signatures. +For example, outputs may be pointer arguments instead of return values. + +* ECALL `identity_create() -> sealed_identity`: + Generates an identity for a new enclave persistence and exports it in a sealed form. + Does not start using that identity. + Call `identity_restore` to start using it. +* ECALL `identity_restore(sealed_identity) -> public_identity`: + Populates an enclave launch with an identity.
+ The enclave launch must not have already restored an identity. + Gives the public identity string back. + The enclave launch caches the identity in ephemeral enclave memory so that we don't have to pass the sealed ID and unseal it for every entry. +* ECALL `identity_create_report(target_info) -> report`: + Create a report for use in the enclave identity proof. + The enclave launch must have an identity. + The report data is specified below. +* ECALL `identity_set_av_report(av_report) -> void`: + Populates an enclave launch with an attestation verification report (AVR). + The enclave launch caches the AVR for internal use, for example, if it needs its own enclave identity proof (specified below). + +### Trusted interfaces +* `IDENTITY: identity` +* `get_proof() -> identity_proof` + +### Untrusted interfaces +* `EnclaveIdentity::identity_init() -> identity_proof` + +## Public identity string +The public identity of an enclave persistence established this way is a string that canonically encodes the public parts of the identity. + +Protocol buffers would not be ideal because [the specification does not define a canonical form](https://gist.github.com/kchristidis/39c8b310fd9da43d515c4394c3cd9510). +However, it would be sufficient to have a deterministic encoding, even if it does not define a canonical encoding. +We might be able to achieve that with a subset of Protocol buffers that excludes things like unknown fields and mappings. + +### Current implementation +The public identity string is encoded as a bare Sodalite public key. + +## Report data +* Quote context (64 bits) +* Version of public identity string format (64 bits, little-endian) +* Padding (128 bits) +* Digest of public identity string (256 bits) + +This allows us to fit a potentially large public identity string in the report data. +It may help allow changes to the format of the public identity string. + +### Current implementation +The quote context is `EkQ-Iden`. + +The identity version is 0. + +The padding is all zero. + +The digest algorithm is SHA-512 truncated to use the first 256 bits. + +## Enclave identity proof +It's the **public identity string** and an **attestation verification report** (AVR) (includes quote; quote includes report). + +A compute node creates this proof by calling `create_report`, getting a revocation list, getting a quote, and verifying that quote. + +To verify: +* Verify the signature (chain) on the AVR. +* Check that the AVR is recent enough. +* Check that the AVR says that the quote is okay. +* (we don't care about the quote outside of the report; that's IAS's problem) +* Check that the report data was derived from the public identity string. + +This tells you *only* that all this identity came from **some** enclave persistence running **some** enclave program on **some** platform that IAS trusts (recently trusted). It's only the *authentication*. Next, for *authorization*, you would have to apply some policy to the information (e.g., the MRENCLAVE and flags in the report). + +These proofs are intended to be valid for a period of time, so that the system can use keys in the enclave identity to sign and verify messages without contacting IAS. Currently we have it so that AVRs expire after a while. This would be much better if IAS would include a timestamp on its signed revocation list. Then we could allow them to be valid until the revocation list changes. 
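To make the report data layout above concrete, here is a minimal sketch of how the 64 bytes could be assembled. This is illustrative only: it assumes the `sha2` crate, and the `report_data`/`public_identity` names are hypothetical rather than part of the Ekiden API.

```rust
extern crate sha2;

use sha2::{Digest, Sha512};

// Values taken from the current implementation described above.
const QUOTE_CONTEXT: &[u8; 8] = b"EkQ-Iden";
const IDENTITY_VERSION: u64 = 0;

/// Hypothetical helper: assemble the 64-byte report data.
fn report_data(public_identity: &[u8]) -> [u8; 64] {
    let mut data = [0u8; 64];
    // Quote context (64 bits).
    data[0..8].copy_from_slice(QUOTE_CONTEXT);
    // Version of the public identity string format (64 bits, little-endian).
    data[8..16].copy_from_slice(&IDENTITY_VERSION.to_le_bytes());
    // Bytes 16..32 stay zero: the 128 bits of padding.
    // Digest of the public identity string: SHA-512 truncated to 256 bits.
    let digest = Sha512::digest(public_identity);
    data[32..64].copy_from_slice(&digest[..32]);
    data
}
```

A verifier recomputes this value from the public identity string in the proof and checks that it matches the report data embedded in the quote.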
diff --git a/docs/hw-benchmarking.md b/docs/hw-benchmarking.md new file mode 100644 index 00000000000..abb78b42db2 --- /dev/null +++ b/docs/hw-benchmarking.md @@ -0,0 +1,14 @@ +# Benchmarking on SGX hardware + +1. Check out a contract. Initialize submodules. +2. Enter the contract's container using this repo's scripts/sgx-enter-hw.sh. + This variant of the script sets flags for building for hardware SGX. +3. In container: Build ekiden, the contract, and the benchmarking programs. + Use release mode. +4. In container: Start aesmd using this repo's scripts/start-aesmd.sh. + This script is meant to be sourced, and it starts a background job in your current shell. + +Then start the nodes and run the benchmarking program. +See [/testnet/tendermint/README.md](/testnet/tendermint/README.md) for how to use the consensus testnet. + +See issue [#292](https://github.com/sunblaze-ucb/ekiden/issues/292) for a sample set of commands we used to do this in our experiments. diff --git a/docs/profiling.md b/docs/profiling.md new file mode 100644 index 00000000000..1bf699186f5 --- /dev/null +++ b/docs/profiling.md @@ -0,0 +1,63 @@ +# Profiling + +## Non-SGX + +To profile non-SGX portions of Ekiden, you can use standard tools like `valgrind`. Note that there +is a bug in older Valgrind versions which makes it incorrectly advertise RDRAND support in CPUID; +when RDRAND is then used, the program crashes with an illegal instruction error. For this reason, +be sure to use Valgrind version 3.13 or greater, which is known to work. + +After installing Valgrind, you can use it as normal (e.g., for profiling the compute node): +```bash +$ valgrind \ + --tool=callgrind \ + --callgrind-out-file=callgrind.out \ + target/debug/ekiden-compute target/enclave/token.signed.so +``` + +After the program terminates (you can interrupt it using CTRL+C), you can run the annotate tool +to get a human-readable report: +```bash +$ callgrind_annotate callgrind.out +``` + +## SGX + +### Setting up the environment +1. host: install SGX driver +1. host: install vtune, including collection driver +1. make /code available on host for vtune + (you can symlink it) + (todo: any better ways to do this?) +1. make /opt/intel/vtune_amplifier_2018.1.0.535340 available in the container for runtime libs + (you can do this with a volume mount, but it's not built in to scripts/sgx-enter-hw.sh) + (see 19389292a4ecf889ba8a4ed20d1b58d9f3156f8e for how to undo this) +1. host: set /proc/sys/kernel/yama/ptrace_scope to 0 + (setup recommends, but we have to profile as superuser anyway) + +### Building the project +1. container: `export SGX_MODE=HW` + (scripts/sgx-enter-hw.sh sets this) +1. add `-C opt-level=3` to `RUSTFLAGS` in `tasks.env-debug.env` and `tasks.env-sgx-xargo` in Makefile.toml + (see d826188ca5232cb9b342a46ebe67a90db2726afe for how to undo this) +1. container: `cargo make` + +### Collecting a profile
1. start container +1. container: `export INTEL_LIBITTNOTIFY64=/opt/intel/vtune_amplifier_2018.1.0.535340/lib64/runtime/libittnotify_collector.so` + (adapted from https://software.intel.com/en-us/node/708952) +1. container: `. scripts/start-aesmd.sh` + (source, it creates a background job) + (requires privileged container, or it can't access the sgx service) + (scripts/sgx-enter-hw.sh runs the container privileged) + (todo: privileged container is undesirable for production) +1. container: start nodes without batch timeout +1. host: `sudo su` +1. host, as superuser: `. /opt/intel/vtune_amplifier_2018.1.0.535340/amplxe-vars.sh` +1.
host, as superuser: `amplxe-cl -collect advanced-hotspots -duration=60 -analyze-system` + (specifying a `-target-pid` in a container freezes docker) + (using sgx-hotspots analysis causes kernel oops) +1. container: `./target/debug/my-client --mr-enclave $(cat target/enclave/my.mrenclave) --benchmark-threads=1 --benchmark-runs=10` +1. host, as superuser: ctrl-c amplxe-cl +1. host, as superuser: `amplxe-cl -finalize -r rxxxah` +1. host: `amplxe-cl -report hotspots -r rxxxah` diff --git a/docs/rpc-drawio.xml b/docs/rpc-drawio.xml new file mode 100644 index 00000000000..f985a0f1bd7 --- /dev/null +++ b/docs/rpc-drawio.xml @@ -0,0 +1 @@ +[compressed draw.io source for the RPC diagram elided; see docs/rpc.svg for the rendered figure] \ No newline at end of file diff --git a/docs/rpc.md b/docs/rpc.md new file mode 100644 index 00000000000..d719dd69714 --- /dev/null +++ b/docs/rpc.md @@ -0,0 +1,163 @@ +# RPC + +## Defining an API + +An API may be defined by using the `rpc_api` macro provided by `ekiden_core`. It is usually defined in its own API crate, as it needs to be available for import both for enclaves and clients. + +A simple API definition looks as follows: +```rust +rpc_api! { + metadata { + name = dummy; + version = "0.1.0"; + client_attestation_required = false; + } + + rpc hello_world(HelloWorldRequest) -> HelloWorldResponse; +} +``` + +There are a few different things in here: +* The `metadata` section defines some metadata, which names and versions the API and exposes some additional attributes. +* RPC method definitions, each starting with the keyword `rpc`. + +An RPC method definition looks similar to a Rust function definition and is composed from the following parts: +* Method name (e.g., `hello_world`) which defines how the method will be called. +* Request type (e.g., `HelloWorldRequest`) which defines the Rust type containing the request message. Currently, this must be a Protocol Buffers message type. +* Response type (e.g., `HelloWorldResponse`) which defines the Rust type containing the response message. Currently, this must be a Protocol Buffers message type. + +This same API definition can be used to generate both enclaves and clients. This is achieved by making the `rpc_api` macro generate in its place another macro called `with_api`, which can be used from both enclaves and clients. + +## Creating an enclave RPC server + +In order to create an enclave RPC server using the API we just defined, we need to import the API and instruct the RPC system to generate some glue code that will call our method implementations. +This can be done as follows: +```rust +#![feature(use_extern_macros)] + +use ekiden_trusted::rpc::create_enclave_rpc; +use dummy_api::{with_api, HelloWorldRequest, HelloWorldResponse}; + +with_api! { + create_enclave_rpc!(api); +} +``` + +This creates the glue that is needed to connect the API definitions to our method implementations. Next, we need to define the methods themselves: +```rust +fn hello_world(request: &HelloWorldRequest) -> Result<HelloWorldResponse> { + let mut response = HelloWorldResponse::new(); + response.set_answer(request.get_question() + 42); + + Ok(response) +} +``` + +## Creating a client + +To create an RPC client for our API, we need to again import the API definitions and generate the required glue code: +```rust +#![feature(use_extern_macros)] + +use ekiden_rpc_client::create_client_rpc; +use dummy_api::{with_api, HelloWorldRequest, HelloWorldResponse}; + +with_api! { + create_client_rpc!(dummy, dummy_api, api); +} +``` + +This will create the client and necessary types inside a module named `dummy` (first argument to the `create_client_rpc` macro). +We can use this to create clients that talk to an Ekiden Compute node over gRPC: +```rust +use ekiden_rpc_client::backend::Web3ContractClientBackend; + +// Create reactor (event loop) in a separate thread. +// TODO: Can this be simplified, it looks ugly?
+let (tx, rx) = std::sync::mpsc::channel(); +std::thread::spawn(move || { + let mut reactor = tokio_core::reactor::Core::new().unwrap(); + tx.send(reactor.remote()).unwrap(); + reactor.run(futures::empty::<(), ()>()).unwrap(); +}); + +let remote = rx.recv().unwrap(); + +let client = dummy::Client::new( + Web3ContractClientBackend::new( + remote, + "hostname", + 9001, + ).unwrap(), + MrEnclave([0; 32]), // This needs to be an actual MRENCLAVE. +); + +let mut request = dummy::HelloWorldRequest::new(); +request.set_question(0); + +let response = client.hello_world(request).wait().unwrap(); +assert_eq!(response.get_answer(), 42); +``` + +## Internals + +### Secure channel protocol + +All remote procedure calls are made over a secure channel which is automatically established on first request. +This section provides detailed information about how the secure channel establishment protocol works. + +#### Keys +The following keys are maintained by the enclave identity for use in RPC: +* E - contract long-term public key pair. + +The following keys appear in the protocol and are maintained externally: +* C - client long-term public key pair. + The client maintains this key pair. + When a contract acts as a client, the enclave identity maintains this key pair. + +The following keys are maintained by the protocol: +* E' - contract short-term public key pair, generated by ekiden-rpc-trusted in the secure enclave for each client session. +* C' - client short-term public key pair, generated by the client before establishing a secure channel. + +The following statements are used by the protocol: +* IPE - enclave identity proof, refreshed periodically from IAS, which binds the enclave identity to the long-term public key E. +* IPC - client identity proof, which binds the client enclave identity to the long-term public key C. + +#### State +The secure channel can be in one of the following states: +* `Closed` + Channel is closed (this is the initial state) and must be reset. After being reset, the channel will transition into `Init` state. +* `Init` + Channel is being initialized. After successful contract authentication, the channel will transition into `ClientAuthenticating` or `Established` state. +* `ClientAuthenticating` + (Client only) Client is authenticating. After successful client authentication, the channel will transition into `Established` state. +* `Established` + Secure channel is established and may be used to make arbitrary method calls. + +#### Notation +`Box[X](C->E)` is a cryptographic box, encrypting and authenticating `X` from the client's public key `C` to the contract's public key `E`. The only people who can create or decipher `Box[X](C->E)` are the people who know the secret key corresponding to `C` and the people who know the secret key corresponding to `E`. + +#### Protocol +* Client sends `(C')`. +* Contract sends `(IPE, Box[E'](E->C'))`. +* If client authentication is required, Client sends `(C', Box[IPC, Box[C'](C->E)](C'->E'))`. +* From this point forward, for each request, the client transmits `(C', Box[request](C'->E'))`, where `request` is a properly serialized request. +* From this point forward, for each response, the server transmits `(Box[response](E'->C'))` where `response` is a properly serialized response. +* The channel is now established. + +#### Nonces +While not explicitly mentioned above, each cryptographic box also contains a 24-byte nonce. Each nonce contains a 16-byte "nonce context" prefix, which prevents the message from being used in a different context.
+#### Cryptography +The protocol uses NaCl primitives (e.g., the authenticated encryption is implemented using Curve25519, XSalsa20, and Poly1305). + +### RPC types + +Making a call to a contract moves the method name, arguments, response code, return value, and state through several messages between the client, compute node, consensus node, and contract enclave. +See the diagram below for a summary of these types. + +![It's complicated](rpc.svg)
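As a rough sketch of the nesting on the request path (types and setters as introduced in this change; `args_bytes` stands in for serialized call arguments):
```rust
use protobuf::Message;

use ekiden_compute_api::CallContractRequest;
use ekiden_rpc_common::api;

fn wrap_plain_call(method: &str, args_bytes: Vec<u8>) -> CallContractRequest {
    // Innermost: the plain call, carrying the method name and arguments.
    let mut plain_request = api::PlainClientRequest::new();
    plain_request.set_method(method.to_owned());
    plain_request.set_payload(args_bytes);

    // Middle: a ClientRequest, either plain or an encrypted CryptoBox.
    let mut client_request = api::ClientRequest::new();
    client_request.set_plain_request(plain_request);

    // Outermost: the gRPC message sent to the compute node.
    let mut request = CallContractRequest::new();
    request.set_payload(client_request.write_to_bytes().unwrap());
    request
}
```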
diff --git a/docs/rpc.svg b/docs/rpc.svg new file mode 100644 index 00000000000..d99a7fbbc76 --- /dev/null +++ b/docs/rpc.svg @@ -0,0 +1,2 @@ +[SVG diagram; only its text labels survive in this snapshot: swimlanes client, compute, and enclave; request path CallContractRequest -> ClientRequest -> PlainClientRequest (method, payload) or CryptoBox (encrypted_request) -> EnclaveRequest (client_request) with serialize/encrypt/parse steps over enclave_request_bytes; response path EnclaveResponse (client_response) -> ClientResponse -> PlainClientResponse (payload, code) or CryptoBox (encrypted_response) -> CallContractResponse with serialize/decrypt/parse steps over enclave_response_bytes; notes: "space for other things, e.g. storage node selection" and "Multiple requests/responses aggregated in a batch".]
\ No newline at end of file diff --git a/docs/tracing.md b/docs/tracing.md new file mode 100644 index 00000000000..86821baa058 --- /dev/null +++ b/docs/tracing.md @@ -0,0 +1,22 @@ +# How to look at the metrics +1. Run the compute node with metrics serving enabled by passing a command-line argument like `--metrics-addr=0.0.0.0:9091` to specify the address from which to serve them. +2. Set up Prometheus to scrape that metrics address. This is configured in [yet another yml file](https://prometheus.io/docs/prometheus/latest/getting_started/#configuring-prometheus-to-monitor-the-sample-targets). If you run the compute node in a container and Prometheus on the host, you need to publish that port. +3. Run Prometheus and look at [graphs](https://prometheus.io/docs/prometheus/latest/getting_started/#using-the-graphing-interface). The metrics traced in this PR are listed below. + +# Metrics +## From GRPC handlers +* `reqs_received` (counter): Incremented on each request. +* `req_time_client` (histogram): Time spent by the gRPC thread handling a request. + +## From worker thread +* `reqs_batches_started` (counter): Incremented for each batch of requests. +* `req_time_batch` (histogram): Time spent by the worker thread in an entire batch of requests. +* `req_time_enclave` (histogram): Time spent by the worker thread in a single request. +* `consensus_get_time` (histogram): Time spent getting state from consensus. +* `consensus_set_time` (histogram): Time spent setting state in consensus. + +# How to add Prometheus metrics to your own processes +1. Add the `prometheus` package as a dependency and declare `#[macro_use] extern crate prometheus`. +2. When you initialize, use the macros `register_counter!(name, help)` [et al.](https://docs.rs/prometheus/0.3.10/prometheus/#macros), which both create a metric object and *register* it globally with the prometheus package. +3. In the code to be instrumented, manipulate those metric objects. For example, you might call `.inc()` on a [Counter](https://docs.rs/prometheus/0.3.10/prometheus/struct.Counter.html). +4. Expose the registered metrics over an HTTP server under the path `/metrics`. See [instrumentation.rs](../compute/src/instrumentation.rs#L95-L105) for how I've done it in the compute node with hyper.
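The steps above, as a minimal self-contained sketch (the metric name and help string are illustrative; error handling is elided, and an HTTP server would serve the rendered buffer under `/metrics`):
```rust
#[macro_use]
extern crate prometheus;

use prometheus::{Counter, Encoder, TextEncoder};

fn main() {
    // (2) Create a counter and register it with the global registry.
    let reqs_received: Counter =
        register_counter!("reqs_received", "Incremented on each request").unwrap();

    // (3) Manipulate the metric from instrumented code.
    reqs_received.inc();

    // (4) Render all registered metrics in the text exposition format.
    let mut buffer = Vec::new();
    TextEncoder::new()
        .encode(&prometheus::gather(), &mut buffer)
        .unwrap();
    print!("{}", String::from_utf8(buffer).unwrap());
}
```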
diff --git a/enclave/Makefile.toml b/enclave/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/enclave/Makefile.toml @@ -0,0 +1 @@ +extend = "../Makefile.toml" diff --git a/enclave/common/Cargo.toml b/enclave/common/Cargo.toml new file mode 100644 index 00000000000..d915846e5c9 --- /dev/null +++ b/enclave/common/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "ekiden-enclave-common" +version = "0.1.0-alpha.1" +authors = [ + "Raymond Cheng ", + "Jernej Kos " +] +build = "build.rs" + +[dependencies] +base64 = "0.9.0" +byteorder = "1.2.1" +ekiden-common = { path = "../../common", version = "0.1.0-alpha.1" } +protobuf = "1.4.3" +serde_json = { git = "https://github.com/ekiden/json" } +sodalite = "0.3.0" + +[target.'cfg(not(target_env = "sgx"))'.dependencies] +rand = "0.4.2" +sgx_types = { git = "https://github.com/ekiden/rust-sgx-sdk", tag = "v0.9.7-ekiden1" } + +[build-dependencies] +ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" } diff --git a/enclave/common/Makefile.toml b/enclave/common/Makefile.toml new file mode 100644 index 00000000000..9dcb41fa936 --- /dev/null +++ b/enclave/common/Makefile.toml @@ -0,0 +1,4 @@ +extend = "../Makefile.toml" + +[env] +BUILD_FOR_SGX_TARGET = "true" diff --git a/enclave/common/build.rs b/enclave/common/build.rs new file mode 100644 index 00000000000..fc31814dda2 --- /dev/null +++ b/enclave/common/build.rs @@ -0,0 +1,11 @@ +extern crate ekiden_tools; + +fn main() { + ekiden_tools::generate_mod("src/generated", &["enclave_identity"]); + + ekiden_tools::protoc(ekiden_tools::ProtocArgs { + out_dir: "src/generated/", + input: &["src/enclave_identity.proto"], + includes: &["src/"], + }); +} diff --git a/enclave/common/src/enclave_identity.proto b/enclave/common/src/enclave_identity.proto new file mode 100644 index 00000000000..49f9a919843 --- /dev/null +++ b/enclave/common/src/enclave_identity.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package enclave_identity; + +message AvReport { + // Raw AV report body (must be raw as the signature is computed over it). The body + // is JSON-encoded as per the IAS API specification. + bytes body = 1; + // AV report signature. + bytes signature = 2; + // AV report signing certificate chain in PEM format. + bytes certificates = 3; +} + +message SavedIdentity { + // A space for an SGX sealed data struct that contains the enclave identity. + // The structure within the sealed data is internal to the enclave identity component. + bytes sealed_identity = 1; + // The current AV report used in the enclave identity proof for the saved enclave identity. + AvReport av_report = 2; +} + +message IdentityProof { + // The public identity string. + bytes public_identity = 1; + // The attestation verification report. + AvReport av_report = 2; +} diff --git a/enclave/common/src/identity.rs b/enclave/common/src/identity.rs new file mode 100644 index 00000000000..6dafcddaa56 --- /dev/null +++ b/enclave/common/src/identity.rs @@ -0,0 +1,42 @@ +use sgx_types; + +use sodalite; + +/// Used in enclave identity proof. +const QUOTE_CONTEXT_IDENTITY: super::quote::QuoteContext = *b"EkQ-Iden"; + +/// Version of the public identity string format. +const IDENTITY_VERSION: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0]; + +/// The components of a public identity string. +#[derive(Clone)] +pub struct PublicIdentityComponents { + /// Long term enclave key E used in RPC, public part. + pub rpc_key_e_pub: sodalite::BoxPublicKey, +} + +/// Pack components into a public identity string.
+pub fn pack_public_identity(components: &PublicIdentityComponents) -> Vec<u8> { + components.rpc_key_e_pub.to_vec() +} + +/// Unpack components from a public identity string. +pub fn unpack_public_identity(public_identity: &[u8]) -> PublicIdentityComponents { + let mut components = PublicIdentityComponents { + rpc_key_e_pub: [0; sodalite::BOX_PUBLIC_KEY_LEN], + }; + components.rpc_key_e_pub.copy_from_slice(public_identity); + components +} + +/// Pack fields into a report data struct. +pub fn pack_report_data(public_identity: &[u8]) -> sgx_types::sgx_report_data_t { + let mut hash = [0; sodalite::HASH_LEN]; + sodalite::hash(&mut hash, public_identity); + let mut report_data = sgx_types::sgx_report_data_t::default(); + report_data.d[0..8].copy_from_slice(&QUOTE_CONTEXT_IDENTITY); + report_data.d[8..16].copy_from_slice(&IDENTITY_VERSION); + // [16..32] is left zeroed + report_data.d[32..64].copy_from_slice(&hash[..32]); + report_data +} diff --git a/enclave/common/src/lib.rs b/enclave/common/src/lib.rs new file mode 100644 index 00000000000..631baf4e5a8 --- /dev/null +++ b/enclave/common/src/lib.rs @@ -0,0 +1,22 @@ +#![feature(use_extern_macros)] + +extern crate sgx_types; + +extern crate base64; +extern crate byteorder; +extern crate protobuf; +extern crate serde_json; +extern crate sodalite; + +#[macro_use] +extern crate ekiden_common; + +pub mod identity; +pub mod quote; + +// This is pub so that other crates can import our protos. +pub mod generated; + +pub mod api { + pub use generated::enclave_identity::*; +} diff --git a/enclave/common/src/quote.rs b/enclave/common/src/quote.rs new file mode 100644 index 00000000000..c9b5d6c3ee0 --- /dev/null +++ b/enclave/common/src/quote.rs @@ -0,0 +1,152 @@ +//! A portable system for parsing and verifying enclave identity proofs. + +use std::io::{Cursor, Read, Seek, SeekFrom}; +use std::ops::Deref; +use std::str::FromStr; + +use base64; +use byteorder::{LittleEndian, ReadBytesExt}; +use serde_json; + +use ekiden_common::error::{Error, Result}; +use ekiden_common::hex_encoded_struct; + +use super::api::IdentityProof; + +pub const QUOTE_CONTEXT_LEN: usize = 8; +/// The purpose of `QuoteContext` is to prevent quotes from being used in +/// different contexts. The value is included as a prefix in report data. +pub type QuoteContext = [u8; QUOTE_CONTEXT_LEN]; + +// MRENCLAVE. +hex_encoded_struct!(MrEnclave, MRENCLAVE_LEN, 32); + +/// Decoded report body. +#[derive(Default, Debug)] +struct ReportBody { + cpu_svn: [u8; 16], + misc_select: u32, + attributes: [u8; 16], + mr_enclave: MrEnclave, + mr_signer: [u8; 32], + isv_prod_id: u16, + isv_svn: u16, + report_data: Vec<u8>, +} + +/// Decoded quote body. +#[derive(Default, Debug)] +struct QuoteBody { + version: u16, + signature_type: u16, + gid: u32, + isv_svn_qe: u16, + isv_svn_pce: u16, + basename: [u8; 32], + report_body: ReportBody, +} + +impl QuoteBody { + /// Decode quote body. + fn decode(quote_body: &Vec<u8>) -> Result<QuoteBody> { + let mut reader = Cursor::new(quote_body); + let mut quote_body: QuoteBody = QuoteBody::default(); + + // TODO: Should we ensure that reserved bytes are all zero? + + // Body. + quote_body.version = reader.read_u16::<LittleEndian>()?; + quote_body.signature_type = reader.read_u16::<LittleEndian>()?; + quote_body.gid = reader.read_u32::<LittleEndian>()?; + quote_body.isv_svn_qe = reader.read_u16::<LittleEndian>()?; + quote_body.isv_svn_pce = reader.read_u16::<LittleEndian>()?; + reader.seek(SeekFrom::Current(4))?; // 4 reserved bytes. + reader.read_exact(&mut quote_body.basename)?; + + // Report body.
+ reader.read_exact(&mut quote_body.report_body.cpu_svn)?; + quote_body.report_body.misc_select = reader.read_u32::<LittleEndian>()?; + reader.seek(SeekFrom::Current(28))?; // 28 reserved bytes. + reader.read_exact(&mut quote_body.report_body.attributes)?; + reader.read_exact(&mut quote_body.report_body.mr_enclave.0)?; + reader.seek(SeekFrom::Current(32))?; // 32 reserved bytes. + reader.read_exact(&mut quote_body.report_body.mr_signer)?; + reader.seek(SeekFrom::Current(96))?; // 96 reserved bytes. + quote_body.report_body.isv_prod_id = reader.read_u16::<LittleEndian>()?; + quote_body.report_body.isv_svn = reader.read_u16::<LittleEndian>()?; + reader.seek(SeekFrom::Current(60))?; // 60 reserved bytes. + quote_body.report_body.report_data = vec![0; 64]; + reader.read_exact(&mut quote_body.report_body.report_data)?; + + Ok(quote_body) + } +} + +/// Authenticated information obtained from validating an enclave identity proof. +pub struct IdentityAuthenticatedInfo { + pub identity: super::identity::PublicIdentityComponents, + // TODO: add other av report/quote body/report fields we want to give the consumer + pub mr_enclave: MrEnclave, +} + +/// Verify attestation report. +pub fn verify(identity_proof: &IdentityProof) -> Result<IdentityAuthenticatedInfo> { + // TODO: Verify IAS signature. + + // Parse AV report body. + let avr_body = identity_proof.get_av_report().get_body(); + let avr_body: serde_json::Value = match serde_json::from_slice(avr_body) { + Ok(avr_body) => avr_body, + _ => return Err(Error::new("Failed to parse AV report body")), + }; + + // TODO: Check timestamp, reject if report is too old (e.g. 1 day). + + match avr_body["isvEnclaveQuoteStatus"].as_str() { + Some(status) => match status { + "OK" => {} + _ => { + return Err(Error::new(format!("Quote status was {}", status))); + } + }, + None => { + return Err(Error::new( + "AV report body did not contain isvEnclaveQuoteStatus", + )); + } + }; + + let quote_body = match avr_body["isvEnclaveQuoteBody"].as_str() { + Some(quote_body) => quote_body, + None => { + return Err(Error::new( + "AV report body did not contain isvEnclaveQuoteBody", + )) + } + }; + + let quote_body = match base64::decode(&quote_body) { + Ok(quote_body) => quote_body, + _ => return Err(Error::new("Failed to parse quote")), + }; + + let quote_body = match QuoteBody::decode(&quote_body) { + Ok(quote_body) => quote_body, + _ => return Err(Error::new("Failed to parse quote")), + }; + + // TODO: Apply common policy to report body, e.g., check enclave + // attributes for debug mode. + + // Check report data. + let public_identity = identity_proof.get_public_identity(); + let report_data_expected = super::identity::pack_report_data(public_identity); + if &quote_body.report_body.report_data[..] != &report_data_expected.d[..] { + return Err(Error::new("Report data did not match expected")); + } + + Ok(IdentityAuthenticatedInfo { + identity: super::identity::unpack_public_identity(public_identity), + mr_enclave: quote_body.report_body.mr_enclave, + }) +}
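For consumers, a typical flow is to call `verify` and then apply their own policy on the authenticated fields; a minimal sketch (the pinned MRENCLAVE value is assumed to come from the caller):
```rust
use ekiden_common::error::{Error, Result};
use ekiden_enclave_common::api::IdentityProof;
use ekiden_enclave_common::quote::{verify, MrEnclave};

/// Authenticate an identity proof and pin the enclave to a known MRENCLAVE.
fn check_identity(proof: &IdentityProof, expected: &MrEnclave) -> Result<()> {
    // verify() parses the AV report and quote and returns authenticated info.
    let info = verify(proof)?;
    // Callers still apply their own policy, e.g. pinning MRENCLAVE.
    if &info.mr_enclave != expected {
        return Err(Error::new("MRENCLAVE mismatch"));
    }
    Ok(())
}
```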
diff --git a/enclave/edl/Cargo.toml b/enclave/edl/Cargo.toml new file mode 100644 index 00000000000..fc54b71e180 --- /dev/null +++ b/enclave/edl/Cargo.toml @@ -0,0 +1,6 @@ +[package] +name = "ekiden-enclave-edl" +version = "0.1.0" + +[dependencies] +ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" } diff --git a/enclave/edl/Makefile.toml b/enclave/edl/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/enclave/edl/Makefile.toml @@ -0,0 +1 @@ +extend = "../Makefile.toml" diff --git a/enclave/edl/src/identity.edl b/enclave/edl/src/identity.edl new file mode 100644 index 00000000000..de5dcadd67f --- /dev/null +++ b/enclave/edl/src/identity.edl @@ -0,0 +1,50 @@ +enclave { + include "sgx_report.h" + include "sgx_tseal.h" + + trusted { + /** + * Generate an identity for a new enclave persistence and export it in a sealed form. Does not start using + * that identity. Call `identity_restore` to start using it. + */ + // TODO: work out constant size + public void identity_create( + [out, size=sealed_identity_capacity] sgx_sealed_data_t *sealed_identity, + size_t sealed_identity_capacity, + [out] size_t *sealed_identity_length + ); + + /** + * Populate the enclave launch with an identity. The enclave launch must not have already restored an identity. + * Gives the public identity string back. The enclave launch caches the identity in ephemeral enclave memory + * so that we don't have to pass the sealed ID and unseal it for every entry. + */ + // TODO: work out constant size + public void identity_restore( + [in, size=sealed_identity_length] const sgx_sealed_data_t *sealed_identity, + size_t sealed_identity_length, + [out, size=public_identity_capacity] uint8_t *public_identity, + size_t public_identity_capacity, + [out] size_t *public_identity_length + ); + + /** + * Create a report for use in the enclave identity proof. Data is specified in /docs/enclave-identity.md. + */ + public void identity_create_report( + [in] const sgx_target_info_t *target_info, + [out] sgx_report_t *report + ); + + /** + * Populate the enclave launch with an attestation verification report. The enclave launch caches the AVR for + * internal use, for example, if it needs its own enclave identity proof. + */ + public void identity_set_av_report( + [in, size=av_report_length] const uint8_t *av_report, + size_t av_report_length + ); + }; +}; diff --git a/enclave/edl/src/lib.rs b/enclave/edl/src/lib.rs new file mode 100644 index 00000000000..f536ad800c0 --- /dev/null +++ b/enclave/edl/src/lib.rs @@ -0,0 +1,6 @@ +#[macro_use] +extern crate ekiden_tools; + +define_edl!
{ + "identity.edl" +} diff --git a/enclave/trusted/Cargo.toml b/enclave/trusted/Cargo.toml new file mode 100644 index 00000000000..7ae4e955ac4 --- /dev/null +++ b/enclave/trusted/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "ekiden-enclave-trusted" +version = "0.1.0-alpha.1" +authors = [ + "Jernej Kos " +] + +[dependencies] +ekiden-common = { path = "../../common", version = "0.1.0-alpha.1" } +ekiden-enclave-common = { path = "../common", version = "0.1.0-alpha.1" } +lazy_static = { version = "1.0", features = ["spin_no_std"] } +protobuf = "1.4.3" +sodalite = "0.3.0" + +[target.'cfg(not(target_env = "sgx"))'.dependencies] +sgx_types = { git = "https://github.com/ekiden/rust-sgx-sdk", tag = "v0.9.7-ekiden1" } diff --git a/enclave/trusted/Makefile.toml b/enclave/trusted/Makefile.toml new file mode 100644 index 00000000000..0914bd0f4f2 --- /dev/null +++ b/enclave/trusted/Makefile.toml @@ -0,0 +1,5 @@ +extend = "../Makefile.toml" + +[env] +BUILD_FOR_HOST_TARGET = "false" +BUILD_FOR_SGX_TARGET = "true" diff --git a/enclave/trusted/src/crypto.rs b/enclave/trusted/src/crypto.rs new file mode 100644 index 00000000000..c96002fd7fc --- /dev/null +++ b/enclave/trusted/src/crypto.rs @@ -0,0 +1,3 @@ +// Secret seed used for generating private and public keys. +pub const SECRET_SEED_LEN: usize = 32; +pub type SecretSeed = [u8; SECRET_SEED_LEN]; diff --git a/enclave/trusted/src/identity.rs b/enclave/trusted/src/identity.rs new file mode 100644 index 00000000000..0347cc34fd2 --- /dev/null +++ b/enclave/trusted/src/identity.rs @@ -0,0 +1,248 @@ +#[cfg(target_env = "sgx")] +use sgx_tse; +#[cfg(target_env = "sgx")] +use sgx_tseal::SgxSealedData; +#[cfg(target_env = "sgx")] +use sgx_types; +#[cfg(target_env = "sgx")] +use sgx_types::{sgx_attributes_t, sgx_sealed_data_t}; + +#[cfg(target_env = "sgx")] +use protobuf; +use sodalite; + +#[cfg(target_env = "sgx")] +use std; +#[cfg(not(target_env = "sgx"))] +use std::sync::Mutex as SgxMutex; +#[cfg(target_env = "sgx")] +use std::sync::SgxMutex; + +use ekiden_common::random; +use ekiden_enclave_common; + +use super::crypto::{SecretSeed, SECRET_SEED_LEN}; + +/// The format in which an identity is sealed an persisted. +#[cfg(target_env = "sgx")] +#[derive(Clone, Copy)] +struct IdentityExport { + /// Seed for RPC `E` key. + seed: SecretSeed, +} + +#[cfg(target_env = "sgx")] +unsafe impl sgx_types::marker::ContiguousMemory for IdentityExport {} + +#[derive(Clone)] +pub struct Identity { + /// Public parts. + pub public: ekiden_enclave_common::identity::PublicIdentityComponents, + /// Long term enclave key E used in RPC, private part. + pub rpc_key_e_priv: sodalite::BoxSecretKey, +} + +lazy_static! { + // Global cached identity. + static ref IDENTITY: SgxMutex> = SgxMutex::new(None); +} + +lazy_static! { + // Global cached AV report. 
+ static ref AV_REPORT: SgxMutex<Option<ekiden_enclave_common::api::AvReport>> = + SgxMutex::new(None); +} + +/// ECALL, see edl +#[cfg(target_env = "sgx")] +#[no_mangle] +pub extern "C" fn identity_create( + sealed_identity: *mut sgx_sealed_data_t, + sealed_identity_capacity: usize, + sealed_identity_length: &mut usize, +) { + let mut export = IdentityExport { + seed: [0; SECRET_SEED_LEN], + }; + random::get_random_bytes(&mut export.seed).expect("ekiden_common::random::get_random_bytes"); + let sealed_data = SgxSealedData::<IdentityExport>::seal_data_ex( + 0x01, // KEYPOLICY_MRENCLAVE + sgx_attributes_t { + flags: 0xfffffffffffffff3, + xfrm: 0, + }, + 0xF0000000, + &[], + &export, + ).expect("SgxSealedData::seal_data_ex"); + let raw_data_len = SgxSealedData::<IdentityExport>::calc_raw_sealed_data_size( + sealed_data.get_add_mac_txt_len(), + sealed_data.get_encrypt_txt_len(), + ); + if raw_data_len as usize > sealed_identity_capacity { + panic!( + "Sealed identity too large ({}/{})", + raw_data_len, sealed_identity_capacity + ); + } + + unsafe { + sealed_data.to_raw_sealed_data_t(sealed_identity, raw_data_len); + } + *sealed_identity_length = raw_data_len as usize; +} + +/// Get the public identity string. +fn get_public_identity() -> Vec<u8> { + let guard = IDENTITY.lock().unwrap(); + let identity = guard.as_ref().expect("IDENTITY"); + ekiden_enclave_common::identity::pack_public_identity(&identity.public) +} + +/// ECALL, see edl +#[cfg(target_env = "sgx")] +#[no_mangle] +pub extern "C" fn identity_restore( + sealed_identity: *mut sgx_sealed_data_t, + sealed_identity_length: usize, + public_identity: *mut u8, + public_identity_capacity: usize, + public_identity_length: &mut usize, +) { + let sealed_data = unsafe { + SgxSealedData::<IdentityExport>::from_raw_sealed_data_t( + sealed_identity, + sealed_identity_length as u32, + ) + }.expect("SgxSealedData::from_raw_sealed_data_t"); + let unsealed_data = sealed_data + .unseal_data() + .expect("SgxSealedData::unseal_data"); + let export = unsealed_data.get_decrypt_txt(); + + let mut identity = Identity { + public: ekiden_enclave_common::identity::PublicIdentityComponents { + rpc_key_e_pub: [0; sodalite::BOX_PUBLIC_KEY_LEN], + }, + rpc_key_e_priv: [0; sodalite::BOX_SECRET_KEY_LEN], + }; + sodalite::box_keypair_seed( + &mut identity.public.rpc_key_e_pub, + &mut identity.rpc_key_e_priv, + &export.seed, + ); + + { + let mut guard = IDENTITY.lock().unwrap(); + // Abort if identity already initialized. + if guard.is_some() { + panic!("IDENTITY already initialized"); + } + *guard = Some(identity); + } + + let public_identity_src = get_public_identity(); + if public_identity_src.len() > public_identity_capacity { + panic!( + "Public identity string too large ({}/{})", + public_identity_src.len(), + public_identity_capacity + ); + } + let public_identity_dst = + unsafe { std::slice::from_raw_parts_mut(public_identity, public_identity_src.len()) }; + public_identity_dst.copy_from_slice(&public_identity_src); + *public_identity_length = public_identity_src.len(); +} + +/// For tests, generate an identity, cache that, generate a dummy AV report, and cache that. +#[cfg(not(target_env = "sgx"))] +pub fn nosgx_init_dummy() { + let mut guard = IDENTITY.lock().unwrap(); + // Skip if identity already initialized.
+ if guard.is_some() { + return; + } + + let mut seed: SecretSeed = [0; SECRET_SEED_LEN]; + random::get_random_bytes(&mut seed).expect("ekiden_common::random::get_random_bytes"); + + let mut identity = Identity { + public: ekiden_enclave_common::identity::PublicIdentityComponents { + rpc_key_e_pub: [0; sodalite::BOX_PUBLIC_KEY_LEN], + }, + rpc_key_e_priv: [0; sodalite::BOX_SECRET_KEY_LEN], + }; + sodalite::box_keypair_seed( + &mut identity.public.rpc_key_e_pub, + &mut identity.rpc_key_e_priv, + &seed, + ); + *guard = Some(identity); + + let mut av_report = ekiden_enclave_common::api::AvReport::new(); + av_report.set_body(b"{}".to_vec()); + av_report.set_signature(vec![]); + av_report.set_certificates(vec![]); + + { + let mut guard = AV_REPORT.lock().unwrap(); + if guard.is_some() { + panic!("AV_REPORT already initialized"); + } + *guard = Some(av_report); + } +} + +/// ECALL, see edl +#[cfg(target_env = "sgx")] +#[no_mangle] +pub extern "C" fn identity_create_report( + target_info: &sgx_types::sgx_target_info_t, + report: &mut sgx_types::sgx_report_t, +) { + let public_identity = get_public_identity(); + let report_data = ekiden_enclave_common::identity::pack_report_data(&public_identity); + *report = sgx_tse::rsgx_create_report(target_info, &report_data).expect("rsgx_create_report"); +} + +/// ECALL, see edl +#[cfg(target_env = "sgx")] +#[no_mangle] +pub extern "C" fn identity_set_av_report(av_report: *const u8, av_report_length: usize) { + let av_report_slice = unsafe { std::slice::from_raw_parts(av_report, av_report_length) }; + let av_report = + protobuf::parse_from_bytes(av_report_slice).expect("protobuf::parse_from_bytes av_report"); + { + let mut guard = AV_REPORT.lock().unwrap(); + if guard.is_some() { + panic!("AV_REPORT already initialized"); + } + *guard = Some(av_report); + } +} + +/// Get a copy of the identity. +pub fn get_identity() -> Identity { + IDENTITY + .lock() + .unwrap() + .as_ref() + .expect("IDENTITY not initialized") + .clone() +} + +/// Get the identity proof. +pub fn get_proof() -> ekiden_enclave_common::api::IdentityProof { + let mut identity_proof = ekiden_enclave_common::api::IdentityProof::new(); + identity_proof.set_public_identity(get_public_identity()); + identity_proof.set_av_report( + AV_REPORT + .lock() + .unwrap() + .as_ref() + .expect("AV_REPORT not initialized") + .clone(), + ); + identity_proof +} diff --git a/enclave/trusted/src/lib.rs b/enclave/trusted/src/lib.rs new file mode 100644 index 00000000000..c660133fb6c --- /dev/null +++ b/enclave/trusted/src/lib.rs @@ -0,0 +1,37 @@ +#[cfg(target_env = "sgx")] +extern crate sgx_trts; +#[cfg(target_env = "sgx")] +extern crate sgx_tse; +#[cfg(target_env = "sgx")] +extern crate sgx_tseal; +extern crate sgx_types; + +#[macro_use] +extern crate lazy_static; +extern crate protobuf; +extern crate sodalite; + +extern crate ekiden_common; +extern crate ekiden_enclave_common; + +pub mod crypto; +pub mod identity; +pub mod utils; + +/// Declare enclave initialization structures. +/// +/// **This macro must be used in each enclave in order for the initialization +/// handlers of other modules to work correctly.** +#[macro_export] +macro_rules! enclave_init { + () => { + #[doc(hidden)] + #[no_mangle] + pub extern "C" fn __ekiden_enclave() { + // We define a symbol called __ekiden_enclave, which is forced to be + // used by the linker script. Without this, the .init_array section + // of the resulting library is removed by the linker and thus no + // initialization is done. + } + } +}
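For illustration, an enclave crate would invoke this macro once at its top level (a sketch; the surrounding crate is hypothetical):
```rust
#[macro_use]
extern crate ekiden_enclave_trusted;

// Emits the __ekiden_enclave symbol so the linker script keeps the
// .init_array section and module initialization handlers run.
enclave_init!();
```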
diff --git a/enclave/trusted/src/utils.rs b/enclave/trusted/src/utils.rs new file mode 100644 index 00000000000..28485112e4b --- /dev/null +++ b/enclave/trusted/src/utils.rs @@ -0,0 +1,86 @@ +//! Enclave utilities. +use std::io::Cursor; +use std::slice::{from_raw_parts, from_raw_parts_mut}; + +#[cfg(target_env = "sgx")] +use sgx_trts::trts::rsgx_raw_is_outside_enclave; + +use ekiden_common::serializer::{Deserializable, Serializable}; + +/// Deserialize request buffer from untrusted memory. +/// +/// # EDL +/// +/// In order for this function to work, the source buffer must be declared using +/// the `[user_check]` attribute in the EDL. +/// +/// # Panics +/// +/// This function will panic if the source buffer is null or not in untrusted memory +/// as this may compromise enclave security. Failing to deserialize the request +/// buffer will also cause a panic. +pub fn read_enclave_request<R>(src: *const u8, src_length: usize) -> R +where + R: Deserializable, +{ + if src.is_null() { + panic!("Source buffer must not be null"); + } + + // Ensure that request data is in untrusted memory. This is required because + // we are using user_check in the EDL so we must do all checks manually. If + // the pointer was inside the enclave, we could expose arbitrary parts of + // enclave memory. + #[cfg(target_env = "sgx")] + { + if !rsgx_raw_is_outside_enclave(src, src_length) { + panic!("Security violation: source buffer must be in untrusted memory"); + } + } + + let src = unsafe { from_raw_parts(src, src_length) }; + let mut cursor = Cursor::new(src); + R::read_from(&mut cursor).expect("Malformed enclave request") +} + +/// Copy serializable in trusted memory to response buffer in untrusted memory. +/// +/// # EDL +/// +/// In order for this function to work, the destination buffer must be declared +/// using the `[user_check]` attribute in the EDL. +/// +/// # Panics +/// +/// This function will panic if the destination buffer is null, too small to hold +/// the content of the source buffer or if the destination buffer is not in +/// untrusted memory as this may compromise enclave security. +pub fn write_enclave_response<S>(src: &S, dst: *mut u8, dst_capacity: usize, dst_length: *mut usize) +where + S: Serializable, +{ + if dst.is_null() { + panic!("Destination buffer must not be null"); + } + + // Ensure that response data is in untrusted memory. This is required because + // we are using user_check in the EDL so we must do all checks manually. If + // the pointer was inside the enclave, we could overwrite arbitrary parts of + // enclave memory. + #[cfg(target_env = "sgx")] + { + if !rsgx_raw_is_outside_enclave(dst, dst_capacity) { + panic!("Security violation: destination buffer must be in untrusted memory"); + } + } + + // Serialize message to output buffer. + let dst = unsafe { from_raw_parts_mut(dst, dst_capacity) }; + let mut cursor = Cursor::new(dst); + let length = src.write_to(&mut cursor) + .expect("Failed to write enclave response"); + + unsafe { + *dst_length = length; + } +}
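Together, these two helpers form the usual pattern for an ECALL that reads a request buffer and fills a response buffer; a sketch, assuming a hypothetical `Request`/`Response` pair implementing `Deserializable`/`Serializable` and a hypothetical `handle` function:
```rust
use ekiden_enclave_trusted::utils::{read_enclave_request, write_enclave_response};

#[no_mangle]
pub extern "C" fn example_ecall(
    request_data: *const u8,
    request_length: usize,
    response_data: *mut u8,
    response_capacity: usize,
    response_length: *mut usize,
) {
    // Validate that the buffer is in untrusted memory, copy it in, and parse it.
    let request: Request = read_enclave_request(request_data, request_length);

    // Hypothetical business logic.
    let response: Response = handle(request);

    // Serialize the response into the untrusted buffer, checking bounds.
    write_enclave_response(&response, response_data, response_capacity, response_length);
}
```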
diff --git a/enclave/untrusted/Cargo.toml b/enclave/untrusted/Cargo.toml new file mode 100644 index 00000000000..870ef016456 --- /dev/null +++ b/enclave/untrusted/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "ekiden-enclave-untrusted" +version = "0.1.0-alpha.1" +authors = [ + "Raymond Cheng ", + "Jernej Kos " +] + +[features] +sgx-simulation = [] + +[dependencies] +protobuf = "1.4.3" +sgx_types = { git = "https://github.com/ekiden/rust-sgx-sdk", tag = "v0.9.7-ekiden1" } +sgx_urts = { git = "https://github.com/ekiden/rust-sgx-sdk", tag = "v0.9.7-ekiden1" } +ekiden-common = { path = "../../common", version = "0.1.0-alpha.1" } +ekiden-enclave-common = { path = "../common", version = "0.1.0-alpha.1" } + +[build-dependencies] +ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" } diff --git a/enclave/untrusted/Makefile.toml b/enclave/untrusted/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/enclave/untrusted/Makefile.toml @@ -0,0 +1 @@ +extend = "../Makefile.toml" diff --git a/enclave/untrusted/build.rs b/enclave/untrusted/build.rs new file mode 100644 index 00000000000..9eece0734a3 --- /dev/null +++ b/enclave/untrusted/build.rs @@ -0,0 +1,5 @@ +extern crate ekiden_tools; + +fn main() { + ekiden_tools::detect_sgx_features(); +} diff --git a/enclave/untrusted/src/ecall_proxy.rs b/enclave/untrusted/src/ecall_proxy.rs new file mode 100644 index 00000000000..c803624919c --- /dev/null +++ b/enclave/untrusted/src/ecall_proxy.rs @@ -0,0 +1,31 @@ +use sgx_types::{self, sgx_enclave_id_t, sgx_status_t}; + +extern "C" { + pub fn identity_create( + eid: sgx_enclave_id_t, + sealed_identity: *mut sgx_types::sgx_sealed_data_t, + sealed_identity_capacity: usize, + sealed_identity_length: &mut usize, + ) -> sgx_status_t; + + pub fn identity_restore( + eid: sgx_enclave_id_t, + sealed_identity: *const sgx_types::sgx_sealed_data_t, + sealed_identity_length: usize, + public_identity: *mut u8, + public_identity_capacity: usize, + public_identity_length: &mut usize, + ) -> sgx_status_t; + + pub fn identity_create_report( + eid: sgx_enclave_id_t, + target_info: &sgx_types::sgx_target_info_t, + report: &mut sgx_types::sgx_report_t, + ) -> sgx_status_t; + + pub fn identity_set_av_report( + eid: sgx_enclave_id_t, + av_report: *const u8, + av_report_length: usize, + ) -> sgx_status_t; +} diff --git a/enclave/untrusted/src/enclave.rs b/enclave/untrusted/src/enclave.rs new file mode 100644 index 00000000000..3780d9895fa --- /dev/null +++ b/enclave/untrusted/src/enclave.rs @@ -0,0 +1,47 @@ +//! Enclave interface. +use sgx_types::*; +use sgx_urts::SgxEnclave; + +use ekiden_common::error::{Error, Result}; + +/// Ekiden enclave. +pub struct Enclave { + /// Internal enclave instance. + enclave: SgxEnclave, +} + +impl Enclave { + /// Initializes a new enclave. + pub fn new(filename: &str) -> Result<Self> { + let mut launch_token: sgx_launch_token_t = [0; 1024]; + let mut launch_token_updated: i32 = 0; + + // Initialize enclave. + // TODO: Handle debug vs. release mode.
+ let debug = 1; + let mut misc_attr = sgx_misc_attribute_t { + secs_attr: sgx_attributes_t { flags: 0, xfrm: 0 }, + misc_select: 0, + }; + + let enclave = match SgxEnclave::create( + filename, + debug, + &mut launch_token, + &mut launch_token_updated, + &mut misc_attr, + ) { + Ok(enclave) => enclave, + Err(_) => { + return Err(Error::new("Failed to launch enclave")); + } + }; + + Ok(Enclave { enclave: enclave }) + } + + /// Return enclave identifier. + pub fn get_id(&self) -> sgx_enclave_id_t { + self.enclave.geteid() + } +} diff --git a/enclave/untrusted/src/identity.rs b/enclave/untrusted/src/identity.rs new file mode 100644 index 00000000000..b0005e5f71f --- /dev/null +++ b/enclave/untrusted/src/identity.rs @@ -0,0 +1,250 @@ +use std; +use std::path::Path; + +use sgx_types; + +use protobuf; +use protobuf::Message; + +use ekiden_common::error::{Error, Result}; +use ekiden_enclave_common::api; + +use super::enclave::Enclave; + +/// The IAS functionality that the enclave identity component needs. +pub trait IAS { + /// Get the SPID. This is needed to generate an appropriate quote. + fn get_spid(&self) -> &sgx_types::sgx_spid_t; + + /// Get the kind of quotes that this service expects. When you register for an SPID, you sign + /// up to use a specific kind of quote signature, either linkable or non-linkable. + fn get_quote_type(&self) -> sgx_types::sgx_quote_sign_type_t; + + /// Retrieve the signature revocation list for a given EPID group. + fn sigrl(&self, gid: &sgx_types::sgx_epid_group_id_t) -> Vec<u8>; + + /// Verify submitted attestation evidence and create a new Attestation Verification Report. + fn report(&self, quote: &[u8]) -> api::AvReport; +} + +/// Enclave identity interface. +pub trait EnclaveIdentity { + /// Initialize the enclave identity. Load it from a file or create one if it doesn't exist. + /// Returns the identity proof. + fn identity_init(&self, ias: &IAS, saved_identity_path: &Path) -> Result<api::IdentityProof>; +} + +const SEALED_DATA_CAPACITY: usize = 1024; +union SealedDataBuffer { + sealed_data: sgx_types::sgx_sealed_data_t, + buffer: [u8; SEALED_DATA_CAPACITY], +} + +const QUOTE_CAPACITY: usize = 16 * 1024; +union QuoteBuffer { + quote: sgx_types::sgx_quote_t, + buffer: [u8; QUOTE_CAPACITY], +} + +const PUBLIC_IDENTITY_CAPACITY: usize = 1024; + +impl EnclaveIdentity for Enclave { + /// Restore a saved identity, creating one and saving it if we don't already have one. Returns + /// the enclave identity proof. + fn identity_init(&self, ias: &IAS, saved_identity_path: &Path) -> Result<api::IdentityProof> { + if let Ok(mut file) = std::fs::File::open(saved_identity_path) { + // Have saved identity. Load it. + let mut saved_identity: api::SavedIdentity = protobuf::parse_from_reader(&mut file)?; + let sealed_identity_length = saved_identity.get_sealed_identity().len(); + if sealed_identity_length > SEALED_DATA_CAPACITY { + return Err(Error::new(format!( + "Saved identity is too large ({}/{})", + sealed_identity_length, SEALED_DATA_CAPACITY + ))); + } + let mut sealed_identity_buf: SealedDataBuffer = unsafe { std::mem::zeroed() }; + unsafe { &mut sealed_identity_buf.buffer[..sealed_identity_length] } + .copy_from_slice(saved_identity.get_sealed_identity()); + + // Restore the identity.
+ let mut public_identity = vec![0; PUBLIC_IDENTITY_CAPACITY]; + let mut public_identity_length = 0; + let result = unsafe { + super::ecall_proxy::identity_restore( + self.get_id(), + &sealed_identity_buf.sealed_data, + sealed_identity_length, + public_identity.as_mut_ptr(), + public_identity.len(), + &mut public_identity_length, + ) + }; + if result != sgx_types::sgx_status_t::SGX_SUCCESS { + return Err(Error::new(format!("identity_restore: {}", result))); + } + public_identity.truncate(public_identity_length); + + // Send the AV report to the enclave. + let av_report_bytes = saved_identity.get_av_report().write_to_bytes()?; + let result = unsafe { + super::ecall_proxy::identity_set_av_report( + self.get_id(), + av_report_bytes.as_ptr(), + av_report_bytes.len(), + ) + }; + if result != sgx_types::sgx_status_t::SGX_SUCCESS { + return Err(Error::new(format!("identity_set_av_report: {}", result))); + } + + // Assemble the enclave identity proof. + let mut identity_proof = api::IdentityProof::new(); + identity_proof.set_public_identity(public_identity); + identity_proof.set_av_report(saved_identity.take_av_report()); + Ok(identity_proof) + } else { + // TODO: handle other errors + + // Don't have saved identity. Create a new identity. + let mut saved_identity = api::SavedIdentity::new(); + + // Get QE's target info and EPID gid. + let mut qe_target_info = unsafe { std::mem::zeroed() }; + let mut gid = unsafe { std::mem::zeroed() }; + let result = unsafe { sgx_types::sgx_init_quote(&mut qe_target_info, &mut gid) }; + if result != sgx_types::sgx_status_t::SGX_SUCCESS { + return Err(Error::new(format!("sgx_init_quote: {}", result))); + } + + // Retrieve signature revocation list. + // TODO: implement and enable sigrl + // let sig_rl: Vec<u8> = ias.sigrl(&gid); + + // Create a new identity. + let mut sealed_identity_buf: SealedDataBuffer = unsafe { std::mem::zeroed() }; + let mut sealed_identity_length = 0; + let result = unsafe { + super::ecall_proxy::identity_create( + self.get_id(), + &mut sealed_identity_buf.sealed_data, + SEALED_DATA_CAPACITY, + &mut sealed_identity_length, + ) + }; + if result != sgx_types::sgx_status_t::SGX_SUCCESS { + return Err(Error::new(format!("identity_create: {}", result))); + } + + // + saved_identity.set_sealed_identity( + unsafe { &sealed_identity_buf.buffer[..sealed_identity_length] }.to_vec(), + ); + + // Restore the identity. + let mut public_identity = vec![0; PUBLIC_IDENTITY_CAPACITY]; + let mut public_identity_length = 0; + let result = unsafe { + super::ecall_proxy::identity_restore( + self.get_id(), + &sealed_identity_buf.sealed_data, + sealed_identity_length, + public_identity.as_mut_ptr(), + public_identity.len(), + &mut public_identity_length, + ) + }; + if result != sgx_types::sgx_status_t::SGX_SUCCESS { + return Err(Error::new(format!("identity_restore: {}", result))); + } + public_identity.truncate(public_identity_length); + + // Create a report for QE. + let mut report = unsafe { std::mem::zeroed() }; + let result = unsafe { + super::ecall_proxy::identity_create_report( + self.get_id(), + &qe_target_info, + &mut report, + ) + }; + if result != sgx_types::sgx_status_t::SGX_SUCCESS { + return Err(Error::new(format!("identity_create_report: {}", result))); + } + + // Get a quote.
+ let mut quote_size = 0; + let result = unsafe { + sgx_types::sgx_calc_quote_size( + // TODO: implement and enable sigrl + std::ptr::null(), + 0, + // sig_rl.as_ptr(), + // sig_rl.len() as u32, + &mut quote_size, + ) + }; + if result != sgx_types::sgx_status_t::SGX_SUCCESS { + return Err(Error::new(format!("sgx_calc_quote_size: {}", result))); + } + if quote_size as usize > QUOTE_CAPACITY { + return Err(Error::new(format!( + "Quote identity is too large ({}/{})", + quote_size, QUOTE_CAPACITY + ))); + } + let mut quote_buf: QuoteBuffer = unsafe { std::mem::zeroed() }; + let nonce = unsafe { std::mem::zeroed() }; + let result = unsafe { + sgx_types::sgx_get_quote( + &report, + ias.get_quote_type(), + ias.get_spid(), + nonce, + // TODO: implement and enable sigrl + std::ptr::null(), + 0, + // sig_rl.as_ptr(), + // sig_rl.len() as u32, + std::ptr::null_mut(), + &mut quote_buf.quote, + quote_size, + ) + }; + if result != sgx_types::sgx_status_t::SGX_SUCCESS { + return Err(Error::new(format!("sgx_get_quote: {}", result))); + } + + // Verify attestation evidence. + let av_report = ias.report(unsafe { &quote_buf.buffer[..quote_size as usize] }); + + // + saved_identity.set_av_report(av_report); + + // Send the AV report to the enclave. + let av_report_bytes = saved_identity + .get_av_report() + .write_to_bytes() + .expect("Message::write_to_bytes"); + let result = unsafe { + super::ecall_proxy::identity_set_av_report( + self.get_id(), + av_report_bytes.as_ptr(), + av_report_bytes.len(), + ) + }; + if result != sgx_types::sgx_status_t::SGX_SUCCESS { + return Err(Error::new(format!("identity_set_av_report: {}", result))); + } + + // Save the identity. + let mut file = std::fs::File::create(saved_identity_path)?; + saved_identity.write_to_writer(&mut file)?; + + // Assemble the enclave identity proof. + let mut identity_proof = api::IdentityProof::new(); + identity_proof.set_public_identity(public_identity); + identity_proof.set_av_report(saved_identity.take_av_report()); + Ok(identity_proof) + } + } +} diff --git a/enclave/untrusted/src/lib.rs b/enclave/untrusted/src/lib.rs new file mode 100644 index 00000000000..3a161667fca --- /dev/null +++ b/enclave/untrusted/src/lib.rs @@ -0,0 +1,23 @@ +extern crate sgx_types; +extern crate sgx_urts; + +extern crate protobuf; + +extern crate ekiden_common; +extern crate ekiden_enclave_common; + +pub mod ecall_proxy; +pub mod enclave; +pub mod identity; + +// Exports. +pub use enclave::Enclave; + +// For the below link statements to work, the library paths need to be correctly +// configured. The easiest way to achieve that is to use the build_untrusted +// helper from ekiden_tools. + +// Ensure that we link to sgx_urts library. +#[cfg_attr(not(feature = "sgx-simulation"), link(name = "sgx_urts"))] +#[cfg_attr(feature = "sgx-simulation", link(name = "sgx_urts_sim"))] +extern "C" {} diff --git a/keys/attestation/client.pfx b/keys/attestation/client.pfx new file mode 100644 index 0000000000000000000000000000000000000000..67abdfd4a0465b7470ce5ed54d4f6169a28bdb21 GIT binary patch literal 2397 zcmV-j38MBef(cmy0Ru3C2_FUtDuzgg_YDCD0ic2j5Cnn=3^0NT2rz;JzXk~^hDe6@ z4FLxRpn?OiFoFZD0s#Opf&--n2`Yw2hW8Bt2LUh~1_~;MNQUf2k%mZZWewcg%-y+VNPSeE#d{Wd-c)xgo0?U32JgnVW2zA;~5

[remainder of the keys/attestation/client.pfx binary patch: garbled base85 data in this snapshot; it cuts off mid-blob, also swallowing keys/attestation/spid, keys/private.pem, rpc/Makefile.toml, and the diff header and [package] section of rpc/client/Cargo.toml, which resumes below] +description = "Ekiden RPC client" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" + +[dependencies] +ekiden-common = { path = "../../common", version = "0.1.0-alpha.1" } +ekiden-enclave-common = { path = "../../enclave/common", version = "0.1.0-alpha.1" } +ekiden-rpc-common = { path = "../common", version = "0.1.0-alpha.1" } +protobuf = "1.4.3" +sodalite = "0.3.0" + +[target.'cfg(target_env = "sgx")'.dependencies] +futures-sgx = { git = "https://github.com/ekiden/futures-rs" } + +[target.'cfg(not(target_env = "sgx"))'.dependencies] +ekiden-compute-api = { path = "../../compute/api", version = "0.1.0-alpha.1" } +byteorder = "1.2.1" +futures = "0.1" +grpc = "0.2.1" +httpbis = "0.4.1" +rand = "0.4" +tls-api = "0.1.12" +tokio-core = "0.1" + +[build-dependencies] +ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" } +protoc-rust-grpc = "0.2.1" diff --git a/rpc/client/Makefile.toml b/rpc/client/Makefile.toml new file mode 100644 index 00000000000..9dcb41fa936 --- /dev/null +++ b/rpc/client/Makefile.toml @@ -0,0 +1,4 @@ +extend = "../Makefile.toml" + +[env] +BUILD_FOR_SGX_TARGET = "true" diff --git a/rpc/client/src/backend/base.rs b/rpc/client/src/backend/base.rs new file mode 100644 index 00000000000..47b565879ef --- /dev/null +++ b/rpc/client/src/backend/base.rs @@ -0,0 +1,22 @@ +use futures::Future; + +use ekiden_rpc_common::api; + +use super::super::future::ClientFuture; + +/// Contract client backend. +pub trait ContractClientBackend: Send { + /// Spawn future using an executor. + fn spawn<F: Future<Item = (), Error = ()> + Send + 'static>(&self, future: F); + + /// Call contract. + fn call(&self, client_request: api::ClientRequest) -> ClientFuture<api::ClientResponse>; + + /// Call contract with raw data. + fn call_raw(&self, request: Vec<u8>) -> ClientFuture<Vec<u8>>; + + /// Get credentials.
+ /// + /// This method should return `None` to connect anonymously. + fn get_credentials(&self) -> Option<ContractClientCredentials>; +} diff --git a/rpc/client/src/backend/mod.rs b/rpc/client/src/backend/mod.rs new file mode 100644 index 00000000000..3da185ef917 --- /dev/null +++ b/rpc/client/src/backend/mod.rs @@ -0,0 +1,22 @@ +//! RPC client backend. + +use ekiden_enclave_common::api::IdentityProof; +use sodalite; + +pub struct ContractClientCredentials { + /// The long-term client key. + pub long_term_private_key: sodalite::BoxSecretKey, + /// The enclave identity proof of the client for mutual authentication. + pub identity_proof: IdentityProof, +} + +mod base; + +#[cfg(not(target_env = "sgx"))] +pub mod web3; + +// Re-export. +pub use self::base::ContractClientBackend; + +#[cfg(not(target_env = "sgx"))] +pub use self::web3::Web3ContractClientBackend;
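For a sense of the trait's surface, here is a hypothetical no-op backend (a test double, not part of this change), assuming `ClientFuture<T>` is the boxed future type used throughout this crate:
```rust
use futures::future::{self, Future};

use ekiden_common::error::Error;
use ekiden_rpc_common::api;

use super::{ContractClientBackend, ContractClientCredentials};
use super::super::future::ClientFuture;

/// Backend that fails every call; useful only as a placeholder in tests.
pub struct NullContractClientBackend;

impl ContractClientBackend for NullContractClientBackend {
    fn spawn<F: Future<Item = (), Error = ()> + Send + 'static>(&self, _future: F) {
        // No executor: futures are simply dropped.
    }

    fn call(&self, _client_request: api::ClientRequest) -> ClientFuture<api::ClientResponse> {
        Box::new(future::err(Error::new("no backend")))
    }

    fn call_raw(&self, _request: Vec<u8>) -> ClientFuture<Vec<u8>> {
        Box::new(future::err(Error::new("no backend")))
    }

    fn get_credentials(&self) -> Option<ContractClientCredentials> {
        None
    }
}
```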
diff --git a/rpc/client/src/backend/web3.rs b/rpc/client/src/backend/web3.rs new file mode 100644 index 00000000000..a85de51d79d --- /dev/null +++ b/rpc/client/src/backend/web3.rs @@ -0,0 +1,236 @@ +//! gRPC client backend. +use std::sync::{Arc, Mutex}; + +use grpc; +use tokio_core; + +use futures::future::{self, Future}; + +use protobuf; +use protobuf::Message; + +use ekiden_common::error::{Error, Result}; +use ekiden_rpc_common::api; + +use ekiden_compute_api::{CallContractRequest, Compute, ComputeClient}; + +use super::{ContractClientBackend, ContractClientCredentials}; +use super::super::future::ClientFuture; + +/// Address of a compute node. +pub struct ComputeNodeAddress { + /// Compute node hostname. + pub host: String, + /// Compute node port. + pub port: u16, +} + +struct ComputeNode { + /// gRPC client for the given node. + client: ComputeClient, + /// Failed flag. + failed: bool, +} + +#[derive(Default)] +struct ComputeNodes { + /// Active nodes. + nodes: Arc<Mutex<Vec<ComputeNode>>>, +} + +impl ComputeNodes { + /// Construct new pool of compute nodes. + fn new(nodes: &[ComputeNodeAddress]) -> Result<Self> { + let instance = ComputeNodes::default(); + + for node in nodes { + instance.add_node(node)?; + } + + Ok(instance) + } + + /// Add a new compute node. + fn add_node(&self, address: &ComputeNodeAddress) -> Result<()> { + // TODO: Pass specific reactor to the compute client as otherwise it will spawn a new thread. + let client = match ComputeClient::new_plain(&address.host, address.port, Default::default()) + { + Ok(client) => client, + _ => return Err(Error::new("Failed to initialize gRPC client")), + }; + + let mut nodes = self.nodes.lock().unwrap(); + nodes.push(ComputeNode { + client, + failed: false, + }); + + Ok(()) + } + + /// Call the first available compute node. + fn call_available_node( + &self, + client_request: Vec<u8>, + max_retries: usize, + ) -> ClientFuture<Vec<u8>> { + let mut rpc_request = CallContractRequest::new(); + rpc_request.set_payload(client_request); + + let shared_nodes = self.nodes.clone(); + + let try_times = future::loop_fn( + max_retries, + move |retries| -> ClientFuture<future::Loop<Vec<u8>, usize>> { + // Abort when we have reached the given number of retries. + if retries == 0 { + return Box::new(future::err(Error::new( + "No active compute nodes are available", + ))); + } + + let cloned_nodes = shared_nodes.clone(); + let rpc_request = rpc_request.clone(); + + // Try to find an active node on each iteration. + let try_node = future::loop_fn( + (), + move |_| -> ClientFuture<future::Loop<Vec<u8>, ()>> { + let nodes = cloned_nodes.lock().unwrap(); + + // Find the first non-failed node and use it to send a request. + match nodes.iter().enumerate().find(|&(_, node)| !node.failed) { + Some((index, ref node)) => { + // Found a non-failed node. + let cloned_nodes = cloned_nodes.clone(); + + return Box::new( + node.client + .call_contract( + grpc::RequestOptions::new(), + rpc_request.clone(), + ) + .drop_metadata() + .then(move |result| { + match result { + Ok(mut response) => { + Ok(future::Loop::Break(response.take_payload())) + } + Err(_) => { + let mut nodes = cloned_nodes.lock().unwrap(); + // Since we never remove or reorder nodes, we can be sure that this + // index always belongs to the specified node and we can avoid sharing + // and locking individual node instances. + nodes[index].failed = true; + + Ok(future::Loop::Continue(())) + } + } + }), + ); + } + None => {} + } + + Box::new(future::err(Error::new( + "No active compute nodes are available on this retry", + ))) + }, + ); + + let cloned_nodes = shared_nodes.clone(); + + Box::new(try_node.then(move |result| match result { + Ok(response) => Ok(future::Loop::Break(response)), + Err(_) => { + let mut nodes = cloned_nodes.lock().unwrap(); + + // All nodes seem to be failed. Reset failed status for next retry. + for node in nodes.iter_mut() { + node.failed = false; + } + + Ok(future::Loop::Continue(retries - 1)) + } + })) + }, + ); + + Box::new(try_times) + } +} + +/// gRPC client backend. +pub struct Web3ContractClientBackend { + /// Handle of the reactor used for running all futures. + reactor: tokio_core::reactor::Remote, + /// Pool of compute nodes that the client can use. + nodes: ComputeNodes, +} + +impl Web3ContractClientBackend { + /// Construct new Web3 contract client backend. + pub fn new(reactor: tokio_core::reactor::Remote, host: &str, port: u16) -> Result<Self> { + Self::new_pool( + reactor, + &[ + ComputeNodeAddress { + host: host.to_string(), + port: port, + }, + ], + ) + } + + /// Construct new Web3 contract client backend with a pool of nodes. + pub fn new_pool( + reactor: tokio_core::reactor::Remote, + nodes: &[ComputeNodeAddress], + ) -> Result<Self> { + Ok(Web3ContractClientBackend { + reactor: reactor.clone(), + nodes: ComputeNodes::new(&nodes)?, + }) + } + + /// Add a new compute node for this client. + pub fn add_node(&self, address: &ComputeNodeAddress) -> Result<()> { + self.nodes.add_node(&address) + } + + /// Perform a raw contract call via gRPC. + fn call_available_node(&self, client_request: Vec<u8>) -> ClientFuture<Vec<u8>> { + self.nodes.call_available_node(client_request, 3) + } +} + +impl ContractClientBackend for Web3ContractClientBackend { + /// Spawn future using an executor. + fn spawn<F: Future<Item = (), Error = ()> + Send + 'static>(&self, future: F) { + self.reactor.spawn(move |_| future); + } + + /// Call contract. + fn call(&self, client_request: api::ClientRequest) -> ClientFuture<api::ClientResponse> { + let result = self.call_raw(match client_request.write_to_bytes() { + Ok(request) => request, + _ => return Box::new(future::err(Error::new("Failed to serialize request"))), + }).and_then(|response| { + let client_response: api::ClientResponse = protobuf::parse_from_bytes(&response)?; + + Ok(client_response) + }); + + Box::new(result) + } + + /// Call contract with raw data. + fn call_raw(&self, client_request: Vec<u8>) -> ClientFuture<Vec<u8>> { + self.call_available_node(client_request) + } + + /// Get credentials.
+ fn get_credentials(&self) -> Option<ContractClientCredentials> { + None + } +} diff --git a/rpc/client/src/client.rs b/rpc/client/src/client.rs new file mode 100644 index 00000000000..91f56673775 --- /dev/null +++ b/rpc/client/src/client.rs @@ -0,0 +1,553 @@ +use std::sync::Arc; +#[cfg(not(target_env = "sgx"))] +use std::sync::Mutex; +#[cfg(target_env = "sgx")] +use std::sync::SgxMutex as Mutex; + +#[cfg(not(target_env = "sgx"))] +use futures::Stream; +use futures::future::{self, Future}; +#[cfg(not(target_env = "sgx"))] +use futures::sync::{mpsc, oneshot}; + +use protobuf; +use protobuf::{Message, MessageStatic}; + +use ekiden_common::error::Error; +#[cfg(not(target_env = "sgx"))] +use ekiden_common::error::Result; +use ekiden_enclave_common::quote::MrEnclave; +use ekiden_rpc_common::api; + +use super::backend::ContractClientBackend; +use super::future::ClientFuture; +#[cfg(target_env = "sgx")] +use super::future::FutureExtra; +use super::secure_channel::SecureChannelContext; + +/// Commands sent to the processing task. +#[cfg(not(target_env = "sgx"))] +enum Command { + /// Make a remote method call. + Call(api::PlainClientRequest, oneshot::Sender<Result<Vec<u8>>>), + /// Initialize secure channel. + InitSecureChannel(oneshot::Sender<Result<()>>), + /// Close secure channel. + CloseSecureChannel(oneshot::Sender<Result<()>>), +} + +/// Contract client context used for async calls. +struct ContractClientContext<Backend: ContractClientBackend> { + /// Backend handling network communication. + backend: Backend, + /// Contract MRENCLAVE. + mr_enclave: MrEnclave, + /// Secure channel context. + secure_channel: SecureChannelContext, + /// Client authentication required flag. + client_authentication: bool, +} + +/// Helper for running client commands. +#[cfg(not(target_env = "sgx"))] +fn run_command<F, R>(cmd: F, response_tx: oneshot::Sender<Result<R>>) -> ClientFuture<()> +where + F: Future<Item = R, Error = Error> + Send + 'static, + R: Send + 'static, +{ + Box::new(cmd.then(move |result| { + // Send command result back to response channel, ignoring any errors, which + // may be due to closing of the other end of the response channel. + response_tx.send(result).or(Ok(())) + })) +} + +impl<Backend: ContractClientBackend + 'static> ContractClientContext<Backend> { + /// Process commands sent via the command channel. + /// + /// This method returns a future, which keeps processing all commands received + /// via the `request_rx` channel. It should be spawned as a separate task. + /// + /// Processing commands in this way ensures that all client requests are processed + /// in order, with no interleaving of requests, regardless of how the futures + /// executor is implemented. + #[cfg(not(target_env = "sgx"))] + fn process_commands( + context: Arc<Mutex<Self>>, + request_rx: mpsc::UnboundedReceiver<Command>, + ) -> ClientFuture<()> { + // Process all requests in order. The stream processing ends when the sender + // handle (request_tx) in ContractClient is dropped. + let result = request_rx + .map_err(|_| Error::new("Command channel closed")) + .for_each(move |command| -> ClientFuture<()> { + match command { + Command::Call(request, response_tx) => { + run_command(Self::call_raw(context.clone(), request), response_tx) + } + Command::InitSecureChannel(response_tx) => { + run_command(Self::init_secure_channel(context.clone()), response_tx) + } + Command::CloseSecureChannel(response_tx) => { + run_command(Self::close_secure_channel(context.clone()), response_tx) + } + } + }); + + Box::new(result) + } + + /// Call a contract method. + fn call_raw( + context: Arc<Mutex<Self>>, + plain_request: api::PlainClientRequest, + ) -> ClientFuture<Vec<u8>> { + // Ensure secure channel is initialized before making the request.
+ let init_sc = Self::init_secure_channel(context.clone()); + + // Context moved into the closure (renamed for clarity). + let shared_context = context; + + let result = init_sc.and_then(move |_| -> ClientFuture<Vec<u8>> { + // Clone method for use in later future. + let cloned_method = plain_request.get_method().to_owned(); + + // Prepare the backend call future. This is done in a new scope so that the held + // lock is released early and we can move shared_context into the next future. + let backend_call = { + let mut context = shared_context.lock().unwrap(); + + let mut client_request = api::ClientRequest::new(); + if context.secure_channel.must_encrypt() { + // Encrypt request. + client_request.set_encrypted_request(match context + .secure_channel + .create_request_box(&plain_request) + { + Ok(request) => request, + Err(error) => return Box::new(future::err(error)), + }); + } else { + // Plain-text request. + client_request.set_plain_request(plain_request); + } + + // Invoke the backend to make the actual request. + context.backend.call(client_request) + }; + + // After the backend call is done, handle the response. + let result = backend_call.and_then( + move |mut client_response| -> ClientFuture<Vec<u8>> { + let mut plain_response = { + let mut context = shared_context.lock().unwrap(); + + let mut plain_response = { + if client_response.has_encrypted_response() { + // Encrypted response. + match context + .secure_channel + .open_response_box(&client_response.get_encrypted_response()) + { + Ok(response) => response, + Err(error) => return Box::new(future::err(error)), + } + } else { + // Plain-text response. + client_response.take_plain_response() + } + }; + + if context.secure_channel.must_encrypt() + && !client_response.has_encrypted_response() + { + match plain_response.get_code() { + api::PlainClientResponse_Code::ERROR_SECURE_CHANNEL => { + // Request the secure channel to be reset. + // NOTE: This opens us up to potential adversarial interference as an + // adversarial compute node can force the channel to be reset by + // crafting a non-authenticated response. But a compute node can + // always deny service or prevent the secure channel from being + // established in the first place, so this is not really an issue. + if cloned_method != api::METHOD_CHANNEL_INIT { + context.secure_channel.close(); + + // Channel will reset on the next request. + return Box::new(future::err(Error::new( + "Secure channel closed", + ))); + } + } + _ => {} + } + + return Box::new(future::err(Error::new( + "Contract returned plain response for encrypted request", + ))); + } + + plain_response + }; + + // Validate response code. + match plain_response.get_code() { + api::PlainClientResponse_Code::SUCCESS => {} + _ => { + // Deserialize error. + let mut error: api::Error = { + match protobuf::parse_from_bytes(&plain_response.take_payload()) { + Ok(error) => error, + _ => return Box::new(future::err(Error::new("Unknown error"))), + } + }; + + return Box::new(future::err(Error::new(error.get_message()))); + } + }; + + Box::new(future::ok(plain_response.take_payload())) + }, + ); + + Box::new(result) + }); + + Box::new(result) + } + + /// Call a contract method. + fn call<Rq, Rs>(context: Arc<Mutex<Self>>, method: &str, request: Rq) -> ClientFuture<Rs> + where + Rq: Message, + Rs: Message + MessageStatic, + { + // Create a request.
+        let mut plain_request = api::PlainClientRequest::new();
+        plain_request.set_method(method.to_owned());
+        plain_request.set_payload(match request.write_to_bytes() {
+            Ok(payload) => payload,
+            _ => return Box::new(future::err(Error::new("Failed to serialize request"))),
+        });
+
+        // Make the raw call and then deserialize the response.
+        let result = Self::call_raw(context, plain_request).and_then(|plain_response| {
+            let response: Rs = match protobuf::parse_from_bytes(&plain_response) {
+                Ok(response) => response,
+                Err(error) => return Err(Error::from(error)),
+            };
+
+            Ok(response)
+        });
+
+        Box::new(result)
+    }
+
+    /// Initialize a secure channel with the contract.
+    ///
+    /// If the channel has already been initialized the future returned by this method
+    /// will immediately resolve.
+    fn init_secure_channel(
+        context: Arc<Mutex<ContractClientContext<Backend>>>,
+    ) -> ClientFuture<()> {
+        // Context moved into the closure (renamed for clarity).
+        let shared_context = context;
+
+        let result = future::lazy(move || {
+            // Return is futures::future::Either. A is immediate return. B is request.
+
+            let request = {
+                let mut context = shared_context.lock().unwrap();
+
+                // If secure channel is already initialized, we don't need to do anything.
+                if !context.secure_channel.is_closed() {
+                    return future::Either::A(future::ok(()));
+                }
+
+                // Reset secure channel.
+                match context.secure_channel.reset() {
+                    Ok(()) => {}
+                    Err(error) => return future::Either::A(future::err(error)),
+                };
+
+                let mut request = api::ChannelInitRequest::new();
+                request.set_short_term_public_key(
+                    context.secure_channel.get_client_public_key().to_vec(),
+                );
+                request
+            };
+
+            // Call remote channel init.
+            future::Either::B(
+                Self::call::<api::ChannelInitRequest, api::ChannelInitResponse>(
+                    shared_context.clone(),
+                    api::METHOD_CHANNEL_INIT,
+                    request,
+                ).and_then(move |response: api::ChannelInitResponse| {
+                    // Return is futures::future::Either. A is immediate return. B is request.
+
+                    let request = {
+                        let mut context = shared_context.lock().unwrap();
+                        let client_authentication = context.client_authentication;
+
+                        // Verify contract identity and set up a secure channel.
+                        let iai = match context.secure_channel.setup(
+                            response.get_authenticated_short_term_public_key(),
+                            client_authentication,
+                        ) {
+                            Ok(iai) => iai,
+                            Err(e) => return future::Either::A(future::err(e)),
+                        };
+
+                        // Verify MRENCLAVE.
+                        if &iai.mr_enclave != &context.mr_enclave {
+                            return future::Either::A(future::err(Error::new(
+                                "Secure channel initialization failed: MRENCLAVE mismatch",
+                            )));
+                        }
+
+                        // TODO: Other access control policy on enclave identity will go here.
+
+                        // If we don't need to authenticate, we're done.
+                        if !client_authentication {
+                            return future::Either::A(future::ok(()));
+                        }
+
+                        let mut request = api::ChannelAuthRequest::new();
+                        let credentials = match context.backend.get_credentials() {
+                            Some(credentials) => credentials,
+                            None => return future::Either::A(future::err(Error::new(
+                                "Channel requires client authentication and backend has no credentials",
+                            ))),
+                        };
+                        let bastpk = match context.secure_channel.get_authentication(
+                            &credentials.long_term_private_key,
+                            credentials.identity_proof,
+                        ) {
+                            Ok(bastpk) => bastpk,
+                            Err(e) => return future::Either::A(future::err(e)),
+                        };
+                        request.set_boxed_authenticated_short_term_public_key(bastpk);
+                        request
+                    };
+
+                    // Call remote channel auth.
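// To recap the handshake driven above, in the notation of enclave_rpc.proto:
//   1. _channel_init: the client sends C' (its short-term public key); the
//      contract replies with (AE, Box[E'](E->C')), i.e. its attested identity
//      plus a boxed short-term key.
//   2. _channel_auth (only when client_authentication is set): the client sends
//      (C', Box[AC, Box[C'](C->E)](C'->E')), proving its own identity.
//   3. authentication_sent() then marks the channel as Established.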
+                    future::Either::B(
+                        Self::call::<api::ChannelAuthRequest, api::ChannelAuthResponse>(
+                            shared_context.clone(),
+                            api::METHOD_CHANNEL_AUTH,
+                            request,
+                        ).and_then(
+                            move |_response: api::ChannelAuthResponse| {
+                                let mut context = shared_context.lock().unwrap();
+
+                                context.secure_channel.authentication_sent()
+                            },
+                        ),
+                    )
+                }),
+            )
+        });
+
+        Box::new(result)
+    }
+
+    /// Close secure channel.
+    ///
+    /// If this method is not called, the secure channel is automatically closed in
+    /// a blocking fashion when the client is dropped.
+    fn close_secure_channel(
+        context: Arc<Mutex<ContractClientContext<Backend>>>,
+    ) -> ClientFuture<()> {
+        // Context moved into the closure (renamed for clarity).
+        let shared_context = context;
+
+        let result = future::lazy(move || -> ClientFuture<()> {
+            {
+                let context = shared_context.lock().unwrap();
+
+                // If secure channel is not open we don't need to do anything.
+                if context.secure_channel.is_closed() {
+                    return Box::new(future::ok(()));
+                }
+            }
+
+            // Send request to close channel.
+            let request = api::ChannelCloseRequest::new();
+
+            let result = Self::call::<api::ChannelCloseRequest, api::ChannelCloseResponse>(
+                shared_context.clone(),
+                api::METHOD_CHANNEL_CLOSE,
+                request,
+            ).and_then(move |_| {
+                let mut context = shared_context.lock().unwrap();
+
+                // Close local part of the secure channel.
+                context.secure_channel.close();
+
+                Ok(())
+            });
+
+            Box::new(result)
+        });
+
+        Box::new(result)
+    }
+}
+
+/// Contract client.
+pub struct ContractClient<Backend: ContractClientBackend + 'static> {
+    /// Actual client context that can be shared between threads.
+    context: Arc<Mutex<ContractClientContext<Backend>>>,
+    /// Channel for processing requests.
+    #[cfg(not(target_env = "sgx"))]
+    request_tx: mpsc::UnboundedSender<Command>,
+}
+
+impl<Backend: ContractClientBackend + 'static> ContractClient<Backend> {
+    /// Constructs a new contract client.
+    ///
+    /// The client API macro calls this.
+    pub fn new(backend: Backend, mr_enclave: MrEnclave, client_authentication: bool) -> Self {
+        // Create request processing channel.
+        #[cfg(not(target_env = "sgx"))]
+        let (request_tx, request_rx) = mpsc::unbounded();
+
+        let client = ContractClient {
+            context: Arc::new(Mutex::new(ContractClientContext {
+                backend: backend,
+                mr_enclave: mr_enclave,
+                secure_channel: SecureChannelContext::default(),
+                client_authentication: client_authentication,
+            })),
+            #[cfg(not(target_env = "sgx"))]
+            request_tx: request_tx,
+        };
+
+        #[cfg(not(target_env = "sgx"))]
+        {
+            // Spawn a task for processing requests.
+            let request_processor =
+                ContractClientContext::process_commands(client.context.clone(), request_rx);
+
+            let context = client.context.lock().unwrap();
+            context
+                .backend
+                .spawn(request_processor.then(|_| future::ok(())));
+        }
+
+        client
+    }
+
+    /// Call a contract method.
+    #[cfg(target_env = "sgx")]
+    pub fn call<Rq, Rs>(&self, method: &str, request: Rq) -> ClientFuture<Rs>
+    where
+        Rq: Message,
+        Rs: Message + MessageStatic,
+    {
+        ContractClientContext::call(self.context.clone(), &method, request)
+    }
+
+    /// Call a contract method.
+    #[cfg(not(target_env = "sgx"))]
+    pub fn call<Rq, Rs>(&self, method: &str, request: Rq) -> ClientFuture<Rs>
+    where
+        Rq: Message,
+        Rs: Message + MessageStatic,
+    {
+        let (call_tx, call_rx) = oneshot::channel();
+
+        // Create a request.
+        let mut plain_request = api::PlainClientRequest::new();
+        plain_request.set_method(method.to_owned());
+        plain_request.set_payload(match request.write_to_bytes() {
+            Ok(payload) => payload,
+            _ => return Box::new(future::err(Error::new("Failed to serialize request"))),
+        });
+
+        if let Err(_) = self.request_tx
+            .unbounded_send(Command::Call(plain_request, call_tx))
+        {
+            return Box::new(future::err(Error::new("Command channel closed")));
+        }
+
+        // Wait for response.
+        let result = call_rx
+            .map_err(|_| Error::new("Command channel closed"))
+            .and_then(|result| match result {
+                Ok(plain_response) => {
+                    let response: Rs = match protobuf::parse_from_bytes(&plain_response) {
+                        Ok(response) => response,
+                        Err(error) => return Err(Error::from(error)),
+                    };
+
+                    Ok(response)
+                }
+                Err(error) => Err(error),
+            });
+
+        Box::new(result)
+    }
+
+    /// Initialize a secure channel with the contract.
+    ///
+    /// If this method is not called, the secure channel is automatically initialized
+    /// when making the first request.
+    #[cfg(target_env = "sgx")]
+    pub fn init_secure_channel(&self) -> ClientFuture<()> {
+        ContractClientContext::init_secure_channel(self.context.clone())
+    }
+
+    /// Initialize a secure channel with the contract.
+    ///
+    /// If this method is not called, the secure channel is automatically initialized
+    /// when making the first request.
+    #[cfg(not(target_env = "sgx"))]
+    pub fn init_secure_channel(&self) -> ClientFuture<()> {
+        let (call_tx, call_rx) = oneshot::channel();
+
+        if let Err(_) = self.request_tx
+            .unbounded_send(Command::InitSecureChannel(call_tx))
+        {
+            return Box::new(future::err(Error::new("Command channel closed")));
+        }
+
+        // Wait for response.
+        let result = call_rx
+            .map_err(|_| Error::new("Command channel closed"))
+            .and_then(|result| result);
+
+        Box::new(result)
+    }
+
+    /// Close secure channel.
+    ///
+    /// If this method is not called, the secure channel is automatically closed in
+    /// a blocking fashion when the client is dropped.
+    #[cfg(target_env = "sgx")]
+    pub fn close_secure_channel(&self) -> ClientFuture<()> {
+        ContractClientContext::close_secure_channel(self.context.clone())
+    }
+
+    /// Close secure channel.
+    ///
+    /// If this method is not called, the secure channel is automatically closed in
+    /// a blocking fashion when the client is dropped.
+    #[cfg(not(target_env = "sgx"))]
+    pub fn close_secure_channel(&self) -> ClientFuture<()> {
+        let (call_tx, call_rx) = oneshot::channel();
+
+        if let Err(_) = self.request_tx
+            .unbounded_send(Command::CloseSecureChannel(call_tx))
+        {
+            return Box::new(future::err(Error::new("Command channel closed")));
+        }
+
+        // Wait for response.
+        let result = call_rx
+            .map_err(|_| Error::new("Command channel closed"))
+            .and_then(|result| result);
+
+        Box::new(result)
+    }
+}
+
+impl<Backend: ContractClientBackend + 'static> Drop for ContractClient<Backend> {
+    /// Close secure channel when going out of scope.
+    fn drop(&mut self) {
+        self.close_secure_channel().wait().unwrap_or(());
+    }
+}
diff --git a/rpc/client/src/future.rs b/rpc/client/src/future.rs
new file mode 100644
index 00000000000..29696a7667b
--- /dev/null
+++ b/rpc/client/src/future.rs
@@ -0,0 +1,29 @@
+use futures::Async;
+use futures::future::Future;
+
+use ekiden_common::error::Error;
+
+/// Future type for use in client calls.
+pub type ClientFuture<T> = Box<Future<Item = T, Error = Error> + Send>;
+
+/// Future trait with extra helper methods.
+pub trait FutureExtra: Future {
+    fn wait(self) -> Result<Self::Item, Self::Error>
+    where
+        Self: Sized;
+}
+
+impl<F: Future> FutureExtra for F {
+    fn wait(mut self) -> Result<Self::Item, Self::Error>
+    where
+        Self: Sized,
+    {
+        // Ekiden SGX enclaves are currently single-threaded and all OCALLs are blocking,
+        // so nothing should return Async::NotReady.
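// As an illustrative call site (inside an enclave, assuming some `client`
// built from this crate), the blocking semantics look like:
//
//     let result = client.init_secure_channel().wait();
//     assert!(result.is_ok());
//
// Outside SGX this trait is not imported; the blocking `Future::wait` from
// futures 0.1 itself is available there instead.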
+        match self.poll() {
+            Ok(Async::NotReady) => panic!("Futures in SGX should always block"),
+            Ok(Async::Ready(result)) => Ok(result),
+            Err(error) => Err(error),
+        }
+    }
+}
diff --git a/rpc/client/src/lib.rs b/rpc/client/src/lib.rs
new file mode 100644
index 00000000000..3bc6831876a
--- /dev/null
+++ b/rpc/client/src/lib.rs
@@ -0,0 +1,31 @@
+#[cfg(not(target_env = "sgx"))]
+extern crate grpc;
+#[cfg(not(target_env = "sgx"))]
+extern crate rand;
+#[cfg(not(target_env = "sgx"))]
+extern crate tls_api;
+#[cfg(not(target_env = "sgx"))]
+extern crate tokio_core;
+
+extern crate futures;
+extern crate protobuf;
+extern crate sodalite;
+
+extern crate ekiden_common;
+#[cfg(not(target_env = "sgx"))]
+extern crate ekiden_compute_api;
+extern crate ekiden_enclave_common;
+extern crate ekiden_rpc_common;
+
+pub mod backend;
+mod secure_channel;
+mod client;
+mod future;
+
+#[doc(hidden)]
+#[macro_use]
+pub mod macros;
+
+// Re-export.
+pub use client::ContractClient;
+pub use future::{ClientFuture, FutureExtra};
diff --git a/rpc/client/src/macros.rs b/rpc/client/src/macros.rs
new file mode 100644
index 00000000000..9d100955d36
--- /dev/null
+++ b/rpc/client/src/macros.rs
@@ -0,0 +1,88 @@
+// This is re-exported here only so it can be used in macros under a common name.
+pub use ekiden_enclave_common::quote;
+
+/// Create an RPC client for a given API.
+///
+/// # Examples
+///
+/// This macro should be invoked using a concrete API generated by `rpc_api` as
+/// follows:
+/// ```
+/// with_api! {
+///     create_client_rpc!(foo, foo_api, api);
+/// }
+/// ```
+///
+/// In this example, the generated client will be put into a module called `foo`
+/// which will use API structures from module `foo_api`. The API definitions will
+/// be passed as the last argument as defined by the `api` token.
+#[macro_export]
+macro_rules! create_client_rpc {
+    (
+        $output_module: ident,
+        $api_module: path,
+
+        metadata {
+            name = $metadata_name: ident ;
+            version = $metadata_version: expr ;
+            client_attestation_required = $client_attestation_required: expr ;
+        }
+
+        $(
+            rpc $method_name: ident ( $request_type: ty ) -> $response_type: ty ;
+        )*
+    ) => {
+        mod $output_module {
+            use $crate::*;
+            use $crate::backend::ContractClientBackend;
+
+            pub use $api_module::*;
+
+            pub struct Client<Backend: ContractClientBackend + 'static> {
+                client: ContractClient<Backend>,
+            }
+
+            #[allow(dead_code)]
+            impl<Backend: ContractClientBackend + 'static> Client<Backend> {
+                /// Create new client instance.
+                pub fn new(backend: Backend,
+                           mr_enclave: $crate::macros::quote::MrEnclave) -> Self {
+
+                    Client {
+                        client: ContractClient::new(
+                            backend,
+                            mr_enclave,
+                            $client_attestation_required,
+                        ),
+                    }
+                }
+
+                /// Initialize a secure channel with the contract.
+                ///
+                /// If this method is not called, the secure channel is automatically initialized
+                /// when making the first request.
+                pub fn init_secure_channel(&self) -> ClientFuture<()> {
+                    self.client.init_secure_channel()
+                }
+
+                /// Close secure channel.
+                ///
+                /// If this method is not called, the secure channel is automatically closed in
+                /// a blocking fashion when the client is dropped.
+                pub fn close_secure_channel(&self) -> ClientFuture<()> {
+                    self.client.close_secure_channel()
+                }
+
+                // Generate methods.
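// As a usage sketch, after `create_client_rpc!(foo, foo_api, api)` has been
// expanded, a caller could invoke a generated method roughly like this
// (hypothetical `hello` RPC; the channel initializes itself on first use):
//
//     let mut client = foo::Client::new(backend, mr_enclave);
//     let response = client.hello(request).wait();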
+                $(
+                    pub fn $method_name(
+                        &mut self,
+                        request: $request_type
+                    ) -> ClientFuture<$response_type> {
+                        self.client.call(stringify!($method_name), request)
+                    }
+                )*
+            }
+        }
+    };
+}
diff --git a/rpc/client/src/secure_channel.rs b/rpc/client/src/secure_channel.rs
new file mode 100644
index 00000000000..c647b1621cd
--- /dev/null
+++ b/rpc/client/src/secure_channel.rs
@@ -0,0 +1,221 @@
+use sodalite;
+
+use protobuf;
+use protobuf::Message;
+
+use ekiden_common::error::{Error, Result};
+use ekiden_common::random;
+use ekiden_enclave_common;
+use ekiden_rpc_common::api;
+use ekiden_rpc_common::secure_channel::{create_box, open_box, MonotonicNonceGenerator,
+                                        NonceGenerator, RandomNonceGenerator, SessionState,
+                                        NONCE_CONTEXT_AUTHIN, NONCE_CONTEXT_AUTHOUT,
+                                        NONCE_CONTEXT_INIT, NONCE_CONTEXT_REQUEST,
+                                        NONCE_CONTEXT_RESPONSE};
+
+// Secret seed used for generating private and public keys.
+const SECRET_SEED_LEN: usize = 32;
+type SecretSeed = [u8; SECRET_SEED_LEN];
+
+/// Secure channel context.
+///
+/// Contains state and methods needed for secure communication with the remote
+/// contract.
+#[derive(Default)]
+pub struct SecureChannelContext {
+    /// Client short-term private key.
+    client_private_key: sodalite::BoxSecretKey,
+    /// Client short-term public key.
+    client_public_key: sodalite::BoxPublicKey,
+    /// Contract long-term public key.
+    contract_long_term_public_key: sodalite::BoxPublicKey,
+    /// Contract short-term public key.
+    contract_short_term_public_key: sodalite::BoxPublicKey,
+    /// Cached shared key.
+    shared_key: Option<sodalite::SecretboxKey>,
+    /// Session state.
+    state: SessionState,
+    /// Long-term nonce generator.
+    long_term_nonce_generator: RandomNonceGenerator,
+    /// Short-term nonce generator.
+    short_term_nonce_generator: MonotonicNonceGenerator,
+}
+
+impl SecureChannelContext {
+    /// Reset secure channel context.
+    ///
+    /// Calling this function will generate new short-term keys for the client
+    /// and clear any contract public keys.
+    pub fn reset(&mut self) -> Result<()> {
+        // Generate new short-term key pair for the client.
+        let mut seed: SecretSeed = [0u8; SECRET_SEED_LEN];
+        random::get_random_bytes(&mut seed)?;
+
+        sodalite::box_keypair_seed(
+            &mut self.client_public_key,
+            &mut self.client_private_key,
+            &seed,
+        );
+
+        // Clear contract keys.
+        self.contract_long_term_public_key = [0; sodalite::BOX_PUBLIC_KEY_LEN];
+        self.contract_short_term_public_key = [0; sodalite::BOX_PUBLIC_KEY_LEN];
+
+        // Clear session keys.
+        self.shared_key = None;
+
+        // Reset session nonce.
+        self.short_term_nonce_generator.reset();
+
+        self.state.transition_to(SessionState::Init)?;
+
+        Ok(())
+    }
+
+    /// Set up the secure channel.
+    pub fn setup(
+        &mut self,
+        contract_astpk: &api::AuthenticatedShortTermPublicKey,
+        client_authentication_required: bool,
+    ) -> Result<ekiden_enclave_common::quote::IdentityAuthenticatedInfo> {
+        let iai = ekiden_enclave_common::quote::verify(contract_astpk.get_identity_proof())?;
+
+        self.contract_long_term_public_key = iai.identity.rpc_key_e_pub.clone();
+
+        // Open boxed short term contract public key.
+        let mut shared_key: Option<sodalite::SecretboxKey> = None;
+        let contract_short_term_public_key = open_box(
+            contract_astpk.get_boxed_short_term_public_key(),
+            &NONCE_CONTEXT_INIT,
+            &mut self.long_term_nonce_generator,
+            &self.contract_long_term_public_key,
+            &self.client_private_key,
+            &mut shared_key,
+        )?;
+
+        self.contract_short_term_public_key
+            .copy_from_slice(&contract_short_term_public_key);
+
+        if client_authentication_required {
+            self.state
+                .transition_to(SessionState::ClientAuthenticating)?;
+        } else {
+            self.state.transition_to(SessionState::Established)?;
+        }
+
+        // Cache shared channel key.
+        let mut key = self.shared_key
+            .get_or_insert([0u8; sodalite::SECRETBOX_KEY_LEN]);
+        sodalite::box_beforenm(
+            &mut key,
+            &self.contract_short_term_public_key,
+            &self.client_private_key,
+        );
+
+        Ok(iai)
+    }
+
+    /// Generate a client authentication box.
+    pub fn get_authentication(
+        &mut self,
+        client_ltsk: &sodalite::BoxSecretKey,
+        identity_proof: ekiden_enclave_common::api::IdentityProof,
+    ) -> Result<api::CryptoBox> {
+        if self.state != SessionState::ClientAuthenticating {
+            return Err(Error::new("Invalid secure channel access"));
+        }
+        let box_inner = create_box(
+            &self.client_public_key,
+            &NONCE_CONTEXT_AUTHIN,
+            &mut self.long_term_nonce_generator,
+            &self.contract_long_term_public_key,
+            client_ltsk,
+            &mut None,
+        )?;
+        let mut astpk = api::AuthenticatedShortTermPublicKey::new();
+        astpk.set_identity_proof(identity_proof);
+        astpk.set_boxed_short_term_public_key(box_inner);
+        let astpk_bytes = astpk.write_to_bytes()?;
+        let mut box_outer = create_box(
+            &astpk_bytes,
+            &NONCE_CONTEXT_AUTHOUT,
+            &mut self.short_term_nonce_generator,
+            &self.contract_short_term_public_key,
+            &self.client_private_key,
+            &mut self.shared_key,
+        )?;
+        box_outer.set_public_key(self.client_public_key.to_vec());
+        Ok(box_outer)
+    }
+
+    /// Call this after sending the client authentication box.
+    ///
+    /// There is no response message to pass to this method. It transitions the
+    /// channel to the `Established` state.
+    pub fn authentication_sent(&mut self) -> Result<()> {
+        self.state.transition_to(SessionState::Established)?;
+
+        Ok(())
+    }
+
+    /// Close secure channel.
+    ///
+    /// After the secure channel is closed, it must be reset to be used again.
+    pub fn close(&mut self) {
+        self.state.transition_to(SessionState::Closed).unwrap();
+    }
+
+    /// Check if secure channel is closed.
+    pub fn is_closed(&self) -> bool {
+        self.state == SessionState::Closed
+    }
+
+    /// Check if messages must be encrypted based on current channel state.
+    ///
+    /// Messages can only be unencrypted when the channel is in initialization state
+    /// and must be encrypted in all other states.
+    pub fn must_encrypt(&self) -> bool {
+        self.state == SessionState::Established
+    }
+
+    /// Get client short-term public key.
+    pub fn get_client_public_key(&self) -> &sodalite::BoxPublicKey {
+        &self.client_public_key
+    }
+
+    /// Create cryptographic box with RPC request.
+    pub fn create_request_box(
+        &mut self,
+        request: &api::PlainClientRequest,
+    ) -> Result<api::CryptoBox> {
+        let mut crypto_box = create_box(
+            &request.write_to_bytes()?,
+            &NONCE_CONTEXT_REQUEST,
+            &mut self.short_term_nonce_generator,
+            &self.contract_short_term_public_key,
+            &self.client_private_key,
+            &mut self.shared_key,
+        )?;
+
+        // Set public key so the contract knows which client this is.
+        crypto_box.set_public_key(self.client_public_key.to_vec());
+
+        Ok(crypto_box)
+    }
+
+    /// Open cryptographic box with RPC response.
+    pub fn open_response_box(
+        &mut self,
+        response: &api::CryptoBox,
+    ) -> Result<api::PlainClientResponse> {
+        let plain_response = open_box(
+            &response,
+            &NONCE_CONTEXT_RESPONSE,
+            &mut self.short_term_nonce_generator,
+            &self.contract_short_term_public_key,
+            &self.client_private_key,
+            &mut self.shared_key,
+        )?;
+
+        Ok(protobuf::parse_from_bytes(&plain_response)?)
+    }
+}
diff --git a/rpc/common/Cargo.toml b/rpc/common/Cargo.toml
new file mode 100644
index 00000000000..33819a7f6d9
--- /dev/null
+++ b/rpc/common/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "ekiden-rpc-common"
+version = "0.1.0-alpha.1"
+authors = ["Ekiden Developers "]
+description = "Ekiden common RPC functionality"
+keywords = ["ekiden"]
+repository = "https://github.com/ekiden/ekiden"
+build = "build.rs"
+
+[dependencies]
+ekiden-common = { path = "../../common", version = "0.1.0-alpha.1" }
+ekiden-enclave-common = { path = "../../enclave/common", version = "0.1.0-alpha.1" }
+protobuf = "1.4.3"
+sodalite = "0.3.0"
+byteorder = "1.2.1"
+
+[target.'cfg(not(target_env = "sgx"))'.dependencies]
+rand = "0.4.2"
+
+[build-dependencies]
+ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" }
diff --git a/rpc/common/Makefile.toml b/rpc/common/Makefile.toml
new file mode 100644
index 00000000000..9dcb41fa936
--- /dev/null
+++ b/rpc/common/Makefile.toml
@@ -0,0 +1,4 @@
+extend = "../Makefile.toml"
+
+[env]
+BUILD_FOR_SGX_TARGET = "true"
diff --git a/rpc/common/build.rs b/rpc/common/build.rs
new file mode 100644
index 00000000000..395a6d56fd9
--- /dev/null
+++ b/rpc/common/build.rs
@@ -0,0 +1,15 @@
+extern crate ekiden_tools;
+
+fn main() {
+    ekiden_tools::generate_mod_with_imports(
+        "src/generated",
+        &["ekiden_enclave_common::generated::enclave_identity"],
+        &["enclave_rpc"],
+    );
+
+    ekiden_tools::protoc(ekiden_tools::ProtocArgs {
+        out_dir: "src/generated/",
+        input: &["src/enclave_rpc.proto"],
+        includes: &["src/", "../../enclave/common/src/"],
+    });
+}
diff --git a/rpc/common/src/client.rs b/rpc/common/src/client.rs
new file mode 100644
index 00000000000..2fdb1d12fbd
--- /dev/null
+++ b/rpc/common/src/client.rs
@@ -0,0 +1,32 @@
+/// Endpoints available to the client inside an enclave.
+#[derive(Clone, Debug, Eq, PartialEq, Hash)]
+pub enum ClientEndpoint {
+    /// IAS proxy (get SPID).
+    IASProxyGetSpid,
+    /// IAS proxy (verify quote).
+    IASProxyVerifyQuote,
+    /// Key manager contract.
+    KeyManager,
+}
+
+impl ClientEndpoint {
+    /// Convert client endpoint from u16.
+    pub fn from_u16(value: u16) -> Option<ClientEndpoint> {
+        match value {
+            0 => None,
+            1 => Some(ClientEndpoint::IASProxyGetSpid),
+            2 => Some(ClientEndpoint::IASProxyVerifyQuote),
+            3 => Some(ClientEndpoint::KeyManager),
+            _ => None,
+        }
+    }
+
+    /// Convert client endpoint to u16.
+    pub fn as_u16(&self) -> u16 {
+        match *self {
+            ClientEndpoint::IASProxyGetSpid => 1,
+            ClientEndpoint::IASProxyVerifyQuote => 2,
+            ClientEndpoint::KeyManager => 3,
+        }
+    }
+}
diff --git a/rpc/common/src/enclave_rpc.proto b/rpc/common/src/enclave_rpc.proto
new file mode 100644
index 00000000000..c477c5bc727
--- /dev/null
+++ b/rpc/common/src/enclave_rpc.proto
@@ -0,0 +1,116 @@
+syntax = "proto3";
+
+package enclave_rpc;
+
+import "enclave_identity.proto";
+
+// Cryptographic box (encrypted and authenticated).
+message CryptoBox {
+    // Nonce.
+    bytes nonce = 1;
+    // Encrypted and authenticated payload.
+    bytes payload = 2;
+    // Optional originator public key.
+    bytes public_key = 3;
+}
+
+message PlainClientRequest {
+    // Request method.
+    string method = 1;
+    // Payload (must be valid Protocol Buffers, based on given method).
+    bytes payload = 2;
+}
+
+message ClientRequest {
+    oneof request {
+        // Plain-text request.
+        PlainClientRequest plain_request = 1;
+        // Encrypted request.
+        CryptoBox encrypted_request = 2;
+    }
+}
+
+message EnclaveRequest {
+    // The part that comes from a client.
+    repeated ClientRequest client_request = 1;
+}
+
+message Error {
+    // Error message.
+    string message = 1;
+}
+
+message PlainClientResponse {
+    enum Code {
+        INVALID = 0;
+
+        // 2xx indicates success.
+        SUCCESS = 200;
+
+        // 4xx indicates errors.
+        ERROR = 400;
+        ERROR_BAD_REQUEST = 401;
+        ERROR_METHOD_NOT_FOUND = 402;
+        ERROR_SECURE_CHANNEL = 403;
+        ERROR_METHOD_SECURE = 404;
+    }
+    // Response code.
+    Code code = 1;
+    // Payload (must be valid Protocol Buffers, based on given method).
+    bytes payload = 2;
+}
+
+message ClientResponse {
+    oneof response {
+        // Encrypted response.
+        CryptoBox encrypted_response = 1;
+        // Plain-text response.
+        PlainClientResponse plain_response = 2;
+    }
+}
+
+message EnclaveResponse {
+    // The part that goes to a client.
+    repeated ClientResponse client_response = 1;
+}
+
+// Meta methods.
+
+// Channel handshake (see issue #319)
+
+message AuthenticatedShortTermPublicKey {
+    // Sender's enclave identity proof.
+    enclave_identity.IdentityProof identity_proof = 1;
+    // Boxed 32-byte short-term public key.
+    CryptoBox boxed_short_term_public_key = 2;
+}
+
+// (C')
+message ChannelInitRequest {
+    // 32-byte client short-term public key.
+    bytes short_term_public_key = 1;
+}
+
+// (AE, Box[E'](E->C'))
+message ChannelInitResponse {
+    // Authenticated contract short-term public key.
+    // E->C' NONCE_CONTEXT_INIT without optional public key.
+    AuthenticatedShortTermPublicKey authenticated_short_term_public_key = 1;
+}
+
+// Optional (C', Box[AC, Box[C'](C->E)](C'->E'))
+message ChannelAuthRequest {
+    // Boxed authenticated client short-term public key.
+    // inner: C->E NONCE_CONTEXT_AUTHIN without optional public key.
+    // outer: C'->E' NONCE_CONTEXT_AUTHOUT with optional public key.
+    CryptoBox boxed_authenticated_short_term_public_key = 1;
+}
+
+message ChannelAuthResponse {
+}
+
+message ChannelCloseRequest {
+}
+
+message ChannelCloseResponse {
+}
diff --git a/rpc/common/src/lib.rs b/rpc/common/src/lib.rs
new file mode 100644
index 00000000000..61b7fe2720c
--- /dev/null
+++ b/rpc/common/src/lib.rs
@@ -0,0 +1,23 @@
+extern crate byteorder;
+extern crate protobuf;
+extern crate sodalite;
+
+#[macro_use]
+extern crate ekiden_common;
+extern crate ekiden_enclave_common;
+
+pub mod reflection;
+pub mod secure_channel;
+pub mod client;
+
+mod generated;
+
+#[macro_use]
+mod macros;
+
+mod protocol;
+
+pub mod api {
+    pub use generated::enclave_rpc::*;
+    pub use protocol::*;
+}
diff --git a/rpc/common/src/macros.rs b/rpc/common/src/macros.rs
new file mode 100644
index 00000000000..bb13a56b917
--- /dev/null
+++ b/rpc/common/src/macros.rs
@@ -0,0 +1,67 @@
+/// Macro for creating API definitions.
+///
+/// This is a meta-macro, which generates a new macro called `with_api` in its
+/// place. The `with_api` macro can be used to invoke other macros while
+/// passing the API as an argument to that macro. The position of the argument
+/// containing the API definition is specified by using the special `api` token
+/// in its place.
+///
+/// # Examples
+///
+/// For example, if you want to create enclave glue from the given API, and
+/// have the `create_enclave_rpc` macro available, you can invoke it with this
+/// concrete API by doing:
+/// ```
+/// with_api! {
+///     create_enclave_rpc!(api);
+/// }
+/// ```
+///
+/// # Limitations
+///
+/// Currently the `api` token can only appear as the last argument and there
+/// can be at most five arguments to the inner macro.
+#[macro_export]
+macro_rules! rpc_api {
+    (
+        $($api: tt)*
+    ) => {
+        /// Invoke another macro passing the API as specified argument.
+        ///
+        /// # Examples
+        ///
+        /// For example, if you want to create enclave glue from the given API, and
+        /// have the `create_enclave_rpc` macro available, you can invoke it with this
+        /// concrete API by doing:
+        /// ```
+        /// with_api! {
+        ///     create_enclave_rpc!(api);
+        /// }
+        /// ```
+        #[macro_export]
+        macro_rules! with_api {
+            // TODO: Repetition in nested macros currently not possible (see the Rust language
+            //       issue: https://github.com/rust-lang/rust/issues/35853). This is also the
+            //       reason why "api" can only be passed as the last argument.
+            ( $macro_name:ident ! ( api ) ; ) => {
+                $macro_name!( $($api)* );
+            };
+
+            ( $macro_name:ident ! ( $arg0:tt, api ) ; ) => {
+                $macro_name!( $arg0, $($api)* );
+            };
+
+            ( $macro_name:ident ! ( $arg0:tt, $arg1:tt, api ) ; ) => {
+                $macro_name!( $arg0, $arg1, $($api)* );
+            };
+
+            ( $macro_name:ident ! ( $arg0:tt, $arg1:tt, $arg2:tt, api ) ; ) => {
+                $macro_name!( $arg0, $arg1, $arg2, $($api)* );
+            };
+
+            ( $macro_name:ident ! ( $arg0:tt, $arg1:tt, $arg2:tt, $arg3:tt, api ) ; ) => {
+                $macro_name!( $arg0, $arg1, $arg2, $arg3, $($api)* );
+            };
+        }
+    }
+}
diff --git a/rpc/common/src/protocol.rs b/rpc/common/src/protocol.rs
new file mode 100644
index 00000000000..584825ffd70
--- /dev/null
+++ b/rpc/common/src/protocol.rs
@@ -0,0 +1,6 @@
+/// Secure channel initialization request.
+pub const METHOD_CHANNEL_INIT: &'static str = "_channel_init";
+/// Secure channel client authentication request.
+pub const METHOD_CHANNEL_AUTH: &'static str = "_channel_auth";
+/// Secure channel teardown request.
+pub const METHOD_CHANNEL_CLOSE: &'static str = "_channel_close";
diff --git a/rpc/common/src/reflection.rs b/rpc/common/src/reflection.rs
new file mode 100644
index 00000000000..401d5b0d839
--- /dev/null
+++ b/rpc/common/src/reflection.rs
@@ -0,0 +1,8 @@
+/// Descriptor of an RPC API method.
+pub struct ApiMethodDescriptor {
+    /// Method name.
+    pub name: String,
+    /// Whether the method call requires the client to be attested and therefore
+    /// the method handler can assume client's MRENCLAVE is available.
+    pub client_attestation_required: bool,
+}
diff --git a/rpc/common/src/secure_channel.rs b/rpc/common/src/secure_channel.rs
new file mode 100644
index 00000000000..00efd1cf28d
--- /dev/null
+++ b/rpc/common/src/secure_channel.rs
@@ -0,0 +1,283 @@
+//! Common structures for secure channels.
+use byteorder::{ByteOrder, LittleEndian};
+
+use sodalite;
+
+use ekiden_common::error::{Error, Result};
+use ekiden_common::random;
+
+use super::api;
+
+// Nonce context is used to prevent message reuse in a different context.
+pub const NONCE_CONTEXT_LEN: usize = 16;
+type NonceContext = [u8; NONCE_CONTEXT_LEN];
+/// Nonce for use in channel initialization context, contract -> client.
+pub const NONCE_CONTEXT_INIT: NonceContext = *b"EkidenS-----Init";
+/// Nonce for use in channel authentication context, client -> contract.
+pub const NONCE_CONTEXT_AUTHIN: NonceContext = *b"EkidenS---AuthIn";
+/// Nonce for use in channel authentication context, client -> contract.
+pub const NONCE_CONTEXT_AUTHOUT: NonceContext = *b"EkidenS--AuthOut";
+/// Nonce for use in request context.
+pub const NONCE_CONTEXT_REQUEST: NonceContext = *b"EkidenS--Request";
+/// Nonce for use in response context.
+pub const NONCE_CONTEXT_RESPONSE: NonceContext = *b"EkidenS-Response";
+
+/// Nonce generator.
+pub trait NonceGenerator {
+    /// Reset nonce generator.
+    fn reset(&mut self);
+
+    /// Generate a new nonce.
+    fn get_nonce(&mut self, context: &NonceContext) -> Result<sodalite::BoxNonce>;
+
+    /// Unpack nonce from a cryptographic box.
+    fn unpack_nonce(
+        &mut self,
+        crypto_box: &api::CryptoBox,
+        context: &NonceContext,
+    ) -> Result<sodalite::BoxNonce> {
+        let mut nonce = [0u8; sodalite::BOX_NONCE_LEN];
+        nonce.copy_from_slice(&crypto_box.get_nonce());
+
+        // Ensure that the nonce context is correct.
+        if nonce[..NONCE_CONTEXT_LEN] != context[..NONCE_CONTEXT_LEN] {
+            return Err(Error::new("Invalid nonce"));
+        }
+
+        Ok(nonce)
+    }
+}
+
+/// Random nonce generator.
+pub struct RandomNonceGenerator {}
+
+impl RandomNonceGenerator {
+    /// Create new random nonce generator.
+    pub fn new() -> Self {
+        RandomNonceGenerator {}
+    }
+}
+
+impl NonceGenerator for RandomNonceGenerator {
+    fn reset(&mut self) {
+        // No reset needed.
+    }
+
+    fn get_nonce(&mut self, context: &NonceContext) -> Result<sodalite::BoxNonce> {
+        let mut nonce: sodalite::BoxNonce = [0; sodalite::BOX_NONCE_LEN];
+        random::get_random_bytes(&mut nonce)?;
+
+        nonce[..NONCE_CONTEXT_LEN].copy_from_slice(context);
+
+        Ok(nonce)
+    }
+}
+
+impl Default for RandomNonceGenerator {
+    fn default() -> RandomNonceGenerator {
+        RandomNonceGenerator::new()
+    }
+}
+
+/// Monotonic nonce generator.
+pub struct MonotonicNonceGenerator {
+    /// Next nonce to be sent.
+    next_send_nonce: u64,
+    /// Last nonce that was received.
+    last_received_nonce: Option<u64>,
+}
+
+impl MonotonicNonceGenerator {
+    /// Create new monotonic nonce generator.
+    pub fn new() -> Self {
+        MonotonicNonceGenerator {
+            next_send_nonce: 0, // TODO: Random initialization between 0 and 2**48 - 1?
+            last_received_nonce: None,
+        }
+    }
+}
+
+impl NonceGenerator for MonotonicNonceGenerator {
+    /// Reset nonce generator.
+    fn reset(&mut self) {
+        self.next_send_nonce = 0;
+        self.last_received_nonce = None;
+    }
+
+    fn get_nonce(&mut self, context: &NonceContext) -> Result<sodalite::BoxNonce> {
+        let mut nonce: Vec<u8> = context.to_vec();
+        nonce.append(&mut vec![0; 8]);
+
+        LittleEndian::write_u64(&mut nonce[NONCE_CONTEXT_LEN..], self.next_send_nonce);
+        self.next_send_nonce += 1;
+
+        assert_eq!(nonce.len(), sodalite::BOX_NONCE_LEN);
+
+        let mut fixed_nonce: sodalite::BoxNonce = [0; sodalite::BOX_NONCE_LEN];
+        fixed_nonce.copy_from_slice(&nonce);
+
+        Ok(fixed_nonce)
+    }
+
+    fn unpack_nonce(
+        &mut self,
+        crypto_box: &api::CryptoBox,
+        context: &NonceContext,
+    ) -> Result<sodalite::BoxNonce> {
+        let mut nonce = [0u8; sodalite::BOX_NONCE_LEN];
+        nonce.copy_from_slice(&crypto_box.get_nonce());
+
+        // Ensure that the nonce context is correct.
+        if nonce[..NONCE_CONTEXT_LEN] != context[..NONCE_CONTEXT_LEN] {
+            return Err(Error::new("Invalid nonce"));
+        }
+
+        // Decode counter.
+        let counter_value = LittleEndian::read_u64(&nonce[NONCE_CONTEXT_LEN..]);
+
+        // Ensure that the nonce has increased.
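// The nonce layout produced above is a 16-byte context prefix followed by an
// 8-byte little-endian counter, exactly filling sodalite's 24-byte box nonce.
// An illustrative check of that layout, using the types from this module:
//
//     let mut gen = MonotonicNonceGenerator::new();
//     let n0 = gen.get_nonce(&NONCE_CONTEXT_REQUEST).unwrap();
//     let n1 = gen.get_nonce(&NONCE_CONTEXT_REQUEST).unwrap();
//     assert_eq!(&n0[..NONCE_CONTEXT_LEN], &NONCE_CONTEXT_REQUEST[..]);
//     assert_eq!(LittleEndian::read_u64(&n0[NONCE_CONTEXT_LEN..]), 0);
//     assert_eq!(LittleEndian::read_u64(&n1[NONCE_CONTEXT_LEN..]), 1);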
+        match self.last_received_nonce {
+            Some(last_nonce) => {
+                if counter_value <= last_nonce {
+                    return Err(Error::new("Invalid nonce"));
+                }
+            }
+            None => {}
+        }
+
+        self.last_received_nonce = Some(counter_value);
+
+        Ok(nonce)
+    }
+}
+
+impl Default for MonotonicNonceGenerator {
+    fn default() -> MonotonicNonceGenerator {
+        MonotonicNonceGenerator::new()
+    }
+}
+
+/// Current state of the secure channel session.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum SessionState {
+    /// Session has been closed and must be reset.
+    ///
+    /// After the session is reset, it will transition into `Init`.
+    Closed,
+    /// Session is being initialized.
+    ///
+    /// From this state, the session will transition into `ClientAuthenticating` or `Established`.
+    Init,
+    /// Client is authenticating (client only).
+    ///
+    /// From this state, the session will transition into `Established`.
+    /// The contract does not use this state. The contract is in the `Established` state while the
+    /// client is in this state. The contract tracks client authentication status in
+    /// `ekiden_rpc_trusted::secure_channel::ClientSession::client_mr_enclave`.
+    ClientAuthenticating,
+    /// Secure channel is established.
+    Established,
+}
+
+impl SessionState {
+    /// Transition secure channel to a new state.
+    pub fn transition_to(&mut self, new_state: SessionState) -> Result<()> {
+        match (*self, new_state) {
+            (SessionState::Closed, SessionState::Init) => {}
+            (SessionState::Init, SessionState::Established) => {}
+            (SessionState::Init, SessionState::ClientAuthenticating) => {}
+            (SessionState::ClientAuthenticating, SessionState::Established) => {}
+            (_, SessionState::Closed) => {}
+            transition => {
+                return Err(Error::new(format!(
+                    "Invalid secure channel state transition: {:?}",
+                    transition
+                )))
+            }
+        }
+
+        // Update state if transition is allowed.
+        *self = new_state;
+
+        Ok(())
+    }
+}
+
+impl Default for SessionState {
+    fn default() -> Self {
+        SessionState::Closed
+    }
+}
+
+/// Create cryptographic box (encrypted and authenticated).
+pub fn create_box<NG: NonceGenerator>(
+    payload: &[u8],
+    nonce_context: &NonceContext,
+    nonce_generator: &mut NG,
+    public_key: &sodalite::BoxPublicKey,
+    private_key: &sodalite::BoxSecretKey,
+    shared_key: &mut Option<sodalite::SecretboxKey>,
+) -> Result<api::CryptoBox> {
+    let mut crypto_box = api::CryptoBox::new();
+    let mut key_with_payload = vec![0u8; payload.len() + 32];
+    let mut encrypted = vec![0u8; payload.len() + 32];
+    let nonce = nonce_generator.get_nonce(&nonce_context)?;
+
+    // The first 32 bytes are used to store the shared secret key, so we must make
+    // room for them. The box_* methods also require that they are zero-initialized.
+    key_with_payload[32..].copy_from_slice(payload);
+
+    if shared_key.is_none() {
+        // Compute shared key so we can speed up subsequent box operations.
+        let mut key = shared_key.get_or_insert([0u8; sodalite::SECRETBOX_KEY_LEN]);
+        sodalite::box_beforenm(&mut key, &public_key, &private_key);
+    }
+
+    match sodalite::box_afternm(
+        &mut encrypted,
+        &key_with_payload,
+        &nonce,
+        &shared_key.unwrap(),
+    ) {
+        Ok(_) => {}
+        _ => return Err(Error::new("Box operation failed")),
+    };
+
+    crypto_box.set_nonce(nonce.to_vec());
+    crypto_box.set_payload(encrypted);
+
+    Ok(crypto_box)
+}
+
+/// Open cryptographic box.
+pub fn open_box<NG: NonceGenerator>(
+    crypto_box: &api::CryptoBox,
+    nonce_context: &NonceContext,
+    nonce_generator: &mut NG,
+    public_key: &sodalite::BoxPublicKey,
+    private_key: &sodalite::BoxSecretKey,
+    shared_key: &mut Option<sodalite::SecretboxKey>,
+) -> Result<Vec<u8>> {
+    // Reserve space for payload.
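// Note on the 32-byte offsets in create_box/open_box: sodalite follows the
// NaCl convention where plaintext buffers begin with 32 zero bytes, so a
// payload round-trips as in this sketch (the key variables are hypothetical;
// the cached shared key lets the second call skip box_beforenm):
//
//     let mut shared: Option<sodalite::SecretboxKey> = None;
//     let mut gen = RandomNonceGenerator::new();
//     let boxed = create_box(b"hi", &NONCE_CONTEXT_INIT, &mut gen,
//                            &their_public, &my_private, &mut shared).unwrap();
//     let opened = open_box(&boxed, &NONCE_CONTEXT_INIT, &mut gen,
//                           &their_public, &my_private, &mut shared).unwrap();
//     assert_eq!(opened, b"hi".to_vec());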
+    let mut payload = vec![0u8; crypto_box.get_payload().len()];
+
+    if shared_key.is_none() {
+        // Compute shared key so we can speed up subsequent box operations.
+        let mut key = shared_key.get_or_insert([0u8; sodalite::SECRETBOX_KEY_LEN]);
+        sodalite::box_beforenm(&mut key, &public_key, &private_key);
+    }
+
+    match sodalite::box_open_afternm(
+        &mut payload,
+        &crypto_box.get_payload(),
+        &nonce_generator.unpack_nonce(&crypto_box, &nonce_context)?,
+        &shared_key.unwrap(),
+    ) {
+        Ok(_) => {
+            // Trim first all-zero 32 bytes that were used to allocate space for the shared
+            // secret key.
+            Ok(payload[32..].to_vec())
+        }
+        _ => Err(Error::new("Failed to open box")),
+    }
+}
diff --git a/rpc/edl/Cargo.toml b/rpc/edl/Cargo.toml
new file mode 100644
index 00000000000..21acd749eae
--- /dev/null
+++ b/rpc/edl/Cargo.toml
@@ -0,0 +1,10 @@
+[package]
+name = "ekiden-rpc-edl"
+version = "0.1.0-alpha.1"
+authors = ["Ekiden Developers "]
+description = "Ekiden RPC EDL"
+keywords = ["ekiden"]
+repository = "https://github.com/ekiden/ekiden"
+
+[dependencies]
+ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" }
diff --git a/rpc/edl/Makefile.toml b/rpc/edl/Makefile.toml
new file mode 100644
index 00000000000..8ee118dc949
--- /dev/null
+++ b/rpc/edl/Makefile.toml
@@ -0,0 +1 @@
+extend = "../Makefile.toml"
diff --git a/rpc/edl/src/lib.rs b/rpc/edl/src/lib.rs
new file mode 100644
index 00000000000..56f50cb2edd
--- /dev/null
+++ b/rpc/edl/src/lib.rs
@@ -0,0 +1,6 @@
+#[macro_use]
+extern crate ekiden_tools;
+
+define_edl! {
+    "rpc.edl"
+}
diff --git a/rpc/edl/src/rpc.edl b/rpc/edl/src/rpc.edl
new file mode 100644
index 00000000000..eeeb471d019
--- /dev/null
+++ b/rpc/edl/src/rpc.edl
@@ -0,0 +1,24 @@
+enclave {
+    trusted {
+        // Incoming RPC call interface (client -> enclave).
+        public void rpc_call(
+            [user_check] const uint8_t *request_data,
+            size_t request_length,
+            [user_check] uint8_t *response_data,
+            size_t response_capacity,
+            [out] size_t *response_length
+        );
+    };
+
+    untrusted {
+        // Outgoing RPC call interface (enclave -> {enclave, service}).
+        void untrusted_rpc_call(
+            uint16_t endpoint,
+            [in, size=request_length] const uint8_t *request_data,
+            size_t request_length,
+            [out, size=response_capacity] uint8_t *response_data,
+            size_t response_capacity,
+            [out] size_t *response_length
+        );
+    };
+};
diff --git a/rpc/trusted/Cargo.toml b/rpc/trusted/Cargo.toml
new file mode 100644
index 00000000000..59a1accbb27
--- /dev/null
+++ b/rpc/trusted/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "ekiden-rpc-trusted"
+version = "0.1.0-alpha.1"
+authors = ["Ekiden Developers "]
+description = "Ekiden RPC (trusted part)"
+keywords = ["ekiden"]
+repository = "https://github.com/ekiden/ekiden"
+
+[dependencies]
+sodalite = "0.3.0"
+protobuf = "1.4.3"
+lazy_static = { version = "1.0", features = ["spin_no_std"] }
+ekiden-common = { path = "../../common", version = "0.1.0-alpha.1" }
+ekiden-enclave-common = { path = "../../enclave/common", version = "0.1.0-alpha.1" }
+ekiden-enclave-trusted = { path = "../../enclave/trusted", version = "0.1.0-alpha.1" }
+ekiden-rpc-common = { path = "../common", version = "0.1.0-alpha.1" }
+ekiden-rpc-client = { path = "../client", version = "0.1.0-alpha.1" }
+
+[target.'cfg(target_env = "sgx")'.dependencies]
+futures-sgx = { git = "https://github.com/ekiden/futures-rs" }
+
+[target.'cfg(not(target_env = "sgx"))'.dependencies]
+rand = "0.4.2"
+futures = "0.1"
+
+[build-dependencies]
+ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" }
diff --git a/rpc/trusted/Makefile.toml b/rpc/trusted/Makefile.toml
new file mode 100644
index 00000000000..0914bd0f4f2
--- /dev/null
+++ b/rpc/trusted/Makefile.toml
@@ -0,0 +1,5 @@
+extend = "../Makefile.toml"
+
+[env]
+BUILD_FOR_HOST_TARGET = "false"
+BUILD_FOR_SGX_TARGET = "true"
diff --git a/rpc/trusted/benches/benchmarks.rs b/rpc/trusted/benches/benchmarks.rs
new file mode 100644
index 00000000000..64fd6409645
--- /dev/null
+++ b/rpc/trusted/benches/benchmarks.rs
@@ -0,0 +1,287 @@
+#![feature(test)]
+
+extern crate protobuf;
+extern crate sodalite;
+extern crate test;
+
+extern crate ekiden_common;
+extern crate ekiden_enclave_trusted;
+extern crate ekiden_rpc_common;
+extern crate ekiden_rpc_trusted;
+
+use test::Bencher;
+
+use protobuf::{Message, MessageStatic};
+use protobuf::well_known_types::Empty;
+
+use ekiden_common::error::Result;
+use ekiden_common::random;
+use ekiden_rpc_common::api;
+use ekiden_rpc_common::reflection::ApiMethodDescriptor;
+use ekiden_rpc_common::secure_channel::{create_box, open_box, MonotonicNonceGenerator,
+                                        RandomNonceGenerator, NONCE_CONTEXT_INIT,
+                                        NONCE_CONTEXT_REQUEST, NONCE_CONTEXT_RESPONSE};
+use ekiden_rpc_trusted::dispatcher::{rpc_call, Dispatcher, EnclaveMethod};
+use ekiden_rpc_trusted::request::Request;
+
+/// Register an empty method.
+fn register_empty_method() {
+    let mut dispatcher = Dispatcher::get();
+
+    // Register dummy RPC method.
+    dispatcher.add_method(EnclaveMethod::new(
+        ApiMethodDescriptor {
+            name: "benchmark_empty".to_owned(),
+            client_attestation_required: false,
+        },
+        |_request: &Request<Empty>| -> Result<Empty> { Ok(Empty::new()) },
+    ));
+}
+
+/// Prepare secure channel enclave parameters.
+fn prepare_secure_channel_enclave() {
+    ekiden_enclave_trusted::identity::nosgx_init_dummy();
+}
+
+/// Prepare secure channel client parameters.
+fn prepare_secure_channel_client() -> (sodalite::BoxPublicKey, sodalite::BoxSecretKey) {
+    // Generate new short-term key pair for the client.
+    let mut seed = [0u8; 32];
+    random::get_random_bytes(&mut seed).unwrap();
+
+    let mut public_key: sodalite::BoxPublicKey = [0u8; 32];
+    let mut private_key: sodalite::BoxSecretKey = [0u8; 32];
+    sodalite::box_keypair_seed(&mut public_key, &mut private_key, &seed);
+
+    (public_key, private_key)
+}
+
+/// Dispatch secure channel initialization request.
+fn init_secure_channel(
+    public_key: &sodalite::BoxPublicKey,
+    private_key: &sodalite::BoxSecretKey,
+) -> sodalite::BoxPublicKey {
+    // Generate request.
+    let mut request = api::ChannelInitRequest::new();
+    request.set_short_term_public_key(public_key.to_vec());
+
+    // Dispatch channel init request.
+    let request = Request::new(
+        request.write_to_bytes().unwrap(),
+        api::METHOD_CHANNEL_INIT.to_owned(),
+        None,
+        None,
+    );
+
+    let dispatcher = Dispatcher::get();
+    let mut response = dispatcher.dispatch(request);
+    let response = response.take_message().take_plain_response();
+    assert_eq!(response.get_code(), api::PlainClientResponse_Code::SUCCESS);
+
+    let response: api::ChannelInitResponse =
+        protobuf::parse_from_bytes(response.get_payload()).unwrap();
+
+    let mut nonce_generator = RandomNonceGenerator::new();
+    let stpk_vec = open_box(
+        response
+            .get_authenticated_short_term_public_key()
+            .get_boxed_short_term_public_key(),
+        &NONCE_CONTEXT_INIT,
+        &mut nonce_generator,
+        &ekiden_enclave_trusted::identity::get_identity()
+            .public
+            .rpc_key_e_pub,
+        private_key,
+        &mut None,
+    ).unwrap();
+
+    let mut short_term_public_key = [0u8; 32];
+    short_term_public_key.copy_from_slice(&stpk_vec);
+
+    short_term_public_key
+}
+
+/// Dispatch secure channel request.
+fn make_secure_channel_request<S, Rq, Rs>(
+    nonce_generator: &mut MonotonicNonceGenerator,
+    contract_public_key: &sodalite::BoxPublicKey,
+    public_key: &sodalite::BoxPublicKey,
+    private_key: &sodalite::BoxSecretKey,
+    mut shared_key: &mut Option<sodalite::SecretboxKey>,
+    method: S,
+    request: Rq,
+) -> Rs
+where
+    S: Into<String>,
+    Rq: Message,
+    Rs: Message + MessageStatic,
+{
+    let mut plain_client_request = api::PlainClientRequest::new();
+    plain_client_request.set_method(method.into());
+    plain_client_request.set_payload(request.write_to_bytes().unwrap());
+
+    let mut crypto_box = create_box(
+        &plain_client_request.write_to_bytes().unwrap(),
+        &NONCE_CONTEXT_REQUEST,
+        nonce_generator,
+        &contract_public_key,
+        &private_key,
+        &mut shared_key,
+    ).unwrap();
+
+    // Set public key so the contract knows which client this is.
+    crypto_box.set_public_key(public_key.to_vec());
+
+    let mut client_request = api::ClientRequest::new();
+    client_request.set_encrypted_request(crypto_box);
+
+    // Generate encrypted enclave request.
+    let mut enclave_request = api::EnclaveRequest::new();
+    enclave_request.mut_client_request().push(client_request);
+
+    let enclave_request = enclave_request.write_to_bytes().unwrap();
+
+    let mut response: Vec<u8> = Vec::with_capacity(64 * 1024);
+    let mut response_length = 0;
+
+    // Invoke the RPC call ECALL handler.
+    rpc_call(
+        enclave_request.as_ptr(),
+        enclave_request.len(),
+        response.as_mut_ptr(),
+        response.capacity(),
+        &mut response_length,
+    );
+
+    unsafe {
+        response.set_len(response_length);
+    }
+
+    // Decrypt response.
+    let enclave_response: api::EnclaveResponse = protobuf::parse_from_bytes(&response).unwrap();
+    assert_eq!(enclave_response.get_client_response().len(), 1);
+
+    let client_response = &enclave_response.get_client_response()[0];
+    assert!(client_response.has_encrypted_response());
+
+    let plain_response = open_box(
+        &client_response.get_encrypted_response(),
+        &NONCE_CONTEXT_RESPONSE,
+        nonce_generator,
+        &contract_public_key,
+        &private_key,
+        &mut shared_key,
+    ).unwrap();
+
+    let plain_response: api::PlainClientResponse =
+        protobuf::parse_from_bytes(&plain_response).unwrap();
+    assert_eq!(
+        plain_response.get_code(),
+        api::PlainClientResponse_Code::SUCCESS
+    );
+
+    protobuf::parse_from_bytes(&plain_response.get_payload()).unwrap()
+}
+
+/// Benchmark dispatch of a plain empty Protocol Buffers request.
+#[bench]
+fn benchmark_dispatch_empty_request(b: &mut Bencher) {
+    register_empty_method();
+
+    // Prepare a dummy request.
+    let request = Request::new(
+        Empty::new().write_to_bytes().unwrap(),
+        "benchmark_empty".to_owned(),
+        None,
+        None,
+    );
+
+    b.iter(|| {
+        let dispatcher = Dispatcher::get();
+        let mut response = dispatcher.dispatch(request.clone());
+        assert_eq!(
+            response.take_message().get_plain_response().get_code(),
+            api::PlainClientResponse_Code::SUCCESS
+        );
+    });
+}
+
+/// Benchmark secure channel initialization.
+///
+/// Note that this includes generating client cryptographic parameters.
+#[bench]
+fn benchmark_secure_channel_init(b: &mut Bencher) {
+    register_empty_method();
+    prepare_secure_channel_enclave();
+
+    b.iter(|| {
+        let (public_key, private_key) = prepare_secure_channel_client();
+        init_secure_channel(&public_key, &private_key);
+    });
+}
+
+/// Benchmark dispatch of an encrypted empty Protocol Buffers request over a secure channel,
+/// where the shared key is only derived once and then cached (which is what the actual
+/// client does as well).
+///
+/// Note that this includes generating encrypted requests for the client.
+#[bench]
+fn benchmark_secure_channel_empty_request(b: &mut Bencher) {
+    register_empty_method();
+    prepare_secure_channel_enclave();
+    let (public_key, private_key) = prepare_secure_channel_client();
+    let contract_public_key = init_secure_channel(&public_key, &private_key);
+    let mut nonce_generator = MonotonicNonceGenerator::new();
+    let mut shared_key: Option<sodalite::SecretboxKey> = None;
+
+    // First request to initialize shared key.
+    let _response: Empty = make_secure_channel_request(
+        &mut nonce_generator,
+        &contract_public_key,
+        &public_key,
+        &private_key,
+        &mut shared_key,
+        "benchmark_empty",
+        Empty::new(),
+    );
+
+    b.iter(|| {
+        let _response: Empty = make_secure_channel_request(
+            &mut nonce_generator,
+            &contract_public_key,
+            &public_key,
+            &private_key,
+            &mut shared_key,
+            "benchmark_empty",
+            Empty::new(),
+        );
+    });
+}
+
+/// Benchmark dispatch of an encrypted empty Protocol Buffers request over a secure channel,
+/// where the shared key is derived each time, requiring the use of expensive public key
+/// operations.
+///
+/// Note that this includes generating encrypted requests for the client.
+#[bench]
+fn benchmark_secure_channel_empty_request_no_shared_key(b: &mut Bencher) {
+    register_empty_method();
+    prepare_secure_channel_enclave();
+    let (public_key, private_key) = prepare_secure_channel_client();
+    let contract_public_key = init_secure_channel(&public_key, &private_key);
+    let mut nonce_generator = MonotonicNonceGenerator::new();
+
+    b.iter(|| {
+        // Use an empty shared key each time to force expensive public key ops.
+        let mut shared_key: Option<sodalite::SecretboxKey> = None;
+        let _response: Empty = make_secure_channel_request(
+            &mut nonce_generator,
+            &contract_public_key,
+            &public_key,
+            &private_key,
+            &mut shared_key,
+            "benchmark_empty",
+            Empty::new(),
+        );
+    });
+}
diff --git a/rpc/trusted/src/client.rs b/rpc/trusted/src/client.rs
new file mode 100644
index 00000000000..2d9f3768c48
--- /dev/null
+++ b/rpc/trusted/src/client.rs
@@ -0,0 +1,67 @@
+//! OCALL-based RPC client backend used inside enclaves.
+
+use futures::future::{self, Future};
+
+use ekiden_common::error::Result;
+use ekiden_enclave_trusted::identity;
+use ekiden_rpc_client::ClientFuture;
+use ekiden_rpc_client::backend::{ContractClientBackend, ContractClientCredentials};
+use ekiden_rpc_common::api;
+use ekiden_rpc_common::client::ClientEndpoint;
+
+use super::untrusted;
+
+/// Contract client that can be used inside enclaves.
+///
+/// It relays contract calls via an OCALL to the untrusted world which may then
+/// dispatch the calls to other compute nodes.
+pub struct OcallContractClientBackend {
+    /// Endpoint that the client is connecting to.
+    endpoint: ClientEndpoint,
+}
+
+impl OcallContractClientBackend {
+    /// Construct new OCALL contract client backend.
+    pub fn new(endpoint: ClientEndpoint) -> Result<Self> {
+        Ok(OcallContractClientBackend { endpoint: endpoint })
+    }
+}
+
+impl ContractClientBackend for OcallContractClientBackend {
+    /// Spawn future using an executor.
+    fn spawn<F: Future<Item = (), Error = ()> + Send + 'static>(&self, _future: F) {
+        panic!("Attempted to spawn future using OCALL backend");
+    }
+
+    /// Call contract.
+    fn call(&self, client_request: api::ClientRequest) -> ClientFuture<api::ClientResponse> {
+        let endpoint = self.endpoint.clone();
+
+        Box::new(future::lazy(move || {
+            Ok(untrusted::untrusted_call_endpoint(
+                &endpoint,
+                client_request,
+            )?)
+        }))
+    }
+
+    /// Call contract with raw data.
+    fn call_raw(&self, client_request: Vec<u8>) -> ClientFuture<Vec<u8>> {
+        let endpoint = self.endpoint.clone();
+
+        Box::new(future::lazy(move || {
+            Ok(untrusted::untrusted_call_endpoint_raw(
+                &endpoint,
+                client_request,
+            )?)
+        }))
+    }
+
+    /// Get credentials.
+    fn get_credentials(&self) -> Option<ContractClientCredentials> {
+        Some(ContractClientCredentials {
+            long_term_private_key: identity::get_identity().rpc_key_e_priv,
+            identity_proof: identity::get_proof(),
+        })
+    }
+}
diff --git a/rpc/trusted/src/dispatcher.rs b/rpc/trusted/src/dispatcher.rs
new file mode 100644
index 00000000000..db43b707659
--- /dev/null
+++ b/rpc/trusted/src/dispatcher.rs
@@ -0,0 +1,326 @@
+//! RPC method dispatcher.
+use std::collections::HashMap;
+#[cfg(not(target_env = "sgx"))]
+use std::sync::{Mutex, MutexGuard};
+#[cfg(target_env = "sgx")]
+use std::sync::SgxMutex as Mutex;
+#[cfg(target_env = "sgx")]
+use std::sync::SgxMutexGuard as MutexGuard;
+
+use ekiden_common::error::Result;
+use ekiden_common::profile_block;
+use ekiden_common::serializer::{Deserializable, Serializable};
+use ekiden_enclave_trusted::utils::{read_enclave_request, write_enclave_response};
+use ekiden_rpc_common::api;
+use ekiden_rpc_common::reflection::ApiMethodDescriptor;
+
+use super::{request, response};
+use super::error::DispatchError;
+use super::secure_channel::open_request_box;
+
+/// List of methods that allow plain requests. All other requests must be made over
+/// a secure channel.
+const PLAIN_METHODS: &'static [&'static str] = &[
+    api::METHOD_CHANNEL_INIT,
+    // Authentication uses its own boxes very similar to RPC encryption, but with its own nonce
+    // contexts.
+    api::METHOD_CHANNEL_AUTH,
+];
+
+/// Handler for an API method.
+pub trait ApiMethodHandler<Request, Response> {
+    /// Invoke the method implementation and return a response.
+    fn handle(&self, request: &request::Request<Request>) -> Result<Response>;
+}
+
+impl<Request, Response, F> ApiMethodHandler<Request, Response> for F
+where
+    Request: Send + 'static,
+    Response: Send + 'static,
+    F: Fn(&request::Request<Request>) -> Result<Response> + Send + Sync + 'static,
+{
+    fn handle(&self, request: &request::Request<Request>) -> Result<Response> {
+        (*self)(request)
+    }
+}
+
+/// Dispatcher for an API method.
+pub trait ApiMethodHandlerDispatch {
+    /// Dispatches the given raw request.
+    fn dispatch(&self, request: &request::Request<Vec<u8>>) -> response::Response;
+}
+
+struct ApiMethodHandlerDispatchImpl<Request, Response> {
+    descriptor: ApiMethodDescriptor,
+    handler: Box<ApiMethodHandler<Request, Response> + Sync + Send>,
+}
+
+impl<Request, Response> ApiMethodHandlerDispatch for ApiMethodHandlerDispatchImpl<Request, Response>
+where
+    Request: Deserializable + Send + 'static,
+    Response: Serializable + Send + 'static,
+{
+    /// Dispatches the given raw request.
+    fn dispatch(&self, request: &request::Request<Vec<u8>>) -> response::Response {
+        // If the method requires client attestation ensure that it has been provided.
+        if self.descriptor.client_attestation_required && request.get_client_mr_enclave().is_none()
+        {
+            return response::Response::error(
+                &request,
+                api::PlainClientResponse_Code::ERROR_BAD_REQUEST,
+                "Method requires client attestation",
+            );
+        }
+
+        // Deserialize request.
+        let request_message = match Deserializable::read(&request) {
+            Ok(message) => request.copy_metadata_to(message),
+            _ => {
+                return response::Response::error(
+                    &request,
+                    api::PlainClientResponse_Code::ERROR_BAD_REQUEST,
+                    "Unable to parse request payload",
+                )
+            }
+        };
+
+        // Invoke handler.
+        let response = match self.handler.handle(&request_message) {
+            Ok(response) => response,
+            Err(error) => {
+                return response::Response::error(
+                    &request,
+                    api::PlainClientResponse_Code::ERROR,
+                    error.message.as_str(),
+                )
+            }
+        };
+
+        // Serialize response.
+        let response = match Response::write(&response) {
+            Ok(response) => response,
+            _ => {
+                return response::Response::error(
+                    &request,
+                    api::PlainClientResponse_Code::ERROR,
+                    "Unable to serialize response payload",
+                )
+            }
+        };
+
+        response::Response::success(&request, response)
+    }
+}
+
+/// Enclave method descriptor.
+pub struct EnclaveMethod {
+    /// Method name.
+    name: String,
+    dispatcher: Box<ApiMethodHandlerDispatch + Sync + Send>,
+}
+
+impl EnclaveMethod {
+    /// Create a new enclave method descriptor.
+    pub fn new<Request, Response, Handler>(method: ApiMethodDescriptor, handler: Handler) -> Self
+    where
+        Request: Deserializable + Send + 'static,
+        Response: Serializable + Send + 'static,
+        Handler: ApiMethodHandler<Request, Response> + Sync + Send + 'static,
+    {
+        EnclaveMethod {
+            name: method.name.clone(),
+            dispatcher: Box::new(ApiMethodHandlerDispatchImpl {
+                descriptor: method,
+                handler: Box::new(handler),
+            }),
+        }
+    }
+
+    pub fn get_name(&self) -> &String {
+        &self.name
+    }
+
+    pub fn dispatch(&self, request: &request::Request<Vec<u8>>) -> response::Response {
+        self.dispatcher.dispatch(&request)
+    }
+}
+
+lazy_static! {
+    // Global RPC dispatcher object.
+    static ref DISPATCHER: Mutex<Dispatcher> = Mutex::new(Dispatcher::new());
+}
+
+/// RPC method dispatcher.
+///
+/// The dispatcher holds all registered RPC methods and provides an entry point
+/// for their invocation.
+pub struct Dispatcher {
+    /// Registered RPC methods.
+    methods: HashMap<String, EnclaveMethod>,
+}
+
+impl Dispatcher {
+    /// Create a new RPC dispatcher instance.
+    pub fn new() -> Self {
+        let mut dispatcher = Dispatcher {
+            methods: HashMap::new(),
+        };
+
+        // Register internal methods.
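// User-defined methods are registered the same way as the internal ones below;
// any closure matching ApiMethodHandler works. For example (hypothetical
// `hello_world` method with protobuf types HelloRequest/HelloResponse):
//
//     dispatcher.add_method(EnclaveMethod::new(
//         ApiMethodDescriptor {
//             name: "hello_world".to_owned(),
//             client_attestation_required: false,
//         },
//         |_request: &request::Request<HelloRequest>| -> Result<HelloResponse> {
//             let mut response = HelloResponse::new();
//             response.set_greeting("Hello!".to_owned());
//             Ok(response)
//         },
//     ));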
+        dispatcher.add_method(EnclaveMethod::new(
+            ApiMethodDescriptor {
+                name: api::METHOD_CHANNEL_INIT.to_owned(),
+                client_attestation_required: false,
+            },
+            |request: &request::Request<api::ChannelInitRequest>| {
+                super::secure_channel::channel_init(request)
+            },
+        ));
+
+        dispatcher.add_method(EnclaveMethod::new(
+            ApiMethodDescriptor {
+                name: api::METHOD_CHANNEL_AUTH.to_owned(),
+                client_attestation_required: false,
+            },
+            |request: &request::Request<api::ChannelAuthRequest>| {
+                super::secure_channel::channel_auth(request)
+            },
+        ));
+
+        dispatcher
+    }
+
+    /// Global dispatcher instance.
+    ///
+    /// Calling this method will take a lock on the global instance which
+    /// will be released once the value goes out of scope.
+    pub fn get<'a>() -> MutexGuard<'a, Self> {
+        DISPATCHER.lock().unwrap()
+    }
+
+    /// Register a new method in the dispatcher.
+    pub fn add_method(&mut self, method: EnclaveMethod) {
+        self.methods.insert(method.get_name().clone(), method);
+    }
+
+    /// Dispatches a raw RPC request.
+    pub fn dispatch(&self, request: request::Request<Vec<u8>>) -> response::Response {
+        // If an error occurred during request processing, forward it.
+        if let Some(ref error) = request.get_error() {
+            return response::Response::error(&request, error.code, &error.message);
+        }
+
+        // Get request method.
+        let method = request
+            .get_method()
+            .expect("Non-errored request without method passed to dispatcher");
+
+        match self.methods.get(method) {
+            Some(method_dispatch) => method_dispatch.dispatch(&request),
+            None => response::Response::error(
+                &request,
+                api::PlainClientResponse_Code::ERROR_METHOD_NOT_FOUND,
+                "Method not found",
+            ),
+        }
+    }
+}
+
+/// RPC dispatch ECALL entry point.
+///
+/// This method gets executed every time there are requests to be dispatched
+/// into this enclave.
+#[no_mangle]
+pub extern "C" fn rpc_call(
+    request_data: *const u8,
+    request_length: usize,
+    response_data: *mut u8,
+    response_capacity: usize,
+    response_length: *mut usize,
+) {
+    // Parse requests.
+    let requests = {
+        profile_block!("parse_request");
+
+        let mut enclave_request: api::EnclaveRequest =
+            read_enclave_request(request_data, request_length);
+        let client_requests = enclave_request.take_client_request();
+        let mut requests = vec![];
+
+        for mut client_request in client_requests.into_iter() {
+            if client_request.has_encrypted_request() {
+                // Encrypted request.
+                let plain_request = match open_request_box(&client_request.get_encrypted_request())
+                {
+                    Ok(plain_request) => plain_request,
+                    _ => request::Request::error(DispatchError::new(
+                        api::PlainClientResponse_Code::ERROR_SECURE_CHANNEL,
+                        "Unable to open secure channel request",
+                    )),
+                };
+
+                requests.push(plain_request);
+            } else {
+                // Plain request.
+                let mut plain_request = client_request.take_plain_request();
+                let plain_request = match PLAIN_METHODS
+                    .iter()
+                    .find(|&method| method == &plain_request.get_method())
+                {
+                    Some(_) => request::Request::new(
+                        plain_request.take_payload(),
+                        plain_request.take_method(),
+                        None,
+                        None,
+                    ),
+                    None => {
+                        // Method requires a secure channel.
+                        request::Request::error(DispatchError::new(
+                            api::PlainClientResponse_Code::ERROR_METHOD_SECURE,
+                            "Method call must be made over a secure channel",
+                        ))
+                    }
+                };
+
+                requests.push(plain_request);
+            }
+        }
+
+        requests
+    };
+
+    // Process requests.
+ let responses = { + profile_block!("process_requests"); + + let dispatcher = Dispatcher::get(); + let mut responses = vec![]; + for request in requests { + responses.push(dispatcher.dispatch(request)); + } + + responses + }; + + // Generate response. + { + profile_block!("return_response"); + + // Add all responses. + let mut enclave_response = api::EnclaveResponse::new(); + { + let client_responses = enclave_response.mut_client_response(); + for mut response in responses { + client_responses.push(response.take_message()); + } + } + + // Copy back response. + write_enclave_response( + &enclave_response, + response_data, + response_capacity, + response_length, + ); + } +} diff --git a/rpc/trusted/src/error.rs b/rpc/trusted/src/error.rs new file mode 100644 index 00000000000..5ad56d1d8a0 --- /dev/null +++ b/rpc/trusted/src/error.rs @@ -0,0 +1,21 @@ +//! RPC-specific error types. +use ekiden_rpc_common::api; + +/// Error that may occur during request dispatch. +#[derive(Debug, Clone)] +pub struct DispatchError { + /// Error code. + pub code: api::PlainClientResponse_Code, + /// Human-readable message. + pub message: String, +} + +impl DispatchError { + /// Creates a new dispatch error. + pub fn new(code: api::PlainClientResponse_Code, message: &str) -> Self { + DispatchError { + code, + message: message.to_string(), + } + } +} diff --git a/rpc/trusted/src/lib.rs b/rpc/trusted/src/lib.rs new file mode 100644 index 00000000000..99379d35555 --- /dev/null +++ b/rpc/trusted/src/lib.rs @@ -0,0 +1,34 @@ +#![feature(use_extern_macros)] +#![feature(core_intrinsics)] + +#[cfg(target_env = "sgx")] +extern crate sgx_tse; +#[cfg(target_env = "sgx")] +extern crate sgx_tseal; +#[cfg(target_env = "sgx")] +extern crate sgx_types; + +extern crate futures; +#[macro_use] +extern crate lazy_static; +extern crate protobuf; +extern crate sodalite; + +extern crate ekiden_common; +extern crate ekiden_enclave_common; +extern crate ekiden_enclave_trusted; +extern crate ekiden_rpc_client; +extern crate ekiden_rpc_common; + +pub mod dispatcher; +pub mod error; +pub mod request; +pub mod response; + +pub mod secure_channel; + +#[macro_use] +mod macros; + +mod untrusted; +pub mod client; diff --git a/rpc/trusted/src/macros.rs b/rpc/trusted/src/macros.rs new file mode 100644 index 00000000000..e545ded1b03 --- /dev/null +++ b/rpc/trusted/src/macros.rs @@ -0,0 +1,51 @@ +/// Registers defined RPC methods into the enclave RPC dispatcher. +/// +/// # Examples +/// +/// This macro should be invoked using a concrete API generated by `rpc_api` as +/// follows: +/// ``` +/// with_api! { +/// create_enclave_rpc!(api); +/// } +/// ``` +#[macro_export] +macro_rules! create_enclave_rpc { + ( + metadata { + name = $metadata_name:ident ; + version = $metadata_version:expr ; + client_attestation_required = $client_attestation_required:expr ; + } + + $( + rpc $method_name:ident ( $request_type:ty ) -> $response_type:ty ; + )* + ) => { + #[cfg(target_env = "sgx")] + global_ctors_object! { + ENCLAVE_RPC_INIT, enclave_rpc_init = { + use ekiden_core::error::Result; + use ekiden_core::rpc::reflection::ApiMethodDescriptor; + use ekiden_trusted::rpc::dispatcher::{Dispatcher, EnclaveMethod}; + use ekiden_trusted::rpc::request::Request; + + // Register generated methods using the dispatcher. 
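+                // For each `rpc name(Request) -> Response;` item below, this
+                // expands into an `add_method` registration that wraps the
+                // free function of the same name in a typed closure.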
+                let mut dispatcher = Dispatcher::get();
+                $(
+                    dispatcher.add_method(
+                        EnclaveMethod::new(
+                            ApiMethodDescriptor {
+                                name: stringify!($method_name).to_owned(),
+                                client_attestation_required: $client_attestation_required,
+                            },
+                            |request: &Request<$request_type>| -> Result<$response_type> {
+                                $method_name(request)
+                            },
+                        )
+                    );
+                )*
+            }
+        }
+    }
+}
diff --git a/rpc/trusted/src/request.rs b/rpc/trusted/src/request.rs
new file mode 100644
index 00000000000..5888331f460
--- /dev/null
+++ b/rpc/trusted/src/request.rs
@@ -0,0 +1,109 @@
+//! RPC request type.
+use std::ops::Deref;
+
+use ekiden_enclave_common::quote::MrEnclave;
+
+use super::error::DispatchError;
+
+/// Wrapper for requests to provide additional request metadata.
+#[derive(Debug, Clone)]
+pub struct Request<T> {
+    /// Underlying request message.
+    message: Option<T>,
+    /// Request method name.
+    method: Option<String>,
+    /// Client short-term public key (if request is authenticated).
+    public_key: Option<Vec<u8>>,
+    /// Client MRENCLAVE (if channel is mutually authenticated).
+    mr_enclave: Option<MrEnclave>,
+    /// Optional error that occurred during request processing.
+    error: Option<DispatchError>,
+}
+
+impl<T> Request<T> {
+    /// Create new request wrapper from message.
+    pub fn new(
+        message: T,
+        method: String,
+        public_key: Option<Vec<u8>>,
+        mr_enclave: Option<MrEnclave>,
+    ) -> Self {
+        Request {
+            message: Some(message),
+            method: Some(method),
+            public_key: public_key,
+            mr_enclave: mr_enclave,
+            error: None,
+        }
+    }
+
+    /// Create new request with dispatch error.
+    pub fn error(error: DispatchError) -> Self {
+        Request {
+            message: None,
+            method: None,
+            public_key: None,
+            mr_enclave: None,
+            error: Some(error),
+        }
+    }
+
+    /// Copy metadata of the current request into a new request object.
+    ///
+    /// This method can be used when extracting a part of the request data (e.g. the
+    /// payload) and the caller would like to keep the associated metadata. The
+    /// metadata will be cloned and the given `message` will be wrapped into a
+    /// [`Request`] object.
+    ///
+    /// [`Request`]: Request
+    pub fn copy_metadata_to<M>(&self, message: M) -> Request<M> {
+        Request {
+            message: Some(message),
+            method: self.method.clone(),
+            public_key: self.public_key.clone(),
+            mr_enclave: self.mr_enclave.clone(),
+            error: None,
+        }
+    }
+
+    /// Get short-term public key of the client making this request.
+    ///
+    /// If the request was made over a non-secure channel, this will be [`None`].
+    ///
+    /// [`None`]: std::option::Option
+    pub fn get_client_public_key(&self) -> Option<&Vec<u8>> {
+        self.public_key.as_ref()
+    }
+
+    /// Get MRENCLAVE of the client making this request.
+    ///
+    /// If the request was made over a channel without client attestation, this
+    /// will be [`None`].
+    ///
+    /// [`None`]: std::option::Option
+    pub fn get_client_mr_enclave(&self) -> Option<&MrEnclave> {
+        self.mr_enclave.as_ref()
+    }
+
+    /// Get optional error if any occurred during dispatch.
+    pub fn get_error(&self) -> Option<&DispatchError> {
+        self.error.as_ref()
+    }
+
+    /// Get optional request method name.
+    pub fn get_method(&self) -> Option<&String> {
+        self.method.as_ref()
+    }
+}
+
+impl<T> Deref for Request<T> {
+    type Target = T;
+
+    /// Dereferences the request into underlying message.
+    ///
+    /// If there is no message (e.g., due to request processing resulting in an
+    /// error), dereferencing will panic.
+    fn deref(&self) -> &T {
+        &self.message.as_ref().unwrap()
+    }
+}
diff --git a/rpc/trusted/src/response.rs b/rpc/trusted/src/response.rs
new file mode 100644
index 00000000000..46eaeab6ce3
--- /dev/null
+++ b/rpc/trusted/src/response.rs
@@ -0,0 +1,86 @@
+//! RPC response type.
+use std;
+
+use protobuf::Message;
+
+use ekiden_rpc_common::api;
+
+use super::request::Request;
+use super::secure_channel::create_response_box;
+
+/// Wrapper for responses.
+pub struct Response {
+    /// Response message.
+    message: api::ClientResponse,
+}
+
+impl Response {
+    /// Create new response.
+    pub fn new<Rq>(request: &Request<Rq>, response: api::PlainClientResponse) -> Self {
+        let mut message = api::ClientResponse::new();
+        if let Some(ref public_key) = request.get_client_public_key() {
+            // Encrypted response.
+            match create_response_box(&public_key, &response) {
+                Ok(response_box) => message.set_encrypted_response(response_box),
+                _ => {
+                    // Failed to create a cryptographic box for the response. This could
+                    // be due to the session being incorrect or due to other issues. In
+                    // this case, we should generate a plain error message.
+                    message.set_plain_response(Self::generate_error(
+                        api::PlainClientResponse_Code::ERROR_SECURE_CHANNEL,
+                        "Failed to generate secure channel response",
+                    ));
+                }
+            };
+        } else {
+            // Plain response.
+            message.set_plain_response(response);
+        }
+
+        Response { message }
+    }
+
+    /// Create success response.
+    pub fn success<Rq>(request: &Request<Rq>, payload: Vec<u8>) -> Self {
+        // Prepare response.
+        let mut response = api::PlainClientResponse::new();
+        response.set_code(api::PlainClientResponse_Code::SUCCESS);
+        response.set_payload(payload);
+
+        Self::new(&request, response)
+    }
+
+    /// Create error response.
+    pub fn error<Rq>(
+        request: &Request<Rq>,
+        error: api::PlainClientResponse_Code,
+        message: &str,
+    ) -> Self {
+        Self::new(&request, Self::generate_error(error, &message))
+    }
+
+    /// Generate error response.
+    fn generate_error(
+        error: api::PlainClientResponse_Code,
+        message: &str,
+    ) -> api::PlainClientResponse {
+        // Prepare response.
+        let mut response = api::PlainClientResponse::new();
+        response.set_code(error);
+
+        let mut error = api::Error::new();
+        error.set_message(message.to_string());
+
+        let payload = error.write_to_bytes().expect("Failed to serialize error");
+        response.set_payload(payload);
+
+        response
+    }
+
+    /// Take response message.
+    ///
+    /// After calling this method, a default message will be left in its place.
+    pub fn take_message(&mut self) -> api::ClientResponse {
+        std::mem::replace(&mut self.message, api::ClientResponse::new())
+    }
+}
diff --git a/rpc/trusted/src/secure_channel.rs b/rpc/trusted/src/secure_channel.rs
new file mode 100644
index 00000000000..fd7175bd56b
--- /dev/null
+++ b/rpc/trusted/src/secure_channel.rs
@@ -0,0 +1,336 @@
+//! Secure channel handling.
+use protobuf;
+use protobuf::Message;
+use sodalite;
+
+use std::collections::HashMap;
+#[cfg(not(target_env = "sgx"))]
+use std::sync::{Mutex, MutexGuard};
+#[cfg(target_env = "sgx")]
+use std::sync::SgxMutex as Mutex;
+#[cfg(target_env = "sgx")]
+use std::sync::SgxMutexGuard as MutexGuard;
+
+use ekiden_common::error::{Error, Result};
+use ekiden_common::random;
+use ekiden_enclave_common;
+use ekiden_enclave_common::quote::MrEnclave;
+use ekiden_enclave_trusted;
+use ekiden_enclave_trusted::crypto::{SecretSeed, SECRET_SEED_LEN};
+use ekiden_rpc_common::api;
+use ekiden_rpc_common::secure_channel::{self, MonotonicNonceGenerator, RandomNonceGenerator,
+                                        SessionState};
+
+use super::request::Request;
+
+/// Single secure channel session between client and contract.
+#[derive(Default)]
+pub struct ClientSession {
+    /// Client short-term public key.
+    client_public_key: sodalite::BoxPublicKey,
+    /// Contract short-term public key.
+    contract_public_key: sodalite::BoxPublicKey,
+    /// Contract short-term private key.
+    contract_private_key: sodalite::BoxSecretKey,
+    /// Cached shared key.
+    shared_key: Option<sodalite::SecretboxKey>,
+    /// Short-term nonce generator.
+    nonce_generator: MonotonicNonceGenerator,
+    /// Session state.
+    state: SessionState,
+    /// Client long-term public key (if authenticated).
+    client_long_term_public_key: Option<sodalite::BoxPublicKey>,
+    /// Client MRENCLAVE (if authenticated).
+    client_mr_enclave: Option<MrEnclave>,
+}
+
+/// Secure channel context.
+pub struct SecureChannelContext {
+    /// Contract short-term keypairs, keyed with client short-term keys.
+    sessions: HashMap<sodalite::BoxPublicKey, ClientSession>,
+    /// Long-term nonce generator.
+    nonce_generator: RandomNonceGenerator,
+}
+
+impl SecureChannelContext {
+    /// Create new secure channel context.
+    pub fn new() -> Self {
+        SecureChannelContext {
+            sessions: HashMap::new(),
+            nonce_generator: RandomNonceGenerator::new(),
+        }
+    }
+
+    /// Global secure channel context instance.
+    ///
+    /// Calling this method will take a lock on the global instance which
+    /// will be released once the value goes out of scope.
+    pub fn get<'a>() -> MutexGuard<'a, Self> {
+        SECURE_CHANNEL_CTX.lock().unwrap()
+    }
+
+    /// Convert client short-term public key into session hash map key.
+    fn get_session_key(public_key: &[u8]) -> Result<sodalite::BoxPublicKey> {
+        if public_key.len() != sodalite::BOX_PUBLIC_KEY_LEN {
+            return Err(Error::new("Bad short-term client key"));
+        }
+
+        let mut key: sodalite::BoxPublicKey = [0; sodalite::BOX_PUBLIC_KEY_LEN];
+        key.copy_from_slice(&public_key);
+
+        Ok(key)
+    }
+
+    /// Create a new client session.
+    ///
+    /// Returns a cryptographic box, encrypted to the client short-term key and
+    /// authenticated by the contract long-term key.
+    pub fn create_session(
+        &mut self,
+        public_key: &[u8],
+    ) -> Result<api::AuthenticatedShortTermPublicKey> {
+        let key = SecureChannelContext::get_session_key(&public_key)?;
+
+        if self.sessions.contains_key(&key) {
+            return Err(Error::new("Session already exists"));
+        }
+
+        let mut session = ClientSession::new(key.clone())?;
+        session.transition_to(SessionState::Established)?;
+
+        let box_inner = secure_channel::create_box(
+            session.get_contract_public_key(),
+            &secure_channel::NONCE_CONTEXT_INIT,
+            &mut self.nonce_generator,
+            session.get_client_public_key(),
+            &ekiden_enclave_trusted::identity::get_identity().rpc_key_e_priv,
+            &mut None,
+        )?;
+        let mut astpk = api::AuthenticatedShortTermPublicKey::new();
+        astpk.set_identity_proof(ekiden_enclave_trusted::identity::get_proof());
+        astpk.set_boxed_short_term_public_key(box_inner);
+
+        // TODO: What about session table overflows?
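+        // Sessions are keyed by the client short-term public key; duplicates
+        // were already rejected above.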
+
+        self.sessions.insert(key, session);
+
+        Ok(astpk)
+    }
+
+    /// Lookup existing client session.
+    /// Pass our `sessions` field, so that we don't have to borrow the whole channel context.
+    fn get_session<'a>(
+        sessions: &'a mut HashMap<sodalite::BoxPublicKey, ClientSession>,
+        public_key: &[u8],
+    ) -> Result<&'a mut ClientSession> {
+        let key = SecureChannelContext::get_session_key(&public_key)?;
+
+        match sessions.get_mut(&key) {
+            Some(session) => Ok(session),
+            None => Err(Error::new("Client session not found")),
+        }
+    }
+
+    /// Authenticate a session's client.
+    pub fn authenticate_client(&mut self, box_outer: &api::CryptoBox) -> Result<()> {
+        let session =
+            SecureChannelContext::get_session(&mut self.sessions, box_outer.get_public_key())?;
+
+        let astpk_bytes = secure_channel::open_box(
+            &box_outer,
+            &secure_channel::NONCE_CONTEXT_AUTHOUT,
+            &mut session.nonce_generator,
+            &session.client_public_key,
+            &session.contract_private_key,
+            &mut session.shared_key,
+        )?;
+        let astpk: api::AuthenticatedShortTermPublicKey = protobuf::parse_from_bytes(&astpk_bytes)?;
+
+        let iai = ekiden_enclave_common::quote::verify(astpk.get_identity_proof())?;
+
+        let bound_client_stpk_bytes = secure_channel::open_box(
+            astpk.get_boxed_short_term_public_key(),
+            &secure_channel::NONCE_CONTEXT_AUTHIN,
+            &mut self.nonce_generator,
+            &iai.identity.rpc_key_e_pub,
+            &ekiden_enclave_trusted::identity::get_identity().rpc_key_e_priv,
+            &mut None,
+        )?;
+        if &bound_client_stpk_bytes != &session.client_public_key {
+            return Err(Error::new(
+                "Key in client's authentication request doesn't match channel key",
+            ));
+        }
+
+        session.client_long_term_public_key = Some(iai.identity.rpc_key_e_pub);
+        session.client_mr_enclave = Some(iai.mr_enclave);
+
+        Ok(())
+    }
+
+    /// Close an existing session.
+    pub fn close_session(&mut self, public_key: &[u8]) -> Result<()> {
+        let key = SecureChannelContext::get_session_key(&public_key)?;
+
+        self.sessions.remove(&key);
+
+        Ok(())
+    }
+}
+
+impl ClientSession {
+    /// Create a new client session.
+    pub fn new(public_key: sodalite::BoxPublicKey) -> Result<Self> {
+        let mut session = ClientSession::default();
+        session.transition_to(SessionState::Init)?;
+        session.client_public_key = public_key;
+
+        // Generate new keypair.
+        let mut seed: SecretSeed = [0; SECRET_SEED_LEN];
+        match random::get_random_bytes(&mut seed) {
+            Ok(_) => {}
+            Err(_) => return Err(Error::new("Keypair generation failed")),
+        }
+
+        sodalite::box_keypair_seed(
+            &mut session.contract_public_key,
+            &mut session.contract_private_key,
+            &seed,
+        );
+
+        // Cache shared channel key.
+        {
+            let mut key = session
+                .shared_key
+                .get_or_insert([0u8; sodalite::SECRETBOX_KEY_LEN]);
+            sodalite::box_beforenm(
+                &mut key,
+                &session.client_public_key,
+                &session.contract_private_key,
+            );
+        }
+
+        Ok(session)
+    }
+
+    /// Get client short-term public key.
+    pub fn get_client_public_key(&self) -> &sodalite::BoxPublicKey {
+        &self.client_public_key
+    }
+
+    /// Get contract short-term public key.
+    pub fn get_contract_public_key(&self) -> &sodalite::BoxPublicKey {
+        &self.contract_public_key
+    }
+
+    /// Open cryptographic box with RPC request.
+    pub fn open_request_box(&mut self, request: &api::CryptoBox) -> Result<Request<Vec<u8>>> {
+        let plain_request = secure_channel::open_box(
+            &request,
+            &secure_channel::NONCE_CONTEXT_REQUEST,
+            &mut self.nonce_generator,
+            &self.client_public_key,
+            &self.contract_private_key,
+            &mut self.shared_key,
+        )?;
+
+        let mut plain_request: api::PlainClientRequest =
+            protobuf::parse_from_bytes(&plain_request)?;
+
+        // Check if this request is allowed based on current channel state.
+        match self.state {
+            SessionState::Established => {}
+            _ => {
+                return Err(Error::new("Invalid method call in this state"));
+            }
+        }
+
+        Ok(Request::new(
+            plain_request.take_payload(),
+            plain_request.take_method(),
+            Some(self.client_public_key.to_vec()),
+            self.client_mr_enclave.clone(),
+        ))
+    }
+
+    /// Create cryptographic box with RPC response.
+    pub fn create_response_box(
+        &mut self,
+        response: &api::PlainClientResponse,
+    ) -> Result<api::CryptoBox> {
+        Ok(secure_channel::create_box(
+            &response.write_to_bytes()?,
+            &secure_channel::NONCE_CONTEXT_RESPONSE,
+            &mut self.nonce_generator,
+            &self.client_public_key,
+            &self.contract_private_key,
+            &mut self.shared_key,
+        )?)
+    }
+
+    /// Transition secure channel to a new state.
+    pub fn transition_to(&mut self, new_state: SessionState) -> Result<()> {
+        Ok(self.state.transition_to(new_state)?)
+    }
+}
+
+lazy_static! {
+    // Global secure channel context.
+    static ref SECURE_CHANNEL_CTX: Mutex<SecureChannelContext> =
+        Mutex::new(SecureChannelContext::new());
+}
+
+/// Initialize secure channel.
+pub fn channel_init(request: &api::ChannelInitRequest) -> Result<api::ChannelInitResponse> {
+    let mut channel = SECURE_CHANNEL_CTX.lock().unwrap();
+
+    // Create new session.
+    let astpk = channel.create_session(request.get_short_term_public_key())?;
+
+    let mut response = api::ChannelInitResponse::new();
+    response.set_authenticated_short_term_public_key(astpk);
+
+    Ok(response)
+}
+
+/// Authenticate client.
+pub fn channel_auth(request: &api::ChannelAuthRequest) -> Result<api::ChannelAuthResponse> {
+    let mut channel = SECURE_CHANNEL_CTX.lock().unwrap();
+    let box_outer = request.get_boxed_authenticated_short_term_public_key();
+
+    channel.authenticate_client(box_outer)?;
+
+    Ok(api::ChannelAuthResponse::new())
+}
+
+/// Close secure channel.
+pub fn channel_close(public_key: &[u8]) -> Result<()> {
+    let mut channel = SECURE_CHANNEL_CTX.lock().unwrap();
+
+    channel.close_session(&public_key)?;
+
+    Ok(())
+}
+
+/// Open cryptographic box with RPC request.
+pub fn open_request_box(request: &api::CryptoBox) -> Result<Request<Vec<u8>>> {
+    let mut channel = SECURE_CHANNEL_CTX.lock().unwrap();
+
+    Ok(
+        SecureChannelContext::get_session(&mut channel.sessions, &request.get_public_key())?
+            .open_request_box(&request)?,
+    )
+}
+
+/// Create cryptographic box with RPC response.
+pub fn create_response_box(
+    public_key: &[u8],
+    response: &api::PlainClientResponse,
+) -> Result<api::CryptoBox> {
+    let mut channel = SECURE_CHANNEL_CTX.lock().unwrap();
+
+    Ok(
+        SecureChannelContext::get_session(&mut channel.sessions, &public_key)?
+            .create_response_box(&response)?,
+    )
+}
diff --git a/rpc/trusted/src/untrusted.rs b/rpc/trusted/src/untrusted.rs
new file mode 100644
index 00000000000..8f35c126893
--- /dev/null
+++ b/rpc/trusted/src/untrusted.rs
@@ -0,0 +1,96 @@
+#[cfg(target_env = "sgx")]
+use sgx_types::*;
+
+#[cfg(target_env = "sgx")]
+use protobuf::{self, Message, MessageStatic};
+
+use ekiden_common::error::{Error, Result};
+use ekiden_rpc_common::client::ClientEndpoint;
+
+/// OCALLs defined by the Ekiden enclave specification.
+#[cfg(target_env = "sgx")]
+extern "C" {
+    /// Interface for outgoing RPC calls (to other enclaves or services).
+    pub fn untrusted_rpc_call(
+        endpoint: u16,
+        request_data: *const u8,
+        request_length: usize,
+        response_data: *mut u8,
+        response_capacity: usize,
+        response_length: *mut usize,
+    ) -> sgx_status_t;
+}
+
+/// Perform an untrusted RPC call against a given (untrusted) endpoint.
+///
+/// How the actual RPC call is implemented depends on the handler implemented
+/// in the untrusted part.
+#[cfg(target_env = "sgx")]
+pub fn untrusted_call_endpoint<Rq, Rs>(endpoint: &ClientEndpoint, request: Rq) -> Result<Rs>
+where
+    Rq: Message,
+    Rs: Message + MessageStatic,
+{
+    Ok(protobuf::parse_from_bytes(&untrusted_call_endpoint_raw(
+        &endpoint,
+        request.write_to_bytes()?,
+    )?)?)
+}
+
+#[cfg(not(target_env = "sgx"))]
+pub fn untrusted_call_endpoint<Rq, Rs>(_endpoint: &ClientEndpoint, _request: Rq) -> Result<Rs> {
+    Err(Error::new("Only supported in SGX builds"))
+}
+
+/// Perform a raw RPC call against a given (untrusted) endpoint.
+///
+/// How the actual RPC call is implemented depends on the handler implemented
+/// in the untrusted part.
+#[cfg(target_env = "sgx")]
+pub fn untrusted_call_endpoint_raw(
+    endpoint: &ClientEndpoint,
+    mut request: Vec<u8>,
+) -> Result<Vec<u8>> {
+    // Maximum size of serialized response is 64K.
+    let mut response: Vec<u8> = Vec::with_capacity(64 * 1024);
+
+    // Ensure that request is actually allocated as the length of the actual request
+    // may be zero and in that case the OCALL will fail with SGX_ERROR_INVALID_PARAMETER.
+    request.reserve(1);
+
+    let mut response_length = 0;
+    let status = unsafe {
+        untrusted_rpc_call(
+            endpoint.as_u16(),
+            request.as_ptr() as *const u8,
+            request.len(),
+            response.as_mut_ptr() as *mut u8,
+            response.capacity(),
+            &mut response_length,
+        )
+    };
+
+    match status {
+        sgx_status_t::SGX_SUCCESS => {}
+        status => {
+            return Err(Error::new(format!(
+                "Enclave RPC OCALL failed: {:?}",
+                status
+            )));
+        }
+    }
+
+    unsafe {
+        response.set_len(response_length);
+    }
+
+    Ok(response)
+}
+
+#[cfg(not(target_env = "sgx"))]
+pub fn untrusted_call_endpoint_raw(
+    _endpoint: &ClientEndpoint,
+    _request: Vec<u8>,
+) -> Result<Vec<u8>> {
+    Err(Error::new("Only supported in SGX builds"))
+}
diff --git a/rpc/untrusted/Cargo.toml b/rpc/untrusted/Cargo.toml
new file mode 100644
index 00000000000..118f91b8b01
--- /dev/null
+++ b/rpc/untrusted/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "ekiden-rpc-untrusted"
+version = "0.1.0-alpha.1"
+authors = ["Ekiden Developers "]
+description = "Ekiden RPC (untrusted part)"
+keywords = ["ekiden"]
+repository = "https://github.com/ekiden/ekiden"
+
+[features]
+sgx-simulation = []
+
+[dependencies]
+sgx_types = { git = "https://github.com/ekiden/rust-sgx-sdk", tag = "v0.9.7-ekiden1" }
+sgx_urts = { git = "https://github.com/ekiden/rust-sgx-sdk", tag = "v0.9.7-ekiden1" }
+protobuf = "1.4.2"
+lazy_static = "1.0"
+ekiden-common = { path = "../../common", version = "0.1.0-alpha.1" }
+ekiden-enclave-untrusted = { path = "../../enclave/untrusted", version = "0.1.0-alpha.1" }
+ekiden-rpc-common = { path = "../common", version = "0.1.0-alpha.1" }
+
+[build-dependencies]
+ekiden-tools = { path = "../../tools", version = "0.1.0-alpha.1" }
diff --git a/rpc/untrusted/Makefile.toml b/rpc/untrusted/Makefile.toml
new file mode 100644
index 00000000000..8ee118dc949
--- /dev/null
+++ b/rpc/untrusted/Makefile.toml
@@ -0,0 +1 @@
+extend = "../Makefile.toml"
diff --git a/rpc/untrusted/build.rs b/rpc/untrusted/build.rs
new file mode 100644
index 00000000000..9eece0734a3
--- /dev/null
+++ b/rpc/untrusted/build.rs
@@ -0,0 +1,5 @@
+extern crate ekiden_tools;
+
+fn main() {
+    ekiden_tools::detect_sgx_features();
+}
diff --git a/rpc/untrusted/src/ecall_proxy.rs b/rpc/untrusted/src/ecall_proxy.rs
new file mode 100644
index 00000000000..5b2bcef4451
--- /dev/null
+++ b/rpc/untrusted/src/ecall_proxy.rs
@@ -0,0 +1,13 @@
+use sgx_types::*;
+
+extern "C" {
+    /// Call enclave RPC system.
+    pub fn rpc_call(
+        eid: sgx_enclave_id_t,
+        request_data: *const u8,
+        request_length: usize,
+        response_data: *const u8,
+        response_capacity: usize,
+        response_length: *mut usize,
+    ) -> sgx_status_t;
+}
diff --git a/rpc/untrusted/src/enclave.rs b/rpc/untrusted/src/enclave.rs
new file mode 100644
index 00000000000..8a5da6bdb9a
--- /dev/null
+++ b/rpc/untrusted/src/enclave.rs
@@ -0,0 +1,107 @@
+//! Enclave RPC interface.
+use sgx_types::*;
+
+use protobuf;
+use protobuf::{Message, MessageStatic, RepeatedField};
+
+use ekiden_common::error::{Error, Result};
+use ekiden_enclave_untrusted::Enclave;
+use ekiden_rpc_common::api;
+
+use super::ecall_proxy;
+
+pub trait EnclaveRpc {
+    /// Maximum response size (in kilobytes).
+    const MAX_RESPONSE_SIZE: usize = 1024;
+
+    /// Perform a plain-text RPC call against the enclave.
+    fn call<R: Message, S: Message + MessageStatic>(&self, method: &str, request: &R) -> Result<S>;
+
+    /// Perform a raw RPC call against the enclave.
+    fn call_raw(&self, request: Vec<u8>) -> Result<Vec<u8>>;
+}
+
+impl EnclaveRpc for Enclave {
+    /// Perform a plain-text RPC call against the enclave with no state.
+    fn call<R: Message, S: Message + MessageStatic>(&self, method: &str, request: &R) -> Result<S> {
+        // Prepare plain request.
+        let mut plain_request = api::PlainClientRequest::new();
+        plain_request.set_method(String::from(method));
+        plain_request.set_payload(request.write_to_bytes()?);
+
+        let mut client_request = api::ClientRequest::new();
+        client_request.set_plain_request(plain_request);
+
+        let mut enclave_request = api::EnclaveRequest::new();
+        enclave_request.set_client_request(RepeatedField::from_slice(&[client_request]));
+
+        let enclave_request_bytes = enclave_request.write_to_bytes()?;
+        let enclave_response_bytes = self.call_raw(enclave_request_bytes)?;
+
+        let enclave_response: api::EnclaveResponse =
+            match protobuf::parse_from_bytes(enclave_response_bytes.as_slice()) {
+                Ok(enclave_response) => enclave_response,
+                _ => return Err(Error::new("Response parse error")),
+            };
+
+        let client_response = &enclave_response.get_client_response()[0];
+
+        // Plain request requires a plain response.
+        assert!(client_response.has_plain_response());
+        let plain_response = client_response.get_plain_response();
+
+        // Validate response code.
+        match plain_response.get_code() {
+            api::PlainClientResponse_Code::SUCCESS => {}
+            _ => {
+                // Deserialize error.
+                let error: api::Error =
+                    match protobuf::parse_from_bytes(plain_response.get_payload()) {
+                        Ok(error) => error,
+                        _ => return Err(Error::new("Unable to parse error payload")),
+                    };
+
+                return Err(Error::new(error.get_message()));
+            }
+        };
+
+        // Deserialize response.
+        match protobuf::parse_from_bytes(plain_response.get_payload()) {
+            Ok(response) => Ok(response),
+            _ => Err(Error::new("Unable to parse response payload")),
+        }
+    }
+
+    /// Perform a raw RPC call against the enclave.
+    fn call_raw(&self, mut request: Vec<u8>) -> Result<Vec<u8>> {
+        // Reserve space up to the maximum size of serialized response.
+        // TODO: Can we avoid allocating large response buffers each time?
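+        // Only the capacity of this buffer is passed to the enclave; the
+        // actual number of bytes written comes back via `response_length`.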
+        let mut response: Vec<u8> = Vec::with_capacity(Self::MAX_RESPONSE_SIZE * 1024);
+
+        // Ensure that request is actually allocated as the length of the actual request
+        // may be zero and in that case the OCALL will fail with SGX_ERROR_INVALID_PARAMETER.
+        request.reserve(1);
+
+        let mut response_length = 0;
+        let status = unsafe {
+            ecall_proxy::rpc_call(
+                self.get_id(),
+                request.as_ptr() as *const u8,
+                request.len(),
+                response.as_mut_ptr() as *mut u8,
+                response.capacity(),
+                &mut response_length,
+            )
+        };
+
+        if status != sgx_status_t::SGX_SUCCESS {
+            return Err(Error::new("Failed to call enclave RPC"));
+        }
+
+        unsafe {
+            response.set_len(response_length);
+        }
+
+        Ok(response)
+    }
+}
diff --git a/rpc/untrusted/src/lib.rs b/rpc/untrusted/src/lib.rs
new file mode 100644
index 00000000000..2b69c9c685d
--- /dev/null
+++ b/rpc/untrusted/src/lib.rs
@@ -0,0 +1,34 @@
+extern crate protobuf;
+extern crate sgx_types;
+extern crate sgx_urts;
+
+#[macro_use]
+extern crate lazy_static;
+
+extern crate ekiden_common;
+extern crate ekiden_enclave_untrusted;
+extern crate ekiden_rpc_common;
+
+pub mod enclave;
+#[doc(hidden)]
+pub mod ocall_proxy;
+#[doc(hidden)]
+pub mod ecall_proxy;
+
+#[macro_use]
+pub mod router;
+
+// Exports.
+pub use enclave::EnclaveRpc;
+
+// For the below link statements to work, the library paths need to be correctly
+// configured. The easiest way to achieve that is to use the build_untrusted
+// helper from ekiden_tools.
+
+// Ensure that we link to sgx_urts library.
+#[cfg_attr(not(feature = "sgx-simulation"), link(name = "sgx_urts"))]
+#[cfg_attr(feature = "sgx-simulation", link(name = "sgx_urts_sim"))]
+// Ensure that we link to sgx_uae_service library.
+#[cfg_attr(not(feature = "sgx-simulation"), link(name = "sgx_uae_service"))]
+#[cfg_attr(feature = "sgx-simulation", link(name = "sgx_uae_service_sim"))]
+extern "C" {}
diff --git a/rpc/untrusted/src/ocall_proxy.rs b/rpc/untrusted/src/ocall_proxy.rs
new file mode 100644
index 00000000000..e80d8dd0436
--- /dev/null
+++ b/rpc/untrusted/src/ocall_proxy.rs
@@ -0,0 +1,86 @@
+use sgx_types::*;
+
+use std;
+use std::ptr;
+
+use ekiden_rpc_common::client::ClientEndpoint;
+
+use super::router::RpcRouter;
+
+/// Proxy for sgx_init_quote.
+#[no_mangle]
+pub extern "C" fn untrusted_init_quote(
+    p_target_info: *mut sgx_target_info_t,
+    p_gid: *mut sgx_epid_group_id_t,
+) -> sgx_status_t {
+    unsafe { sgx_init_quote(p_target_info, p_gid) }
+}
+
+/// Proxy for sgx_get_quote.
+#[no_mangle]
+pub extern "C" fn untrusted_get_quote(
+    p_report: *const sgx_report_t,
+    quote_type: sgx_quote_sign_type_t,
+    p_spid: *const sgx_spid_t,
+    p_quote: *mut u8,
+    _quote_capacity: u32,
+    quote_size: *mut u32,
+) -> sgx_status_t {
+    // Calculate quote size.
+    let status = unsafe { sgx_calc_quote_size(ptr::null(), 0, quote_size) };
+
+    match status {
+        sgx_status_t::SGX_SUCCESS => {}
+        _ => return status,
+    };
+
+    // Get quote from the quoting enclave.
+    unsafe {
+        sgx_get_quote(
+            p_report,
+            quote_type,
+            p_spid,
+            ptr::null(),
+            ptr::null(),
+            0,
+            ptr::null_mut(),
+            p_quote as *mut sgx_quote_t,
+            *quote_size,
+        )
+    }
+}
+
+/// Interface for outgoing RPC calls (to other enclaves or services).
+#[no_mangle]
+pub extern "C" fn untrusted_rpc_call(
+    endpoint: u16,
+    request_data: *const u8,
+    request_length: usize,
+    response_data: *mut u8,
+    response_capacity: usize,
+    response_length: *mut usize,
+) {
+    // Convert raw request to Rust datatypes.
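+    // Safety: the pointer and length are supplied by the SGX edge routines
+    // for this OCALL and are assumed to describe a readable buffer that
+    // outlives the call.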
+    let request = unsafe { std::slice::from_raw_parts(request_data, request_length) };
+
+    // Invoke dispatcher.
+    let response = match ClientEndpoint::from_u16(endpoint) {
+        Some(endpoint) => RpcRouter::get().dispatch(&endpoint, request.to_vec()),
+        None => {
+            // Bad endpoint.
+            // TODO: Handle errors?
+            vec![]
+        }
+    };
+
+    // Convert response back to raw bytes.
+    if response.len() <= response_capacity {
+        unsafe {
+            for i in 0..response.len() as isize {
+                std::ptr::write(response_data.offset(i), response[i as usize]);
+            }
+
+            *response_length = response.len();
+        };
+    }
+}
diff --git a/rpc/untrusted/src/router.rs b/rpc/untrusted/src/router.rs
new file mode 100644
index 00000000000..b508d76541c
--- /dev/null
+++ b/rpc/untrusted/src/router.rs
@@ -0,0 +1,90 @@
+//! Untrusted router for RPC requests coming outside enclaves.
+use std::collections::HashMap;
+use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard};
+
+use ekiden_common::error::Result;
+use ekiden_rpc_common::client::ClientEndpoint;
+
+/// Handler for endpoints.
+///
+/// The handler receives raw request bytes as input and is supposed to
+/// return raw response bytes.
+pub trait Handler: Send + Sync + 'static {
+    /// Return a list of endpoints that the handler can handle.
+    fn get_endpoints(&self) -> Vec<ClientEndpoint>;
+
+    /// Handle a request and return a response.
+    fn handle(&self, endpoint: &ClientEndpoint, request: Vec<u8>) -> Result<Vec<u8>>;
+}
+
+lazy_static! {
+    /// Global RpcRouter for all the enclaves.
+    ///
+    /// This must be global, because we need to be able to get the current router
+    /// when we are invoked from an OCALL and at that point we only have global
+    /// state available.
+    static ref RPC_ROUTER: RwLock<RpcRouter> = RwLock::new(RpcRouter::new());
+}
+
+/// Router for RPC requests coming from enclaves.
+///
+/// Users of [`EnclaveRpc`] should register handlers for endpoints supported by
+/// [`ClientEndpoint`].
+///
+/// [`EnclaveRpc`]: super::EnclaveRpc
+/// [`ClientEndpoint`]: ekiden_rpc_common::client::ClientEndpoint
+pub struct RpcRouter {
+    /// Registered routes.
+    routes: HashMap<ClientEndpoint, Arc<Handler>>,
+}
+
+impl RpcRouter {
+    /// Create a new router instance.
+    fn new() -> Self {
+        RpcRouter {
+            routes: HashMap::new(),
+        }
+    }
+
+    /// Get the current global RpcRouter instance.
+    ///
+    /// Calling this method will take a write lock on the global instance, which
+    /// will be released once the value goes out of scope.
+    pub fn get_mut<'a>() -> RwLockWriteGuard<'a, RpcRouter> {
+        RPC_ROUTER.write().unwrap()
+    }
+
+    /// Get the current global RpcRouter instance.
+    ///
+    /// Calling this method will take a lock on the global instance, which will
+    /// be released once the value goes out of scope.
+    pub fn get<'a>() -> RwLockReadGuard<'a, RpcRouter> {
+        RPC_ROUTER.read().unwrap()
+    }
+
+    /// Register a new endpoint handler.
+    pub fn add_handler<H: Handler>(&mut self, handler: H) -> &mut Self {
+        let handler = Arc::new(handler);
+
+        for endpoint in handler.get_endpoints() {
+            self.routes.insert(endpoint, handler.clone());
+        }
+
+        self
+    }
+
+    /// Dispatch a request.
+    ///
+    /// If no handler is registered for the given endpoint, an empty response is
+    /// returned.
+    pub fn dispatch(&self, endpoint: &ClientEndpoint, request: Vec<u8>) -> Vec<u8> {
+        match self.routes.get(endpoint) {
+            Some(handler) => match handler.handle(&endpoint, request) {
+                Ok(response) => response,
+                _ => vec![],
+            },
+            // No endpoint handler matches.
+ None => vec![], + } + } +} diff --git a/scripts/benchmark.py b/scripts/benchmark.py new file mode 100755 index 00000000000..59c0946d86c --- /dev/null +++ b/scripts/benchmark.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python +from __future__ import print_function + +import argparse +import json +import subprocess +import sys + +def benchmark_crate(benchmarks, crate): + have_errors = False + + # Run benchmark using JSON output. + print("=== Benchmarking crate '{}' ===".format(crate)) + try: + output = subprocess.check_output( + ['cargo', 'bench', '-p', crate, '--', '-Z', 'unstable-options', '--format', 'json'] + ) + except subprocess.CalledProcessError as error: + output = error.output + have_errors = True + + # Process resulting lines to create a build artifact. + for line in output.split('\n'): + if not line: + continue + + event = json.loads(line) + + if event['type'] == 'bench': + benchmarks[event['name']] = {'median': event['median'], 'deviation': event['deviation']} + print("{name}: {median} ns / iter, deviation {deviation} ns".format(**event)) + elif event['type'] == 'test' and event['event'] == 'failed': + print("ERROR: Test '{name}' has failed with following output:".format(**event)) + print("===") + print(event['stdout']) + print("===") + have_errors = True + + return have_errors + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description=None) + parser.add_argument('crate', type=str, nargs='+', + help="Crates to benchmark") + parser.add_argument('--output', type=str, + help="Output filename") + parser.add_argument('--compare-to', type=str, + help="Previous output to compare to") + parser.add_argument('--fail-deviations', type=int, default=3, + help="Fail if new benchmark is more than this many deviations slower") + args = parser.parse_args() + + # Run benchmarks. + benchmarks = {} + have_errors = False + for crate in args.crate: + if benchmark_crate(benchmarks.setdefault(crate, {}), crate): + have_errors = True + + if have_errors: + print("ERROR: Benchmarks completed with errors, aborting.") + sys.exit(1) + + # Compare against previous results and abort if these are worse. + if args.compare_to: + print("=== Comparing against previous results ===") + try: + with open(args.compare_to) as compare: + compare = json.load(compare) + except IOError: + print("WARNING: Failed to load previous results from '{}'.".format(args.compare_to)) + compare = {} + + for crate, latest in benchmarks.items(): + previous = compare.get(crate, None) + if previous is None: + continue + + for benchmark, result in latest.items(): + previous_result = previous.get(benchmark, None) + if previous_result is None: + continue + + diff = result['median'] - previous_result['median'] + print("{crate}/{name}: difference {diff} ns / iter, previous deviation {deviation} ns".format( + crate=crate, + name=benchmark, + diff=diff, + deviation=previous_result['deviation'], + )) + + max_diff = max(args.fail_deviations * previous_result['deviation'], 100) + + if diff > max_diff: + print("ERROR: Benchmark '{}' is much slower in the current build:".format(benchmark)) + print(" Runtime:", result['median'], "ns") + print(" Difference:", diff, "ns") + print(" Deviation:", previous_result['deviation'], "ns") + print("") + # TODO: Performance on CI is too variable to fail builds due to slow benchmarks. + # have_errors = True + + # Store results. 
+ if args.output: + with open(args.output, 'w') as output: + json.dump(benchmarks, output) + + if have_errors: + sys.exit(1) diff --git a/scripts/make-release.py b/scripts/make-release.py new file mode 100755 index 00000000000..a7cf1568cf9 --- /dev/null +++ b/scripts/make-release.py @@ -0,0 +1,276 @@ +#!/usr/bin/env python3 +import argparse +import collections +import os +import re +import sys +import subprocess + +SECTION = re.compile(r'^\[(.+)\]') +VERSION = re.compile(r'(version\s*=\s*")(.+)(")') +DEPENDENCIES = re.compile(r'^(?:dependencies|build-dependencies|target\..+?\.dependencies|dependencies\.(\w+))$') +INTERNAL_CRATES = re.compile(r'ekiden-.*') +DOCKER_FROM = re.compile(r'FROM (.+?)(:.+)?$') +CI_IMAGE = re.compile(r'(\s*-\s*image:\s*)(.+?)$') +DEV_IMAGE = re.compile(r'(ekiden/development:).+(})') + +# Message used for version bump commits. +VERSION_BUMP_MESSAGE = "Bump version to {version}" +# Message used for release tags. +TAG_MESSAGE = "Release {version}" + + +def git(*args, **kwargs): + """Run a Git command and return its output.""" + return subprocess.check_output(['git'] + list(args), **kwargs).decode('utf8').strip() + + +def cargo(*args, **kwargs): + """Run a Cargo command.""" + return subprocess.check_call(['cargo'] + list(args), **kwargs) + + +def docker(*args, **kwargs): + """Run a Docker command.""" + return subprocess.check_call(['docker'] + list(args), **kwargs) + + +def get_crates(root_dir): + """Return crates under the given directory.""" + for root, dirs, files in os.walk(root_dir): + if 'Cargo.toml' in files: + # Skip untracked crates. + if not git('ls-files', os.path.join(root, 'Cargo.toml')): + continue + + with open(os.path.join(root, 'Cargo.toml')) as config_file: + config = config_file.readlines() + + # Skip Cargo.tomls without packages. + for line in config: + section = SECTION.match(line) + if section and section.group(1) == 'package': + break + else: + continue + + yield (root, config) + + +def replace_version(string, new_version): + """Replace version in given string.""" + return VERSION.sub('\\g<1>{}\\g<3>'.format(new_version), string) + + +def bump_version(root_dir, new_version): + """Bump version of all crates.""" + for path, config in get_crates(root_dir): + print("Processing crate '{}'".format(path)) + + output = [] + current_section = None + for line in config: + section = SECTION.match(line) + if section: + current_section = section.group(1) + + # Replace version in package metadata. + if current_section == 'package': + line = replace_version(line, new_version) + + # Replace version in dependencies. + dependencies = DEPENDENCIES.match(current_section) + if dependencies: + if dependencies.group(1): + print(dependencies.group(1)) + crate = dependencies.group(1) + else: + try: + crate = line.split('=')[0].strip() + except IndexError: + crate = '' + + if INTERNAL_CRATES.match(crate): + line = replace_version(line, new_version) + + output.append(line) + + # Write updated Cargo.toml. + with open(os.path.join(path, 'Cargo.toml'), 'w') as config_file: + config_file.write(''.join(output)) + + +def commit(version, sign=False): + """Create a Git commit.""" + # Add all modified files. + git('add', '--update') + + # Commit changes. 
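+    # Passing an explicit --no-gpg-sign ensures a user's commit.gpgsign=true
+    # configuration cannot sign the release commit unintentionally.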
+    args = ['commit', '--message', VERSION_BUMP_MESSAGE.format(version=version)]
+    if sign:
+        args += ['--gpg-sign']
+    else:
+        args += ['--no-gpg-sign']
+
+    git(*args)
+
+
+def create_tag(version, sign=False):
+    """Create a Git tag."""
+    args = ['tag', '--message', TAG_MESSAGE.format(version=version), version]
+    if sign:
+        args += ['--sign']
+
+    git(*args)
+
+
+def publish(root_dir):
+    """Publish crates."""
+    for path, config in get_crates(root_dir):
+        print("Publishing crate '{}'".format(path))
+
+        # We must use --no-verify as otherwise we cannot upload packages in arbitrary order.
+        cargo('publish', '--no-verify', cwd=path)
+
+
+def bump_docker_version(root_dir, version, image_dir, dockerfile='Dockerfile'):
+    """Bump Dockerfile dependency version."""
+    filename = os.path.join(root_dir, image_dir, dockerfile)
+    if not git('ls-files', filename):
+        print('ERROR: Dockerfile not in Git repository: {}'.format(filename))
+        sys.exit(1)
+
+    with open(filename) as dockerfile:
+        lines = dockerfile.readlines()
+
+    output = []
+    for line in lines:
+        upstream = DOCKER_FROM.match(line)
+        if upstream:
+            line = DOCKER_FROM.sub(r'FROM \1:{}'.format(version), line)
+
+        output.append(line)
+
+    # Write updated Dockerfile.
+    with open(filename, 'w') as dockerfile:
+        dockerfile.write(''.join(output))
+
+
+def docker_build(root_dir, version, docker_dir, image):
+    """Build and tag a Docker image."""
+    docker(
+        'build', '--force-rm', '--no-cache', '-t', '{}:{}'.format(image, version), '.',
+        cwd=os.path.join(root_dir, docker_dir),
+    )
+
+
+def docker_push(image, tag):
+    """Push Docker image."""
+    docker('push', '{}:{}'.format(image, tag))
+
+
+def ci_update_image(root_dir, image, tag):
+    """Update image used on CI."""
+    filename = os.path.join(root_dir, '.circleci/config.yml')
+
+    with open(filename) as ci_file:
+        lines = ci_file.readlines()
+
+    output = []
+    for line in lines:
+        line = CI_IMAGE.sub(r'\1{}:{}'.format(image, tag), line)
+
+        output.append(line)
+
+    # Write updated CI configuration.
+    with open(filename, 'w') as ci_file:
+        ci_file.write(''.join(output))
+
+
+def script_update_version(root_dir, script, tag):
+    """Update image used in sgx-enter script."""
+    filename = os.path.join(root_dir, 'scripts', script)
+
+    with open(filename) as dev_file:
+        lines = dev_file.readlines()
+
+    output = []
+    for line in lines:
+        line = DEV_IMAGE.sub(r'\g<1>{}\g<2>'.format(tag), line)
+
+        output.append(line)
+
+    # Write updated script.
+    with open(filename, 'w') as dev_file:
+        dev_file.write(''.join(output))
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description="Make an Ekiden release")
+    parser.add_argument('version', type=str,
+                        help="New version to release as")
+    parser.add_argument('--dev-version', type=str,
+                        help="New development version to use after release")
+    parser.add_argument('--sign', action='store_true',
+                        help="Sign commits and tags")
+    parser.add_argument('--git-remote', type=str, default='origin',
+                        help="Git remote to push to")
+    parser.add_argument('--no-publish', action='store_false', dest='publish',
+                        help="Skip Cargo publish")
+    parser.add_argument('--no-push', action='store_false', dest='push',
+                        help="Skip Git push")
+    parser.add_argument('--bump-docker-images', action='store_true',
+                        help="Also version bump, build and tag Docker images")
+    args = parser.parse_args()
+
+    # Parse current top-level directory.
+    root_dir = git('rev-parse', '--show-toplevel')
+    # Ensure Git directory is clean.
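+    # --untracked-files=no means only changes to tracked files block a release;
+    # untracked files may remain in the working tree.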
+    status = git('status', '--porcelain', '--untracked-files=no')
+    if status:
+        print("ERROR: Repository is not clean, please commit or stash your changes.")
+        sys.exit(1)
+
+    # Bump version to new version.
+    print("=== Bumping versions to '{}'...".format(args.version))
+    bump_version(root_dir, args.version)
+
+    # Build and tag Docker images.
+    if args.bump_docker_images:
+        print('=== Building and tagging Docker images...')
+        bump_docker_version(root_dir, args.version, 'docker/testing')
+        bump_docker_version(root_dir, args.version, 'docker/deployment', dockerfile='Dockerfile.build')
+        ci_update_image(root_dir, 'ekiden/testing', args.version)
+        script_update_version(root_dir, 'sgx-enter.sh', args.version)
+        script_update_version(root_dir, 'sgx-enter-hw.sh', args.version)
+
+        docker_build(root_dir, args.version, 'docker/development', 'ekiden/development')
+        docker_build(root_dir, args.version, 'docker/testing', 'ekiden/testing')
+        docker_push('ekiden/development', args.version)
+        docker_push('ekiden/testing', args.version)
+
+    # Add modified files and commit version bump.
+    print("=== Committing version bump...")
+    commit(args.version, sign=args.sign)
+
+    # Create tag.
+    print("=== Creating release tag...")
+    create_tag(args.version, sign=args.sign)
+
+    # Cargo publish.
+    if args.publish:
+        print("=== Publishing to Crates.io...")
+        publish(root_dir)
+
+    # Change development version when configured.
+    if args.dev_version:
+        print("=== Bumping versions to '{}'...".format(args.dev_version))
+        bump_version(root_dir, args.dev_version)
+
+        print("=== Committing version bump...")
+        commit(args.dev_version, sign=args.sign)
+
+    # Push changes to remote.
+    if args.push:
+        print("=== Pushing to {}...".format(args.git_remote))
+        git('push', args.git_remote)
+        git('push', args.git_remote, args.version)
diff --git a/scripts/parse_enclave.py b/scripts/parse_enclave.py
new file mode 100755
index 00000000000..39b1256e5c2
--- /dev/null
+++ b/scripts/parse_enclave.py
@@ -0,0 +1,499 @@
+#!/usr/bin/env python
+"""
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Copyright (c) 2016 Nagravision S.A.
+"""
+import argparse
+
+from binascii import hexlify
+from struct import unpack
+import sys
+
+if sys.version_info[0] != 2:
+    print('sorry, parse_enclave.py does not support Python 3 yet :(')
+    sys.exit(1)
+
+try:
+    from elftools.elf.elffile import ELFFile
+except:
+    print('elftools needed!
try: pip install pyelftools') + sys.exit(1) + + +def rsa_check(n, s, q1, q2): + qq1 = s**2 // n + if qq1 != q1: + return False + qq2 = (s**3 - q1*s*n) // n + if qq2 != q2: + return False + return True + + +class Parser(object): + def __init__(self, filename): + # we need to pass a stream to ELFFile + self.filename = filename + try: + self.blob = open(filename, 'rb').read() + except IOError as e: + print('%s' % str(e)) + sys.exit(1) + + def find_sgxmeta_header(self): + sgxmeta_header = "\x4c\x0e\x5d\x63\x94\x02\xa8\x86\x01\x00\x00\x00\x01\x00\x00\x00" + pos = self.blob.find(sgxmeta_header) + if pos != -1: + return pos + return None + + def find_sigstruct_header(self): + sigstruct_header = b"\x06\x00\x00\x00\xe1\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00" + sigstruct_header2 = b"\x01\x01\x00\x00\x60\x00\x00\x00\x60\x00\x00\x00\x01\x00\x00\x00" + # find the first header + pos = self.blob.find(sigstruct_header) + if pos != -1: + # check the second header, 8 bytes after the first one + if self.blob[pos+24:][:16] == sigstruct_header2: + # we did a match, return pos + return pos + return None + + def find_weak_sigstruct_header(self): + sigstruct_header = b"\x06\x00\x00\x00\xe1\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x80" + sigstruct_header2 = b"\x01\x01\x00\x00\x60\x00\x00\x00\x60\x00\x00\x00\x01\x00\x00\x00" + # find the first header + pos = self.blob.find(sigstruct_header) + if pos != -1: + # check the second header, 8 bytes after the first one + if self.blob[pos+24:][:16] == sigstruct_header2: + # we did a match, return pos + return pos + return None + + # as found in 64 bit platform enclaves + def find_ecalls_offset(self): + # NOTE: this is a best effort heuristic to extract ECALLs table + # memory address. It's based on manual analysis and rely + # on finding the right things at the expected place. + ecalls_magic = "\x44\x49\x43\x4f" + # it's usually located in more than one place + pos = 0 + while True: + pos = self.blob.find(ecalls_magic, pos+4) + if pos == -1: + break + # skip danger zone + pos += 16 + # find mov opcode in next 32 bytes (488b15xxxxxxxx) + # movpos = self.blob.find("\x48\x8b\x15", pos, pos+32) + movpos = self.blob.find("\x48\x8b", pos, pos+32) + # we have a match! + if movpos != -1: + # extract address offset from mov opcode + offset, = unpack("= section['raddr'] and raddr < section['raddr'] + section['rsize']: + return raddr - section['raddr'] + section['vaddr'] + return None + + def get_raddr(self, vaddr): + for section in self.get_sections(): + if vaddr >= section['vaddr'] and \ + vaddr < section['vaddr'] + section['rsize']: + return vaddr - section['vaddr'] + section['raddr'] + return None + + + def size(self): + return len(self.blob) + + def sgxmeta(self, pos): + values = [] + values.append(('header', self.blob[pos:][:16])) + values.append(('struct_size', unpack(">1&1)) + fields.append(('mode64bit', ord(blob[0])>>2&1)) + fields.append(('reserved', ord(blob[0])>>3&1)) + fields.append(('provisionkey', ord(blob[0])>>4&1)) + fields.append(('einitokenkey', ord(blob[0])>>5&1)) + # reserved? bits 6:63 + fields.append(('xfrm', blob[8:])) + return fields + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description=None) + parser.add_argument('filename', type=str, + help="Signed enclave filename") + parser.add_argument('--only-mr-enclave', action='store_true', + help="Only output MRENCLAVE") + args = parser.parse_args() + + fname = args.filename + p = Parser(fname) + + if args.only_mr_enclave: + # Only output MRENCLAVE. 
+ sigstruct_pos = p.find_sigstruct_header() + sigstruct = p.sigstruct(sigstruct_pos) + mr_enclave = [v for k, v in sigstruct if k == 'enclavehash'][0] + sys.stdout.write(mr_enclave) + sys.exit(0) + + print('Enclave file: %s' % fname) + print('Enclave size: %d bytes' % p.size()) + sigstruct_pos = p.find_sigstruct_header() + if sigstruct_pos: + print('SIGSTRUCT found at %s' % hex(sigstruct_pos)) + else: + print('SIGSTRUCT not found. trying with weak header') + sigstruct_pos = p.find_weak_sigstruct_header() + if sigstruct_pos: + print('Weak sigstruct found at 0x%s' % hex(sigstruct_pos)) + else: + sys.exit(1) + + sigstruct = p.sigstruct(sigstruct_pos) + # print sigstruct + for k, v in sigstruct: + if isinstance(v, (bytes)): + print("%20s\t%s" % (k.upper(), hexlify(v))) + else: + print("%20s\t%d" % (k.upper(), v)) + + print('\n') + print('# ATTRIBUTES\n') + attrs = p.attributes(sigstruct[12][1]) + # print attributes + print('%20s\t%d' % ('DEBUG', attrs[1][1])) + print('%20s\t%d' % ('MODE64BIT', attrs[2][1])) + print('%20s\t%d' % ('PROVISIONKEY', attrs[4][1])) + print('%20s\t%d' % ('EINITTOKEN', attrs[5][1])) + + # now, let's parse sgxmeta section + sgxmeta_pos = p.find_sgxmeta_header() + if sgxmeta_pos: + sgxmeta = p.sgxmeta(sgxmeta_pos) + print('\n# sgxmeta found at 0x%s\n' % hex(sgxmeta_pos)) + for k, v in sgxmeta: + if isinstance(v, (long, int)): + print('%20s\t%d' % (k.upper(), v)) + else: + print('%20s\t%s' % (k.upper(), hexlify(v))) + else: + print('\n# sgxmeta not found') + + # locating ECALLs table + epos = p.find_ecall_table() + if epos: + print('\n# ECALLs table found at 0x%x' % epos) + necalls, = unpack("/dev/null | grep ENCLAVEHASH | cut -f2) + cargo run -p "$contract-client" -- --mr-enclave "$mr_enclave" "$@" +else + contract="$1" + shift 1 + cargo run -p ekiden-compute "$PROJ_ROOT/target/enclave/$contract.signed.so" -- "$@" +fi diff --git a/scripts/sgx-enter-hw.sh b/scripts/sgx-enter-hw.sh new file mode 100755 index 00000000000..52e144633c8 --- /dev/null +++ b/scripts/sgx-enter-hw.sh @@ -0,0 +1,36 @@ +#!/bin/bash -e + +# Working directory is determined by using git, so we can use the same script +# with external repositories which use their own root. +WORK_DIR=$( git rev-parse --show-toplevel ) +# Name of the ekiden container. +EKIDEN_CONTAINER_NAME=${EKIDEN_CONTAINER_NAME:-$(basename ${WORK_DIR})} + +ekiden_image=${EKIDEN_DOCKER_IMAGE:-ekiden/development:0.1.0-alpha.0} +ekiden_shell=${EKIDEN_DOCKER_SHELL:-bash} + +which docker >/dev/null || { + echo "ERROR: Please install Docker first." + exit 1 +} + +# Start SGX Rust Docker container. +if [ ! "$(docker ps -q -f name=${EKIDEN_CONTAINER_NAME})" ]; then + if [ "$(docker ps -aq -f name=${EKIDEN_CONTAINER_NAME})" ]; then + docker start ${EKIDEN_CONTAINER_NAME} + docker exec -i -t ${EKIDEN_CONTAINER_NAME} /usr/bin/env $ekiden_shell + else + # privileged for aesmd + docker run -t -i \ + --privileged \ + --name "${EKIDEN_CONTAINER_NAME}" \ + -v ${WORK_DIR}:/code \ + -e "SGX_MODE=HW" \ + -e "INTEL_SGX_SDK=/opt/sgxsdk" \ + -w /code \ + "$ekiden_image" \ + /usr/bin/env $ekiden_shell + fi +else + docker exec -i -t ${EKIDEN_CONTAINER_NAME} /usr/bin/env $ekiden_shell +fi diff --git a/scripts/sgx-enter.sh b/scripts/sgx-enter.sh new file mode 100755 index 00000000000..e41f9e3a4c2 --- /dev/null +++ b/scripts/sgx-enter.sh @@ -0,0 +1,34 @@ +#!/bin/bash -e + +# Working directory is determined by using git, so we can use the same script +# with external repositories which use their own root. 
+WORK_DIR=$( git rev-parse --show-toplevel ) +# Name of the ekiden container. +EKIDEN_CONTAINER_NAME=${EKIDEN_CONTAINER_NAME:-$(basename ${WORK_DIR})} + +ekiden_image=${EKIDEN_DOCKER_IMAGE:-ekiden/development:0.1.0-alpha.0} +ekiden_shell=${EKIDEN_DOCKER_SHELL:-bash} + +which docker >/dev/null || { + echo "ERROR: Please install Docker first." + exit 1 +} + +# Start SGX Rust Docker container. +if [ ! "$(docker ps -q -f name=${EKIDEN_CONTAINER_NAME})" ]; then + if [ "$(docker ps -aq -f name=${EKIDEN_CONTAINER_NAME})" ]; then + docker start ${EKIDEN_CONTAINER_NAME} + docker exec -i -t ${EKIDEN_CONTAINER_NAME} /usr/bin/env $ekiden_shell + else + docker run -t -i \ + --name "${EKIDEN_CONTAINER_NAME}" \ + -v ${WORK_DIR}:/code \ + -e "SGX_MODE=SIM" \ + -e "INTEL_SGX_SDK=/opt/sgxsdk" \ + -w /code \ + "$ekiden_image" \ + /usr/bin/env $ekiden_shell + fi +else + docker exec -i -t ${EKIDEN_CONTAINER_NAME} /usr/bin/env $ekiden_shell +fi diff --git a/scripts/show-profile.py b/scripts/show-profile.py new file mode 100755 index 00000000000..847e17d2919 --- /dev/null +++ b/scripts/show-profile.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +import argparse + +import numpy as np + +EKIDEN_PROFILE_PREFIX = 'ekiden-profile:' + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description=None) + parser.add_argument('profile', type=str, + help="Profile output file") + parser.add_argument('--sort', type=str, default='-total', + help="Sort key and order") + parser.add_argument('--columns', type=str, default='name,total', + help="Comma-separated list of columns") + args = parser.parse_args() + + # Extract data from profile. + data = {} + with open(args.profile) as profile: + for line in profile: + if not line.startswith(EKIDEN_PROFILE_PREFIX): + continue + + line = line[len(EKIDEN_PROFILE_PREFIX):] + function, duration = line.split('=') + sec, nsec = duration.split(',') + duration = int(sec) * 10**9 + int(nsec) + + data.setdefault(function, []).append(duration) + + # Process data. + processed = [] + for function, durations in data.items(): + processed.append({ + 'name': function, + 'total': np.sum(durations), + 'mean': int(np.mean(durations)), + 'min': np.min(durations), + 'max': np.max(durations), + 'std': int(np.std(durations)), + }) + + # Show data. + descending = args.sort[0] == '-' + if descending: + sort_key = args.sort[1:] + else: + sort_key = args.sort + + columns = args.columns.split(',') + columns_format = ' '.join(['{{{}}}'.format(column) for column in columns]) + + # Determine column widths. 
+ column_widths = {} + for function in processed: + for column in columns: + column_width = len(str(function[column])) + column_widths[column] = max(column_widths.get(column, len(column)), column_width) + + header = {} + for column in columns: + header[column] = column.ljust(column_widths[column]) + + print(columns_format.format(**header)) + + for function in sorted(processed, key=lambda data: data[sort_key], reverse=descending): + row = {} + for column in columns: + row[column] = str(function[column]).ljust(column_widths[column]) + + print(columns_format.format(**row)) diff --git a/scripts/start-aesmd.sh b/scripts/start-aesmd.sh new file mode 100644 index 00000000000..bcb7d5e3743 --- /dev/null +++ b/scripts/start-aesmd.sh @@ -0,0 +1 @@ +su -s /bin/sh -c 'exec /opt/intel/sgxpsw/aesm/aesm_service --no-daemon' aesmd & diff --git a/scripts/tendermint-clear.sh b/scripts/tendermint-clear.sh new file mode 100755 index 00000000000..0b0008c413d --- /dev/null +++ b/scripts/tendermint-clear.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +DATA_PATH="/tmp/tendermint" +GENESIS_PATH=$DATA_PATH/genesis.json +IMAGE_TAG=tendermint/tendermint:0.13.0 + +# Check to see if docker is on the path +if [ ! $(which docker) ]; then + echo "Please install docker" + exit 1 +fi + +# Clear the data directory +if [ -f $GENESIS_PATH ]; then + echo "Clearing Tendermint directory" + docker run -it --rm -v "$DATA_PATH:/tendermint" $IMAGE_TAG unsafe_reset_all +else + echo "Cannot recognize Tendermint directory" +fi + diff --git a/scripts/tendermint-start.sh b/scripts/tendermint-start.sh new file mode 100755 index 00000000000..cfcdb8e5e9a --- /dev/null +++ b/scripts/tendermint-start.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +CWD=$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd ) +DATA_PATH="/tmp/tendermint" +GENESIS_PATH=${DATA_PATH}/genesis.json +IMAGE_TAG=tendermint/tendermint:0.13.0 + +# Check to see if docker is on the path +if [ ! $(which docker) ]; then + echo "Please install docker" + exit 1 +fi + +# Initialize the data directory +if [ -f $GENESIS_PATH ]; then echo "Tendermint directory already initialized" +else + echo "Initializing Tendermint data directory" + docker run -it --rm -v "${DATA_PATH}:/tendermint" $IMAGE_TAG init +fi + +# Start +docker run -it --rm \ + --name "tendermint" \ + --network container:ekiden \ + -v "${DATA_PATH}:/tendermint" \ + $IMAGE_TAG node \ + --consensus.create_empty_blocks=false \ + --rpc.laddr tcp://0.0.0.0:46666 \ + --rpc.grpc_laddr tcp://0.0.0.0:46657 diff --git a/testnet/contract_benchmarks/.gitignore b/testnet/contract_benchmarks/.gitignore new file mode 100644 index 00000000000..e0f2660266a --- /dev/null +++ b/testnet/contract_benchmarks/.gitignore @@ -0,0 +1,2 @@ +generated/* +results/* \ No newline at end of file diff --git a/testnet/contract_benchmarks/Makefile b/testnet/contract_benchmarks/Makefile new file mode 100644 index 00000000000..a7f8d224ea7 --- /dev/null +++ b/testnet/contract_benchmarks/Makefile @@ -0,0 +1,53 @@ +SHELL := /bin/bash + +# List of experiments. If adding a new experiment, the name below must match the enclave binary name. +EXPERIMENTS := token ethtoken dp-credit-scoring iot-learner + +create: check-valid-experiment + @echo "Preparing '$(experiment)' experiment" + @mkdir -p generated + @echo $(experiment) > generated/.experiment_name + + # Prepare secrets. + @echo "==> Preparing secrets" + @kubectl create secret generic ias \ + --from-file=spid=../../keys/attestation/spid \ + --from-file=pkcs12=../../keys/attestation/client.pfx + + # Generate token file. 
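+	# Substitute the experiment name for each BENCHMARK placeholder in
+	# cluster.yaml, dropping its first two header lines; the note written
+	# above takes their place in the generated manifest.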
+ @echo "==> Generating generated/$(experiment).yaml file" + @echo "### Note: This file was auto-generated from ../custer.yaml." > generated/$(experiment).yaml + @sed 's/$$BENCHMARK/$(experiment)/g' cluster.yaml | tail -n +3 >> generated/$(experiment).yaml + + @echo "==> Deploying nodes for benchmark" + @kubectl create -f generated/$(experiment).yaml + +destroy: + $(eval experiment = `cat generated/.experiment_name`) + @echo "Destroying $(experiment) experiment" + + @echo "==> Destroying nodes" + @kubectl delete --ignore-not-found=true --now=true --force=true -f generated/$(experiment).yaml + @kubectl delete --ignore-not-found=true --now=true --force=true pvc -l app=ekiden-benchmark + @kubectl delete --ignore-not-found=true --now=true --force=true pod ekiden-$(experiment)-benchmark + + @echo "==> Destroying secrets" + @kubectl delete --ignore-not-found=true --now=true --force=true secret ias + +benchmark: + $(eval experiment = `cat generated/.experiment_name`) + @echo "==> Running benchmarks for $(experiment)" + @./benchmark.sh $(experiment) + +.PHONY: create destroy benchmark + +check-valid-experiment: + @ if [ "$(experiment)" = "" ]; then \ + echo "You forgot to specify experiment=[$(EXPERIMENTS)]"; \ + exit 1; \ + fi + + @ if ! [[ "$(EXPERIMENTS)" =~ "$(experiment)" ]]; then \ + echo "Invalid experiment name '$(experiment)'. Valid values are: $(EXPERIMENTS)"; \ + exit 1; \ + fi diff --git a/testnet/contract_benchmarks/README.md b/testnet/contract_benchmarks/README.md new file mode 100644 index 00000000000..20c91c09a0b --- /dev/null +++ b/testnet/contract_benchmarks/README.md @@ -0,0 +1,66 @@ +# Ekiden Testnet for contract benchmarking + +This is a simple Ekiden testnet implemented using a single Kubernetes cluster. You can deploy it on a local Kubernetes installation by using [minikube](https://github.com/kubernetes/minikube) (see link for installation instructions). + +Once you have your Kubernetes installation running and `kubectl` installed you can use the following commands: + +To deploy: +```bash +$ make create experiment=[experiment name] +``` + +Where the experiment name is one of `token`, `ethtoken`, `dp-credit-scoring`, `iot-learner`. + +Before running benchmarks on the cluster, one of the nodes should be tagged to run the benchmark client. If no node +is tagged, running the following command will fail with an instruction on how to tag a node. The reason for this is +to ensure that different benchmarks are run in a consistent manner as otherwise Kubernetes may schedule the benchmark +client on an arbitrary node. + +To run benchmarks on the cluster: +```bash +$ make benchmark +``` + +To destroy: +```bash +$ make destroy +``` + +Note that the destroy command may take some time to complete and may return a timeout. In this case, just run it again and wait until it completes successfully. 
+
+## Getting the Ekiden compute node IP and port
+
+If you are using minikube, you can use the following command to get the correct IP and port you need to point your Ekiden client to:
+```bash
+$ minikube service --url ekiden-[experiment name]-proxy
+```
+
+## Running the benchmark client
+
+To run a simple benchmark against the testnet for the `token` contract, build the client with the `benchmark` feature enabled (note that for some reason this doesn't work when called from the workspace using `-p token-client`):
+```
+$ cd /code/clients/token
+$ cargo run --features benchmark -- --benchmark-runs 100 --benchmark-threads 4 --mr-enclave <mr-enclave> --host <host> --port <port>
+```
+
+Where `host` and `port` are values obtained from `minikube service` as above.
+
+You can adapt these instructions to run the benchmark for other contracts.
+
+## Building the ekiden/core image
+
+The testnet uses the `ekiden/core` Docker image, which contains prebuilt Ekiden binaries and contracts. In order to (re)build this Docker image, you can run the following command in the top-level Ekiden directory:
+```bash
+$ ./docker/deployment/build-images.sh
+```
+
+This will build `ekiden/core` locally and you can then push the image to your preferred registry.
+
+## Deploying on AWS
+
+Using [kops](https://github.com/kubernetes/kops/blob/master/docs/aws.md) is recommended to set up a Kubernetes cluster on AWS.
+
+To set up AWS in multiple availability zones and use 4 nodes, set up the cluster as follows:
+```bash
+$ kops create cluster --zones us-west-2a,us-west-2b,us-west-2c --node-count 4 ${NAME}
+```
diff --git a/testnet/contract_benchmarks/benchmark.sh b/testnet/contract_benchmarks/benchmark.sh
new file mode 100755
index 00000000000..2e2697cd104
--- /dev/null
+++ b/testnet/contract_benchmarks/benchmark.sh
@@ -0,0 +1,106 @@
+#!/bin/bash -e
+
+if [ -z "$1" ]
+  then
+    echo "Usage: $0 [experiment name]"
+    exit 1
+fi
+EXPERIMENT="$1"
+
+# Set benchmark binaries to run.
+# IMPORTANT: These binaries must exist in the ekiden/core image!
+case $EXPERIMENT in
+    "token")
+        BENCHMARK_BINARIES="benchmark-token-get-balance benchmark-token-transfer"
+        ;;
+    "ethtoken")
+        BENCHMARK_BINARIES="benchmark-ethtoken-get-balance benchmark-ethtoken-transfer"
+        ;;
+    "dp-credit-scoring")
+        BENCHMARK_BINARIES="benchmark-dp-credit-scoring-infer benchmark-dp-credit-scoring-train"
+        ;;
+    "iot-learner")
+        BENCHMARK_BINARIES="benchmark-iot-learner-infer benchmark-iot-learner-train"
+        ;;
+    *)
+        echo "Unrecognized experiment name: ${EXPERIMENT}"
+        exit 1
+esac
+
+# Number of threads to run. Note that valid values depend on configuration of the
+# 'contract' container in cluster.yaml.
+THREADS="8 16 32"
+# Number of runs to execute per thread.
+RUNS="1000"
+# Target node.
+TARGET="ekiden-benchmark-1"
+# Node placement condition based on labels.
+NODE_LABEL_KEY="experiments"
+NODE_LABEL_VALUE="client"
+# Results output file.
+OUTPUT="${EXPERIMENT}.$(date --iso-8601=ns).txt"
+
+# Helper logger function.
+log() {
+    echo $* | tee -a "results/${OUTPUT}"
+}
+
+# Helper function for running an Ekiden benchmark.
+benchmark() {
+    local script=$*
+
+    kubectl run ekiden-${EXPERIMENT}-benchmark \
+        --attach \
+        --rm \
+        --overrides='{"apiVersion": "v1", "spec": {"nodeSelector": {"'${NODE_LABEL_KEY}'": "'${NODE_LABEL_VALUE}'"}}}' \
+        --command \
+        --quiet \
+        --image=ekiden/core:latest \
+        --image-pull-policy=Always \
+        --restart=Never \
+        -- bash -c "${script}" | tee -a "results/${OUTPUT}"
+}
+
+# Check if any node is tagged.
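+# The benchmark pod is pinned to the tagged node via the nodeSelector override
+# above, so without a labeled node it would never schedule; fail fast instead.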
+if [ -z "$(kubectl get nodes -l "${NODE_LABEL_KEY} == ${NODE_LABEL_VALUE}" -o name)" ]; then + echo "ERROR: No nodes are tagged to run the benchmark client." + echo "" + echo "Use the following command to tag a node first:" + echo " kubectl label nodes ${NODE_LABEL_KEY}=${NODE_LABEL_VALUE}" + echo "" + echo "The following nodes are available:" + kubectl get nodes + echo "" + echo "Current pod placements are as follows:" + kubectl get pods -o wide + echo "" + exit 1 +fi + +echo "Results will be written to: results/${OUTPUT}" +mkdir -p results + +log "Starting benchmarks at $(date --iso-8601=seconds)." + +# Run benchmarks. +for benchmark in ${BENCHMARK_BINARIES}; do + log "------------------------------ ${benchmark} ------------------------------" + + for threads in ${THREADS}; do + log "Benchmarking with ${threads} thread(s)." + sleep 5 + + MRENCLAVE_CMD="\$(cat /ekiden/lib/${EXPERIMENT}.mrenclave)" + + benchmark \ + ${benchmark} \ + --benchmark-threads ${threads} \ + --benchmark-runs ${RUNS} \ + --host ${TARGET}.ekiden-benchmark.default.svc.cluster.local \ + --mr-enclave ${MRENCLAVE_CMD} + + log "" + done +done + +log "Benchmarks finished at $(date --iso-8601=seconds)." diff --git a/testnet/contract_benchmarks/cluster.yaml b/testnet/contract_benchmarks/cluster.yaml new file mode 100644 index 00000000000..23bbe35d753 --- /dev/null +++ b/testnet/contract_benchmarks/cluster.yaml @@ -0,0 +1,265 @@ +### Note: This file is a template. The create target of the Makefile uses this +### template to generate a custom yaml for each experiment. +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + name: ekiden-benchmark + labels: + app: ekiden-benchmark +spec: + ports: + - port: 46656 + name: tendermint-p2p + clusterIP: None + selector: + app: ekiden-benchmark +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: tendermint-config +data: + seeds: "ekiden-benchmark-0,ekiden-benchmark-1,ekiden-benchmark-2,ekiden-benchmark-3" + validators: "ekiden-benchmark-0,ekiden-benchmark-1,ekiden-benchmark-2,ekiden-benchmark-3" + validator.power: "10" + genesis.json: |- + { + "genesis_time": "2018-01-25T00:00:00.000Z", + "chain_id": "ekiden-benchmark-test-net", + "validators": [], + "app_hash": "" + } + pub_key_nginx.conf: |- + server { + listen 80 default_server; + listen [::]:80 default_server ipv6only=on; + location /pub_key.json { root /usr/share/nginx/; } + } +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: ekiden-benchmark-budget +spec: + selector: + matchLabels: + app: ekiden-benchmark + minAvailable: 2 +--- +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: ekiden-benchmark +spec: + selector: + matchLabels: + app: ekiden-benchmark + serviceName: ekiden-benchmark + replicas: 4 + template: + metadata: + labels: + app: ekiden-benchmark + spec: + initContainers: + # An initialization container, which generates the validator key pair if it doesn't yet + # exist. The public part is extracted and copied to /tendermint/pub_key.json, so it can + # be served by the public-key container. This allows nodes to discover public keys of + # each other. + - name: generate-validator + image: tendermint/tendermint:0.13.0 + imagePullPolicy: IfNotPresent + command: + - bash + - "-c" + - | + set -ex + if [ ! 
-f /tendermint/priv_validator.json ]; then + tendermint gen_validator > /tendermint/priv_validator.json + # pub_key.json will be served by public-key container + cat /tendermint/priv_validator.json | jq ".pub_key" > /tendermint/pub_key.json + fi + volumeMounts: + - name: tmdir + mountPath: /tendermint + + containers: + # Container serving the Tendermint node's public key. + - name: public-key + imagePullPolicy: IfNotPresent + image: nginx:latest + ports: + - containerPort: 80 + name: public-key + command: + - bash + - "-c" + - | + set -ex + # fixes 403 Permission Denied (open() "/tendermint/pub_key.json" failed (13: Permission denied)) + # => we cannot serve from /tendermint, so we copy the file + mkdir -p /usr/share/nginx + cp /tendermint/pub_key.json /usr/share/nginx/pub_key.json + nginx -g "daemon off;" + volumeMounts: + - name: tmdir + mountPath: /tendermint + - mountPath: /etc/nginx/conf.d/pub_key.conf + name: tmconfigdir + subPath: pub_key_nginx.conf + + # Container running a Tendermint node. + - name: tendermint + imagePullPolicy: IfNotPresent + image: tendermint/tendermint:0.13.0 + ports: + - containerPort: 46656 + name: tendermint-p2p + env: + - name: SEEDS + valueFrom: + configMapKeyRef: + name: tendermint-config + key: seeds + - name: VALIDATOR_POWER + valueFrom: + configMapKeyRef: + name: tendermint-config + key: validator.power + - name: VALIDATORS + valueFrom: + configMapKeyRef: + name: tendermint-config + key: validators + - name: TMHOME + value: /tendermint + command: + - bash + - "-c" + - | + set -ex + + # Copy genesis file template. + cp /etc/tendermint/genesis.json /tendermint/genesis.json + + # Fill genesis file with validators. + IFS=',' read -ra VALS_ARR <<< "$VALIDATORS" + fqdn_suffix=$(hostname -f | sed 's#[^.]*\.\(\)#\1#') + for v in "${VALS_ARR[@]}"; do + # Wait until validator generates priv/pub key pair. + set +e + + curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null + ERR=$? + while [ "$ERR" != 0 ]; do + sleep 5 + curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null + ERR=$? + done + set -e + + # Add validator to genesis file along with its pub_key. + curl -s "http://$v.$fqdn_suffix/pub_key.json" | jq ". as \$k | {pub_key: \$k, power: $VALIDATOR_POWER, name: \"$v\"}" > pub_validator.json + cat /tendermint/genesis.json | jq ".validators |= .+ [$(cat pub_validator.json)]" > tmpgenesis && mv tmpgenesis /tendermint/genesis.json + rm pub_validator.json + done + + # Construct seeds. + IFS=',' read -ra SEEDS_ARR <<< "$SEEDS" + seeds=() + for s in "${SEEDS_ARR[@]}"; do + seeds+=("$s.$fqdn_suffix:46656") + done + seeds=$(IFS=','; echo "${seeds[*]}") + + tendermint node \ + --p2p.seeds="$seeds" \ + --moniker="`hostname`" \ + --consensus.create_empty_blocks=false \ + --rpc.laddr tcp://0.0.0.0:46666 \ + --rpc.grpc_laddr tcp://0.0.0.0:46657 + + volumeMounts: + - name: tmdir + mountPath: /tendermint + - name: tmconfigdir + mountPath: /etc/tendermint/genesis.json + subPath: genesis.json + + # Container running an Ekiden consensus node. + - name: consensus + imagePullPolicy: Always + image: ekiden/core:latest + command: + - bash + - "-c" + - | + ekiden-consensus \ + --tendermint-host localhost \ + --tendermint-port 46657 \ + --tendermint-abci-port 46658 \ + --grpc-port 9002 + + # Container running an Ekiden key manager contract. 
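+      # It is an ordinary compute node with its own key manager and consensus
+      # disabled; the contract containers below fetch their keys from it on
+      # port 9003.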
+ - name: key-manager + imagePullPolicy: Always + image: ekiden/core:latest + command: + - bash + - "-c" + - | + ekiden-compute \ + --grpc-threads 4 \ + --port 9003 \ + --disable-key-manager \ + --consensus-host disabled \ + /ekiden/lib/ekiden-key-manager.signed.so + volumeMounts: + - name: ias + mountPath: /ias + readOnly: true + + # Container running an Ekiden contract. + # TODO: Currently all compute nodes must use the same key manager. + - name: contract + imagePullPolicy: Always + image: ekiden/core:latest + command: + - bash + - "-c" + - | + fqdn_suffix=$(hostname -f | sed 's#[^.]*\.\(\)#\1#') + + ekiden-compute \ + --grpc-threads 128 \ + --port 9001 \ + --key-manager-host ekiden-benchmark-0.${fqdn_suffix} \ + --key-manager-port 9003 \ + /ekiden/lib/$BENCHMARK.signed.so + volumeMounts: + - name: ias + mountPath: /ias + readOnly: true + + volumes: + - name: tmconfigdir + configMap: + name: tendermint-config + - name: ias + secret: + secretName: ias + + + volumeClaimTemplates: + - metadata: + name: tmdir + annotations: + volume.alpha.kubernetes.io/storage-class: anything + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 2Gi diff --git a/testnet/ethermint/Makefile b/testnet/ethermint/Makefile new file mode 100644 index 00000000000..148ade58766 --- /dev/null +++ b/testnet/ethermint/Makefile @@ -0,0 +1,14 @@ +create: + @echo "==> Deploying nodes" + @kubectl create -f ethermint.yaml + +destroy: + @echo "==> Destroying nodes" + @kubectl delete --ignore-not-found=true --now=true --force=true -f ethermint.yaml + @kubectl delete --ignore-not-found=true --now=true --force=true pvc -l app=ethermint + +benchmark: + @echo "==> Running benchmarks" + @./benchmark.sh + +.PHONY: create destroy benchmark diff --git a/testnet/ethermint/benchmark.js b/testnet/ethermint/benchmark.js new file mode 100644 index 00000000000..1580afe0710 --- /dev/null +++ b/testnet/ethermint/benchmark.js @@ -0,0 +1,32 @@ +// Test account. This is defined in genesis.json and the keystore used during deployment. +var TEST_ACCOUNT = "0x7eff122b94897ea5b0e2a9abf47b86337fafebdc"; +var TEST_ACCOUNT_PASSWORD = "1234"; + +// Unlock test account. +console.log("Test account balance:", web3.eth.getBalance(TEST_ACCOUNT)); +console.log("Unlocking test account."); +web3.personal.unlockAccount(TEST_ACCOUNT, TEST_ACCOUNT_PASSWORD, 100000); + +// Deploy ERC20 contract. 
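+// The ABI and bytecode below come from compiling a standard ERC20 token
+// contract (with burn and approveAndCall). Note that web3's contract.new()
+// invokes the callback twice: first with only the transactionHash set, and
+// again with the address filled in once the contract is mined.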
+console.log("Deploying ERC20 contract..."); +var initialSupply = "8" ; +var tokenName = "Ekiden Token" ; +var tokenSymbol = "EKI" ; +var tokenerc20Contract = web3.eth.contract([{"constant":true,"inputs":[],"name":"name","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_spender","type":"address"},{"name":"_value","type":"uint256"}],"name":"approve","outputs":[{"name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"totalSupply","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_from","type":"address"},{"name":"_to","type":"address"},{"name":"_value","type":"uint256"}],"name":"transferFrom","outputs":[{"name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"decimals","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_value","type":"uint256"}],"name":"burn","outputs":[{"name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"balanceOf","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_from","type":"address"},{"name":"_value","type":"uint256"}],"name":"burnFrom","outputs":[{"name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"symbol","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_to","type":"address"},{"name":"_value","type":"uint256"}],"name":"transfer","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"_spender","type":"address"},{"name":"_value","type":"uint256"},{"name":"_extraData","type":"bytes"}],"name":"approveAndCall","outputs":[{"name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"},{"name":"","type":"address"}],"name":"allowance","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"inputs":[{"name":"initialSupply","type":"uint256"},{"name":"tokenName","type":"string"},{"name":"tokenSymbol","type":"string"}],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"name":"from","type":"address"},{"indexed":true,"name":"to","type":"address"},{"indexed":false,"name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"from","type":"address"},{"indexed":false,"name":"value","type":"uint256"}],"name":"Burn","type":"event"}]); +var tokenerc20 = tokenerc20Contract.new( + initialSupply, + tokenName, + tokenSymbol, + { + from: web3.eth.accounts[0], + data: 
'0x60606040526012600260006101000a81548160ff021916908360ff16021790555034156200002c57600080fd5b604051620012263803806200122683398101604052808051906020019091908051820191906020018051820191905050600260009054906101000a900460ff1660ff16600a0a8302600381905550600354600460003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508160009080519060200190620000d8929190620000fb565b508060019080519060200190620000f1929190620000fb565b50505050620001aa565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106200013e57805160ff19168380011785556200016f565b828001600101855582156200016f579182015b828111156200016e57825182559160200191906001019062000151565b5b5090506200017e919062000182565b5090565b620001a791905b80821115620001a357600081600090555060010162000189565b5090565b90565b61106c80620001ba6000396000f3006060604052600436106100ba576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100bf578063095ea7b31461014d57806318160ddd146101a757806323b872dd146101d0578063313ce5671461024957806342966c681461027857806370a08231146102b357806379cc67901461030057806395d89b411461035a578063a9059cbb146103e8578063cae9ca511461042a578063dd62ed3e146104c7575b600080fd5b34156100ca57600080fd5b6100d2610533565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101125780820151818401526020810190506100f7565b50505050905090810190601f16801561013f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015857600080fd5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919080359060200190919050506105d1565b604051808215151515815260200191505060405180910390f35b34156101b257600080fd5b6101ba61065e565b6040518082815260200191505060405180910390f35b34156101db57600080fd5b61022f600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610664565b604051808215151515815260200191505060405180910390f35b341561025457600080fd5b61025c610791565b604051808260ff1660ff16815260200191505060405180910390f35b341561028357600080fd5b61029960048080359060200190919050506107a4565b604051808215151515815260200191505060405180910390f35b34156102be57600080fd5b6102ea600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506108a8565b6040518082815260200191505060405180910390f35b341561030b57600080fd5b610340600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919080359060200190919050506108c0565b604051808215151515815260200191505060405180910390f35b341561036557600080fd5b61036d610ada565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156103ad578082015181840152602081019050610392565b50505050905090810190601f1680156103da5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34156103f357600080fd5b610428600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610b78565b005b341561043557600080fd5b6104ad600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190803590602001908201803590602001908080601f01602080910402602001604051908101604052809392919081815260200183838082843782019150505050505091905050610b87565b604051808215151515815260200191505060405180910390f35b34156104d257600080fd5b61051d600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610d05565b6040518082815260200191505060405180910390f35b6000
8054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156105c95780601f1061059e576101008083540402835291602001916105c9565b820191906000526020600020905b8154815290600101906020018083116105ac57829003601f168201915b505050505081565b600081600560003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506001905092915050565b60035481565b6000600560008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205482111515156106f157600080fd5b81600560008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282540392505081905550610786848484610d2a565b600190509392505050565b600260009054906101000a900460ff1681565b600081600460003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054101515156107f457600080fd5b81600460003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282540392505081905550816003600082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5836040518082815260200191505060405180910390a260019050919050565b60046020528060005260406000206000915090505481565b600081600460008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561091057600080fd5b600560008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054821115151561099b57600080fd5b81600460008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600560008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282540392505081905550816003600082825403925050819055508273ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5836040518082815260200191505060405180910390a26001905092915050565b60018054600181600116156101000203166002900480601f016020809104026020016040519081016040528092919081815260200182805460018160011615610100020316600290048015610b705780601f10610b4557610100808354040283529160200191610b70565b820191906000526020600020905b815481529060010190602001808311610b5357829003601f168201915b505050505081565b610b83338383610d2a565b5050565b600080849050610b9785856105d1565b15610cfc578073ffffffffffffffffffffffffffffffffffffffff16638f4ffcb1338630876040518563ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018481526020018373fff
fffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200180602001828103825283818151815260200191508051906020019080838360005b83811015610c91578082015181840152602081019050610c76565b50505050905090810190601f168015610cbe5780820380516001836020036101000a031916815260200191505b5095505050505050600060405180830381600087803b1515610cdf57600080fd5b6102c65a03f11515610cf057600080fd5b50505060019150610cfd565b5b509392505050565b6005602052816000526040600020602052806000526040600020600091509150505481565b6000808373ffffffffffffffffffffffffffffffffffffffff1614151515610d5157600080fd5b81600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610d9f57600080fd5b600460008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205482600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205401111515610e2d57600080fd5b600460008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205401905081600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600460008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a380600460008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054600460008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020540114151561103a57fe5b505050505600a165627a7a723058203bafed5760efde99fcb61acb15325ddc460cea0e01ea8e59eef2ade620a3ee1e0029', + gas: '4700000', + }, + function (e, contract) { + console.log(e, contract); + + if (typeof contract.address !== 'undefined') { + console.log('Contract mined! address: ' + contract.address + ' transactionHash: ' + contract.transactionHash); + } + } +); diff --git a/testnet/ethermint/benchmark.sh b/testnet/ethermint/benchmark.sh new file mode 100755 index 00000000000..d0dddddbb54 --- /dev/null +++ b/testnet/ethermint/benchmark.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +# Helper function for running an Ekiden benchmark. 
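+# It streams benchmark.js to a one-off geth container over stdin; the container
+# writes it to a file and attaches to the ethermint-0 node to execute it.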
+benchmark() { + kubectl run ethermint-benchmark \ + --stdin \ + --rm \ + --command \ + --quiet \ + --image=ethereum/client-go:latest \ + --restart=Never \ + -- sh -c " + cat > benchmark.js + + geth attach --exec \"loadScript('benchmark.js')\" http://ethermint-0.ethermint.default.svc.cluster.local:8545 + " +} + +benchmark < benchmark.js diff --git a/testnet/ethermint/ethermint.yaml b/testnet/ethermint/ethermint.yaml new file mode 100644 index 00000000000..4b6db1d2c5e --- /dev/null +++ b/testnet/ethermint/ethermint.yaml @@ -0,0 +1,280 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + name: ethermint + labels: + app: ethermint +spec: + ports: + - port: 46656 + name: tendermint-p2p + clusterIP: None + selector: + app: ethermint +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: tendermint-config +data: + seeds: "ethermint-0,ethermint-1,ethermint-2,ethermint-3" + validators: "ethermint-0,ethermint-1,ethermint-2,ethermint-3" + validator.power: "10" + genesis.json: |- + { + "genesis_time": "2018-01-25T00:00:00.000Z", + "chain_id": "ethermint-test-net", + "validators": [], + "app_hash": "" + } + pub_key_nginx.conf: |- + server { + listen 80 default_server; + listen [::]:80 default_server ipv6only=on; + location /pub_key.json { root /usr/share/nginx/; } + } + ethermint-keystore.json: |- + { + "address":"7eff122b94897ea5b0e2a9abf47b86337fafebdc", + "crypto":{ + "cipher":"aes-128-ctr", + "ciphertext":"19de8a919e2f4cbdde2b7352ebd0be8ead2c87db35fc8e4c9acaf74aaaa57dad", + "cipherparams":{"iv":"ba2bd370d6c9d5845e92fbc6f951c792"}, + "kdf":"scrypt", + "kdfparams":{ + "dklen":32, + "n":262144, + "p":1, + "r":8, + "salt":"c7cc2380a96adc9eb31d20bd8d8a7827199e8b16889582c0b9089da6a9f58e84" + }, + "mac":"ff2c0caf051ca15d8c43b6f321ec10bd99bd654ddcf12dd1a28f730cc3c13730" + }, + "id":"f86a62b4-0621-4616-99af-c4b7f38fcc48", + "version":3 + } + ethermint-genesis.json: |- + { + "config": { + "chainId": 15, + "homesteadBlock": 0, + "eip155Block": 0, + "eip158Block": 0 + }, + "nonce": "0xdeadbeefdeadbeef", + "timestamp": "0x00", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "difficulty": "0x40", + "gasLimit": "0x8000000", + "alloc": { + "0x7eff122b94897ea5b0e2a9abf47b86337fafebdc": { "balance": "100000000000000" }, + "0xc6713982649D9284ff56c32655a9ECcCDA78422A": { "balance": "10000000000000000000000000000000000" } + } + } +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: ethermint-budget +spec: + selector: + matchLabels: + app: ethermint + minAvailable: 2 +--- +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: ethermint +spec: + selector: + matchLabels: + app: ethermint + serviceName: ethermint + replicas: 4 + template: + metadata: + labels: + app: ethermint + spec: + initContainers: + # An initialization container, which generates the validator key pair if it doesn't yet + # exist. The public part is extracted and copied to /tendermint/pub_key.json, so it can + # be served by the public-key container. This allows nodes to discover public keys of + # each other. + - name: generate-validator + image: tendermint/tendermint:0.13.0 + imagePullPolicy: IfNotPresent + command: + - bash + - "-c" + - | + set -ex + if [ ! 
-f /tendermint/priv_validator.json ]; then + tendermint gen_validator > /tendermint/priv_validator.json + # pub_key.json will be served by public-key container + cat /tendermint/priv_validator.json | jq ".pub_key" > /tendermint/pub_key.json + fi + volumeMounts: + - name: tmdir + mountPath: /tendermint + + containers: + # Container serving the Tendermint node's public key. + - name: public-key + imagePullPolicy: IfNotPresent + image: nginx:latest + ports: + - containerPort: 80 + name: public-key + command: + - bash + - "-c" + - | + set -ex + # fixes 403 Permission Denied (open() "/tendermint/pub_key.json" failed (13: Permission denied)) + # => we cannot serve from /tendermint, so we copy the file + mkdir -p /usr/share/nginx + cp /tendermint/pub_key.json /usr/share/nginx/pub_key.json + nginx -g "daemon off;" + volumeMounts: + - name: tmdir + mountPath: /tendermint + - mountPath: /etc/nginx/conf.d/pub_key.conf + name: tmconfigdir + subPath: pub_key_nginx.conf + + # Container running a Tendermint node. + - name: tendermint + imagePullPolicy: IfNotPresent + image: tendermint/tendermint:0.13.0 + ports: + - containerPort: 46656 + name: tendermint-p2p + env: + - name: SEEDS + valueFrom: + configMapKeyRef: + name: tendermint-config + key: seeds + - name: VALIDATOR_POWER + valueFrom: + configMapKeyRef: + name: tendermint-config + key: validator.power + - name: VALIDATORS + valueFrom: + configMapKeyRef: + name: tendermint-config + key: validators + - name: TMHOME + value: /tendermint + command: + - bash + - "-c" + - | + set -ex + + # Copy genesis file template. + cp /etc/tendermint/genesis.json /tendermint/genesis.json + + # Fill genesis file with validators. + IFS=',' read -ra VALS_ARR <<< "$VALIDATORS" + fqdn_suffix=$(hostname -f | sed 's#[^.]*\.\(\)#\1#') + for v in "${VALS_ARR[@]}"; do + # Wait until validator generates priv/pub key pair. + set +e + + curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null + ERR=$? + while [ "$ERR" != 0 ]; do + sleep 5 + curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null + ERR=$? + done + set -e + + # Add validator to genesis file along with its pub_key. + curl -s "http://$v.$fqdn_suffix/pub_key.json" | jq ". as \$k | {pub_key: \$k, power: $VALIDATOR_POWER, name: \"$v\"}" > pub_validator.json + cat /tendermint/genesis.json | jq ".validators |= .+ [$(cat pub_validator.json)]" > tmpgenesis && mv tmpgenesis /tendermint/genesis.json + rm pub_validator.json + done + + # Construct seeds. + IFS=',' read -ra SEEDS_ARR <<< "$SEEDS" + seeds=() + for s in "${SEEDS_ARR[@]}"; do + seeds+=("$s.$fqdn_suffix:46656") + done + seeds=$(IFS=','; echo "${seeds[*]}") + + tendermint node \ + --p2p.seeds="$seeds" \ + --moniker="`hostname`" \ + --consensus.create_empty_blocks=false + + volumeMounts: + - name: tmdir + mountPath: /tendermint + - name: tmconfigdir + mountPath: /etc/tendermint/genesis.json + subPath: genesis.json + + # Container running an Ethermint node. 
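+      # It initializes its data directory from the mounted genesis/keystore
+      # files on first start, then serves the standard JSON-RPC and WebSocket
+      # endpoints that benchmark.sh connects to.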
+ - name: ethermint + imagePullPolicy: IfNotPresent + image: tendermint/ethermint:develop + command: + - bash + - "-c" + - | + cd /ethermint-init + ethermint --datadir /ethermint init + + ethermint \ + --datadir /ethermint \ + --rpc \ + --rpcaddr=0.0.0.0 \ + --ws \ + --wsaddr=0.0.0.0 \ + --rpcapi eth,net,web3,personal,admin + + volumeMounts: + - name: emdir + mountPath: /ethermint + - mountPath: /ethermint-init/genesis.json + name: tmconfigdir + subPath: ethermint-genesis.json + - mountPath: /ethermint-init/keystore/UTC--2016-10-21T22-30-03.071787745Z--7eff122b94897ea5b0e2a9abf47b86337fafebdc + name: tmconfigdir + subPath: ethermint-keystore.json + + volumes: + - name: tmconfigdir + configMap: + name: tendermint-config + + + volumeClaimTemplates: + - metadata: + name: tmdir + annotations: + volume.alpha.kubernetes.io/storage-class: anything + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 2Gi + - metadata: + name: emdir + annotations: + volume.alpha.kubernetes.io/storage-class: anything + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 2Gi diff --git a/testnet/tendermint/.gitignore b/testnet/tendermint/.gitignore new file mode 100644 index 00000000000..f9f847670b1 --- /dev/null +++ b/testnet/tendermint/.gitignore @@ -0,0 +1,8 @@ +/ips.txt +/known_hosts +/ssh_config +/tendermint +*.json +*.pem +*.stamp +*.zip diff --git a/testnet/tendermint/Makefile b/testnet/tendermint/Makefile new file mode 100644 index 00000000000..8783e7f8dbb --- /dev/null +++ b/testnet/tendermint/Makefile @@ -0,0 +1,52 @@ +TM_VERSION := 0.13.0 +TM_SHA256SUM := 36d773d4c2890addc61cc87a72c1e9c21c89516921b0defb0edfebde719b4b85 + +prep: authorize.stamp connect + +stop: | connect + ./cmd.sh killall tendermint ekiden-consensus + +clear: | install-tendermint.stamp connect + ./cmd.sh ./tendermint unsafe_reset_all + +genesis.stamp: genesis.json | install-tendermint.stamp connect + ./upload_validator_keys.sh $(TM_VERSION) + touch $@ + +genesis.json: + ./create_validator_keys.sh $(TM_VERSION) + +install-consensus.stamp: ../../target/release/ekiden-consensus | connect + ./send.sh $< + touch $@ + +install-tendermint.stamp: tendermint | connect + ./send.sh $< + ./cmd.sh mkdir .tendermint + touch $@ + +connect: known_hosts ssh_config + +tendermint: tendermint_$(TM_VERSION)_linux_amd64.zip + unzip $< + touch $@ # mark as fresh + +tendermint_$(TM_VERSION)_linux_amd64.zip: + wget https://s3-us-west-2.amazonaws.com/tendermint/binaries/tendermint/v$(TM_VERSION)/$@ + echo "$(TM_SHA256SUM) $@" | sha256sum -c + +ssh_config: create_ssh_config.sh ips.txt + ./$< + +known_hosts: create_known_hosts.sh ips.txt + ./$< + +authorize.stamp: authorize_security_groups.sh ips.txt + ./$< + touch $@ + +ips.txt: get_ips.sh launch.stamp + ./$< >ips-tmp.txt + mv ips-tmp.txt $@ + +.PHONY: prep connect diff --git a/testnet/tendermint/README.md b/testnet/tendermint/README.md new file mode 100644 index 00000000000..b5b5d09849e --- /dev/null +++ b/testnet/tendermint/README.md @@ -0,0 +1,105 @@ +# Instructions +## Setting up prerequisites +1. Install the AWS CLI tool. + They say you can do it with pip, using `pip install awscli --upgrade --user`. + Make sure you have `$HOME/.local/bin` in `PATH` if you install it that way. +2. Get yourself an [AWS access key](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) and enter it in `aws configure`. + Don't enter a default region, because we use many regions. +3. Get the SSH keys archive. + These aren't under version control. 
+   Unpack the keys into this directory.
+   It should put the .pem files into the keys directory.
+4. In the AWS EC2 console, in each region that the cluster occupies (currently only N. California), in the `tendermint-base` security group, add an inbound rule to allow your computer to connect through SSH and an inbound rule to allow your compute node to access the consensus node (TCP port 9002).
+
+## Starting the cluster
+1. In the AWS EC2 console, in each region that the cluster occupies, in the `tendermint` security group, delete all the inbound rules.
+   (Should find a way to automate this.)
+2. Start the EC2 instances belonging to the cluster (currently ekidentm-val{1,2,3} in N. California).
+   This dynamically assigns them IP addresses.
+3. Locally, switch to the testnet/tendermint directory.
+   The scripts and configurations depend on this being the current working directory.
+4. Run `touch launch.stamp` to indicate that the instances have been launched.
+   This marks cluster management targets as out of date, so that they will run.
+5. Run `make connect`, which uses the AWS CLI tool to get the public IP addresses of the instances and sets up the ssh_config and known_hosts files.
+   This writes the public IP addresses to ips.txt.
+   The resulting ssh_config assigns simple names `val1`-`val3` so you can easily `ssh`/`scp` without worrying about IP address and key.
+   It also sets up opportunistic connection sharing so that we can avoid repeated SSH handshakes while running the experiments.
+   The get_ips.sh script has the instance IDs hardcoded, so update it if the instances change.
+   The create_ssh_config.sh script has a template that associates a region-specific key with each VM, so update it if the key associations change.
+   The create_known_hosts.sh script has a template with the host keys of the VMs hardcoded, so update it if the host keys change.
+6. Run `make authorize.stamp`, which uses the AWS CLI tool to populate the `tendermint` security groups.
+   The authorize_security_groups.sh script has a hardcoded list of regions in which to do this, so update it if the regions change.
+
+## Connecting to the cluster
+In a separate shell, run `. start_control.sh` (sourcing it, rather than fork-exec-ing it), which sets up background jobs to connect to each VM.
+I recommend that you do this in a separate shell, so the jobs don't get mixed up with other things.
+These connections will be used in subsequent operations.
+If you don't do this, the commands will each connect individually, which can be a lot slower.
+
+To disconnect, run `kill $(jobs -p)` to kill the jobs.
+
+## Setting up the cluster
+**This is already done**, but if we ever need to do it again, here's how.
+
+1. Run `make genesis.stamp` to download a copy of Tendermint, upload it to the VMs, and set up validator keys and a blockchain.
+2. Build the consensus node in release mode.
+3. Run `make install-consensus.stamp` to upload the consensus node program to the VMs.
+
+## Running the network
+In a separate shell, run `./run_servers.sh`.
+This will block until the servers exit.
+
+## Stopping the network
+Run `make stop`.
+If you also want to erase the state, run `make clear` after that.
+To kill the client, the primary, and the servers, run `./cmd.sh killall run`.
+
+# Utilities
+## cmd.sh
+```
+./cmd.sh command args...
+```
+Run a command on all VMs in parallel.
+
+## cmd_serial.sh
+```
+./cmd_serial.sh command args...
+```
+Run a command on all VMs one at a time.
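+
+A minimal sketch of what the parallel helper can look like, assuming the `val1`-`val3` host aliases from ssh_config (the real cmd.sh may differ):
+```
+#!/bin/sh -eu
+# Hypothetical sketch: run the given command on every VM in parallel,
+# reusing the shared SSH connections set up via ControlMaster.
+for host in val1 val2 val3; do
+    ssh -F ssh_config "$host" "$@" &
+done
+wait
+```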
+
+## send.sh
+```
+./send.sh file
+```
+Upload a file to all VMs, into the home directory there, in parallel.
+
+# Handy commands
+```
+killall ssh
+```
+If you interrupt one of the utilities that does things in parallel, its jobs will still exist.
+This kills SSH clients that may be out there in limbo.
+
+```
+./cmd.sh pgrep tendermint
+```
+Check for Tendermint processes on all VMs.
+Would also be useful to check for `consensus`.
+
+```
+./cmd_serial.sh true
+```
+Test that each VM is responsive.
+
+# Adding VMs to the cluster
+* Add instance ID to get_ips.sh
+* Add entry to create_ssh_config.sh
+* Add host key to create_known_hosts.sh
+* Add the corresponding lines and entries to the remaining scripts
+* Upload Tendermint and consensus programs
+* Somehow configure Tendermint, including setting up validator keys and peer seeds.
+  Probably just recreating all validator keys and the genesis.json would work.
+
+**If the added VMs cover more regions, then additionally:**
+* If the VMs need new keys, add those to the keys archive
+* Add entries to authorize_security_groups.sh
diff --git a/testnet/tendermint/authorize_security_groups.sh b/testnet/tendermint/authorize_security_groups.sh
new file mode 100755
index 00000000000..ddf4564fd2b
--- /dev/null
+++ b/testnet/tendermint/authorize_security_groups.sh
@@ -0,0 +1,12 @@
+#!/bin/sh -eu
+
+{
+    read val1
+    read val2
+    read val3
+} <ips.txt
+cat >known_hosts <<EOF
+$val1 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIz9F2mciwJvwBHapxc0OJELhq0J7qWtrE0diBev2W/DCRjj/sIfJSYQBtaIsO7cxg3YAjekR8rMtuhfB9bzBFU=
+$val2 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE/4R+LZYgv/HWw7GPUDbKU3WYEDJge+FhepmPdT064sAwLdU0EE7k8F7Lzw9VWIis7lz8aB6A9Zmv6tdkAPugo=
+$val3 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNGNXATGDPnQBp4hMpeZuLGuog0+SeDmj7kk6skSu4sLrb7j6Y14eDiPeKmgIsOAfQks6lIps3vHS6ZTjlpfMP0=
+EOF
diff --git a/testnet/tendermint/create_ssh_config.sh b/testnet/tendermint/create_ssh_config.sh
new file mode 100755
index 00000000000..31417263d16
--- /dev/null
+++ b/testnet/tendermint/create_ssh_config.sh
@@ -0,0 +1,26 @@
+#!/bin/sh -eu
+
+{
+    read val1
+    read val2
+    read val3
+} <ips.txt
+
+cat >ssh_config <<EOF
+User ec2-user
+UserKnownHostsFile ./known_hosts
+ControlMaster auto
+ControlPath ./control/%h
+
+Host val1
+HostName $val1
+IdentityFile ./keys/ekidentm-us-west-1.pem
+
+Host val2
+HostName $val2
+IdentityFile ./keys/ekidentm-us-west-1.pem
+
+Host val3
+HostName $val3
+IdentityFile ./keys/ekidentm-us-west-1.pem
+EOF
diff --git a/testnet/tendermint/create_validator_keys.sh b/testnet/tendermint/create_validator_keys.sh
new file mode 100755
index 00000000000..95fde338d32
--- /dev/null
+++ b/testnet/tendermint/create_validator_keys.sh
@@ -0,0 +1,21 @@
+#!/bin/sh -eu
+TM_VERSION=$1
+IMAGE_TAG=tendermint/tendermint:$TM_VERSION
+VALIDATOR_POWER=10
+
+cat >genesis.json <<EOF
+{
+    "genesis_time": "2018-01-25T00:00:00.000Z",
+    "chain_id": "ekiden-test-net",
+    "validators": [],
+    "app_hash": ""
+}
+EOF
+
+for n in 1 2 3; do
+    mkdir -p "validators/$n"
+    docker run --rm $IMAGE_TAG gen_validator > "validators/$n/priv_validator.json"
+    jq ".pub_key as \$k | {pub_key: \$k, power: $VALIDATOR_POWER, name: \"$n\"}" <"validators/$n/priv_validator.json" >"validators/$n/pub_validator.json"
+    jq ".validators |= .+ [$(cat validators/$n/pub_validator.json)]" <genesis.json >tmpgenesis
+    mv tmpgenesis genesis.json
+done
diff --git a/testnet/tendermint/get_ips.sh b/testnet/tendermint/get_ips.sh
new file mode 100755
index 00000000000..d5a8850322c
--- /dev/null
+++ b/testnet/tendermint/get_ips.sh
@@ -0,0 +1,11 @@
+#!/bin/bash -eu
+set -o pipefail
+get_ip() {
+    region=$1
+    inst=$2
+    aws ec2 describe-instances --region "$region" --instance-ids "$inst" | jq -r
'.Reservations[0].Instances[].PublicIpAddress' +} + +get_ip us-west-1 i-027047714e3581ec6 +get_ip us-west-1 i-0ba558494bad22313 +get_ip us-west-1 i-0cc82c78707f12e05 diff --git a/testnet/tendermint/keys/.keep b/testnet/tendermint/keys/.keep new file mode 100644 index 00000000000..478325a790e --- /dev/null +++ b/testnet/tendermint/keys/.keep @@ -0,0 +1 @@ +keep this dir. SSH private keys will go here. diff --git a/testnet/tendermint/run_servers.sh b/testnet/tendermint/run_servers.sh new file mode 100755 index 00000000000..1be8a7f781f --- /dev/null +++ b/testnet/tendermint/run_servers.sh @@ -0,0 +1,20 @@ +#!/bin/sh -eu + +{ + read val1 + read val2 + read val3 +} "] +description = "Ekiden build tools" +keywords = ["ekiden"] +repository = "https://github.com/ekiden/ekiden" + +[dependencies] +mktemp = "0.3.1" +cc = "1.0" +protoc = "1.4" +protoc-rust = "1.4" +protobuf = "1.4" +sgx_edl = { git = "https://github.com/ekiden/rust-sgx-sdk", tag = "v0.9.7-ekiden1" } +clap = "2.29.1" +ansi_term = "0.11" +toml = "0.4" +serde = "1.0" +serde_derive = "1.0" +ekiden-common = { path = "../common", version = "0.1.0-alpha.1" } + +[[bin]] +name = "cargo-ekiden" +path ="bin/main.rs" diff --git a/tools/Makefile.toml b/tools/Makefile.toml new file mode 100644 index 00000000000..8ee118dc949 --- /dev/null +++ b/tools/Makefile.toml @@ -0,0 +1 @@ +extend = "../Makefile.toml" diff --git a/tools/bin/main.rs b/tools/bin/main.rs new file mode 100644 index 00000000000..1f05a9392c9 --- /dev/null +++ b/tools/bin/main.rs @@ -0,0 +1,242 @@ +extern crate ansi_term; +#[macro_use] +extern crate clap; +extern crate mktemp; + +extern crate ekiden_common; +extern crate ekiden_tools; + +use std::env; +use std::fs::File; +use std::io::Write; +use std::path::Path; +use std::process::exit; + +use ansi_term::Colour::Red; +use clap::{App, Arg, ArgMatches, SubCommand}; + +use ekiden_common::error::{Error, Result}; +use ekiden_tools::{cargo, get_contract_identity}; +use ekiden_tools::contract::ContractBuilder; +use ekiden_tools::utils::SgxMode; + +/// Build an Ekiden contract. +fn build_contract(args: &ArgMatches) -> Result<()> { + let mut builder = match args.value_of("contract-crate") { + Some(crate_name) => ContractBuilder::new( + // Crate name. + crate_name.to_owned(), + // Output directory. + match args.value_of("output") { + Some(ref output) => Path::new(output).to_path_buf(), + None => env::current_dir()?, + }, + // Target directory. + None, + // Contract crate source. + { + if let Some(version) = args.value_of("version") { + Box::new(cargo::VersionSource { version: version }) + } else if let Some(git) = args.value_of("git") { + Box::new(cargo::GitSource { + repository: git, + branch: args.value_of("branch"), + tag: args.value_of("tag"), + rev: args.value_of("rev"), + }) + } else if let Some(path) = args.value_of("path") { + Box::new(cargo::PathSource { + path: Path::new(path).canonicalize()?, + }) + } else { + return Err(Error::new( + "Need to specify one of --version, --git or --path!", + )); + } + }, + )?, + None => { + // Invoke contract-build in the current project directory. 
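+            // ProjectRoot::discover() walks up from the current directory until it
+            // finds a Cargo.toml, so the command also works from a crate subdirectory.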
+ let project = cargo::ProjectRoot::discover()?; + let package = match project.get_package() { + Some(package) => package, + None => { + return Err(Error::new(format!( + "manifest path `{}` is a virtual manifest, but this command requires running \ + against an actual package in this workspace", + project.get_config_path().to_str().unwrap() + ))) + } + }; + + ContractBuilder::new( + package.name.clone(), + project.get_target_path().join("contract"), + Some(project.get_target_path()), + Box::new(cargo::PathSource { + path: project.get_path(), + }), + )? + } + }; + + // Configure builder. + builder + .verbose(true) + .release(args.is_present("release")) + .intel_sgx_sdk(Path::new(args.value_of("intel-sgx-sdk").unwrap())) + .sgx_mode(match args.value_of("sgx-mode") { + Some("HW") => SgxMode::Hardware, + _ => SgxMode::Simulation, + }) + .signing_key(args.value_of("sign-key")); + + // Build contract. + builder.build()?; + + // Output enclave identity when required. + if args.is_present("output-identity") { + let identity = get_contract_identity( + builder + .get_output_path() + .join(format!("{}.so", builder.get_crate_name())), + )?; + + // Hex encode identity. + let identity_file_path = builder + .get_output_path() + .join(format!("{}.mrenclave", builder.get_crate_name())); + let mut identity_file = File::create(&identity_file_path)?; + for byte in &identity { + write!(&mut identity_file, "{:02x}", byte)?; + } + } + + Ok(()) +} + +fn main() { + let matches = App::new("cargo") + .subcommand( + SubCommand::with_name("ekiden") + .about(crate_description!()) + .author(crate_authors!()) + .version(crate_version!()) + .subcommand( + SubCommand::with_name("build-contract") + .about("Build an Ekiden contract") + .arg( + Arg::with_name("contract-crate") + .help("Name of the Cargo crate containing the contract") + .takes_value(true), + ) + .arg( + Arg::with_name("version") + .help("Specify a version to build from crates.io") + .long("version") + .takes_value(true) + .conflicts_with("git") + .conflicts_with("path"), + ) + .arg( + Arg::with_name("git") + .help("Git URL to build the specified crate from") + .long("git") + .takes_value(true) + .conflicts_with("version") + .conflicts_with("path"), + ) + .arg( + Arg::with_name("branch") + .help("Branch to use when building from git") + .long("branch") + .takes_value(true) + .requires("git") + .conflicts_with("tag") + .conflicts_with("rev"), + ) + .arg( + Arg::with_name("tag") + .help("Tag to use when building from git") + .long("tag") + .takes_value(true) + .requires("git") + .conflicts_with("branch") + .conflicts_with("rev"), + ) + .arg( + Arg::with_name("rev") + .help("Specific commit to use when building from git") + .long("rev") + .takes_value(true) + .requires("git") + .conflicts_with("branch") + .conflicts_with("tag"), + ) + .arg( + Arg::with_name("path") + .help("Filesystem path to local crate to build") + .long("path") + .takes_value(true) + .conflicts_with("version") + .conflicts_with("git"), + ) + .arg( + Arg::with_name("release") + .long("release") + .help("Build contract in release mode, with optimizations"), + ) + .arg( + Arg::with_name("sgx-mode") + .help("SGX mode") + .long("sgx-mode") + .takes_value(true) + .env("SGX_MODE") + .required(true), + ) + .arg( + Arg::with_name("intel-sgx-sdk") + .help("Path to Intel SGX SDK") + .long("intel-sgx-sdk") + .takes_value(true) + .env("INTEL_SGX_SDK") + .required(true), + ) + .arg( + Arg::with_name("sign-key") + .help( + "Enclave signing key (if not specified, a default key is used)", + ) + 
.long("sign-key") + .takes_value(true), + ) + .arg( + Arg::with_name("output") + .help("Contract output directory") + .long("output") + .takes_value(true), + ) + .arg( + Arg::with_name("output-identity") + .help("Should a contract identity file be generated") + .long("output-identity"), + ), + ), + ) + .get_matches(); + + if let Some(ref ekiden_matches) = matches.subcommand_matches("ekiden") { + // Build contract. + if let Some(ref build_contract_matches) = + ekiden_matches.subcommand_matches("build-contract") + { + match build_contract(build_contract_matches) { + Ok(()) => {} + Err(error) => { + println!("{} {}", Red.bold().paint("error:"), error); + exit(128); + } + } + } + } +} diff --git a/tools/src/cargo.rs b/tools/src/cargo.rs new file mode 100644 index 00000000000..d49b1898aa2 --- /dev/null +++ b/tools/src/cargo.rs @@ -0,0 +1,236 @@ +//! Cargo-specific structures. +use std::env; +use std::fs::File; +use std::io::{Read, Write}; +use std::path::{Path, PathBuf}; + +use toml; + +use ekiden_common::error::{Error, Result}; + +/// Abstract crate source. +pub trait CrateSource { + /// Write a Cargo-compatible dependency spec to a given writer. Includes newline. + fn write_location(&self, writer: &mut Write) -> Result<()>; +} + +/// Git crate source. +#[derive(Debug)] +pub struct GitSource<'a> { + pub repository: &'a str, + pub branch: Option<&'a str>, + pub tag: Option<&'a str>, + pub rev: Option<&'a str>, +} + +impl<'a> CrateSource for GitSource<'a> { + fn write_location(&self, mut writer: &mut Write) -> Result<()> { + write!(&mut writer, "{{ git = \"{}\"", self.repository)?; + + if let Some(ref branch) = self.branch { + write!(&mut writer, ", branch = \"{}\"", branch)?; + } else if let Some(ref tag) = self.tag { + write!(&mut writer, ", tag = \"{}\"", tag)?; + } else if let Some(ref rev) = self.rev { + write!(&mut writer, ", rev = \"{}\"", rev)?; + } + + writeln!(&mut writer, " }}")?; + + Ok(()) + } +} + +/// Crates.io version crate source. +#[derive(Debug)] +pub struct VersionSource<'a> { + pub version: &'a str, +} + +impl<'a> CrateSource for VersionSource<'a> { + fn write_location(&self, mut writer: &mut Write) -> Result<()> { + writeln!(&mut writer, "\"{}\"", self.version)?; + + Ok(()) + } +} + +/// Local path crate source. +#[derive(Debug)] +pub struct PathSource { + pub path: PathBuf, +} + +impl CrateSource for PathSource { + fn write_location(&self, mut writer: &mut Write) -> Result<()> { + writeln!( + &mut writer, + "{{ path = \"{}\" }}", + self.path.to_str().unwrap() + )?; + + Ok(()) + } +} + +/// Cargo package metadata. +#[derive(Deserialize, Debug)] +pub struct Package { + pub name: String, + pub version: String, +} + +/// Cargo workspace metadata. +#[derive(Deserialize, Debug)] +pub struct Workspace { + members: Vec, +} + +/// Cargo manifest. +#[derive(Deserialize, Debug)] +pub struct Manifest { + package: Option, + workspace: Option, +} + +/// Cargo project root. +#[derive(Debug)] +pub struct ProjectRoot { + /// Path to the project root (directory containing Cargo.toml). + path: PathBuf, + /// Path to the workspace root. + workspace_path: PathBuf, + /// Parsed configuration file. + manifest: Manifest, +} + +impl ProjectRoot { + /// Attempts to discover the root of the current project. + pub fn discover() -> Result { + // Start with the current directory and recursively move up if Cargo.toml + // cannot be found in the given directory. 
+        let mut current_dir: &Path = &env::current_dir()?;
+        loop {
+            if current_dir.join("Cargo.toml").exists() {
+                return Ok(ProjectRoot::new(current_dir.to_owned())?);
+            }
+
+            if let Some(parent) = current_dir.parent() {
+                current_dir = parent;
+            } else {
+                // We've reached the root.
+                return Err(Error::new("failed to discover project root"));
+            }
+        }
+    }
+
+    /// Parse Cargo manifest file.
+    fn parse_manifest<P: AsRef<Path>>(path: P) -> Result<Manifest> {
+        // Parse configuration file.
+        let mut data = String::new();
+        File::open(path)?.read_to_string(&mut data)?;
+
+        Ok(toml::from_str(&data)?)
+    }
+
+    /// Create new project root.
+    pub fn new(path: PathBuf) -> Result<Self> {
+        let manifest = Self::parse_manifest(path.join("Cargo.toml"))?;
+        let workspace_path = if manifest.workspace.is_some() {
+            // This is already a workspace.
+            path.clone()
+        } else {
+            // Discover the workspace.
+            let mut current_dir: &Path = &path;
+            loop {
+                let manifest_path = current_dir.join("Cargo.toml");
+                if manifest_path.exists() {
+                    let workspace_manifest = Self::parse_manifest(&manifest_path)?;
+                    match workspace_manifest.workspace {
+                        Some(ref workspace) => {
+                            // Contains a workspace. Ensure that this workspace also contains
+                            // the project root.
+                            if !workspace
+                                .members
+                                .iter()
+                                .any(|m| current_dir.join(m) == path)
+                            {
+                                return Err(Error::new(format!(
+                                    "current package believes it's in a workspace when it's not: \n\
+                                     current: {}\n\
+                                     workspace: {}\n\
+                                     \n\
+                                     this may be fixable by adding `{}` to the \
+                                     `workspace.members` array of the manifest located at: {}",
+                                    path.join("Cargo.toml").to_str().unwrap(),
+                                    current_dir.to_str().unwrap(),
+                                    path.strip_prefix(current_dir).unwrap().to_str().unwrap(),
+                                    manifest_path.to_str().unwrap()
+                                )));
+                            }
+
+                            break current_dir.to_owned();
+                        }
+                        None => {}
+                    }
+                }
+
+                if let Some(parent) = current_dir.parent() {
+                    current_dir = parent;
+                } else {
+                    // We've reached the root.
+                    return Err(Error::new("failed to discover project workspace"));
+                }
+            }
+        };
+
+        Ok(ProjectRoot {
+            path,
+            workspace_path,
+            manifest,
+        })
+    }
+
+    pub fn get_path(&self) -> PathBuf {
+        self.path.clone()
+    }
+
+    /// Get project config path (Cargo.toml).
+    pub fn get_config_path(&self) -> PathBuf {
+        self.path.join("Cargo.toml")
+    }
+
+    /// Get project target directory path.
+    pub fn get_target_path(&self) -> PathBuf {
+        if let Ok(path) = env::var("CARGO_TARGET_DIR") {
+            Path::new(&path).to_owned()
+        } else {
+            self.workspace_path.join("target")
+        }
+    }
+
+    /// Parse project config (Cargo.toml).
+    pub fn get_config(&self) -> &Manifest {
+        &self.manifest
+    }
+
+    /// Check if given project contains a package.
+    pub fn is_package(&self) -> bool {
+        self.manifest.package.is_some()
+    }
+
+    /// Check if given project contains a workspace.
+    pub fn is_workspace(&self) -> bool {
+        self.manifest.workspace.is_some()
+    }
+
+    /// Get package metadata.
+    pub fn get_package(&self) -> Option<&Package> {
+        self.manifest.package.as_ref()
+    }
+
+    /// Get workspace metadata.
+    pub fn get_workspace(&self) -> Option<&Workspace> {
+        self.manifest.workspace.as_ref()
+    }
+}
diff --git a/tools/src/contract.rs b/tools/src/contract.rs
new file mode 100644
index 00000000000..c4dc9df47ee
--- /dev/null
+++ b/tools/src/contract.rs
@@ -0,0 +1,371 @@
+//! Ekiden contract builder.
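+//!
+//! The builder stages the contract crate in a temporary Xargo workspace
+//! targeting x86_64-unknown-linux-sgx and signs the resulting enclave,
+//! either with a caller-provided key or the default development key.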
+use std::env; +use std::fs::{DirBuilder, File}; +use std::io::Write; +use std::path::{Path, PathBuf}; +use std::process::Command; + +use ansi_term::Colour::Green; +use mktemp::Temp; + +use ekiden_common::error::{Error, Result}; + +use super::cargo; +use super::utils::SgxMode; + +/// Xargo configuration file. +static XARGO_CONFIG: &'static str = include_str!("../../xargo/Xargo.toml.template"); +/// Xargo target descriptor. +static XARGO_TARGET: &'static str = include_str!("../../xargo/x86_64-unknown-linux-sgx.json"); +/// Linker version script. +static LINKER_VERSION_SCRIPT: &'static str = include_str!("../../core/edl/src/enclave.lds"); +/// Enclave configuration. +static ENCLAVE_CONFIG: &'static str = include_str!("../../core/edl/src/enclave.xml"); +/// Default enclave signing key. +static ENCLAVE_SIGNING_KEY: &'static str = include_str!("../../keys/private.pem"); + +/// Name of subdirectory in the target directory. +const TARGET_CONTRACT_DIR: &'static str = "contract"; + +/// Contract build configuration. +pub struct ContractBuilder<'a> { + /// Name of the crate being built. + crate_name: String, + /// Output directory path. + output_path: PathBuf, + /// Build directory path. + build_path: PathBuf, + /// Ownership over build directory path (directory is removed when dropped). + #[allow(dead_code)] + build_temporary_dir: Temp, + /// Target directory path. + target_path: PathBuf, + /// Source crate location. + source: Box<cargo::CrateSource + 'a>, + /// Build verbosity. + verbose: bool, + /// Release mode. + release: bool, + /// Path to Intel SGX SDK. + intel_sgx_sdk: Option<PathBuf>, + /// SGX build mode. + sgx_mode: SgxMode, + /// Signing key location. + signing_key: Option<PathBuf>, +} + +impl<'a> ContractBuilder<'a> { + pub fn new( + crate_name: String, + output_path: PathBuf, + target_path: Option<PathBuf>, + source: Box<cargo::CrateSource + 'a>, + ) -> Result<Self> { + let build_temporary_dir = Temp::new_dir()?; + let build_path = build_temporary_dir.to_path_buf(); + + Ok(ContractBuilder { + crate_name, + output_path, + build_path: build_path.clone(), + build_temporary_dir, + target_path: target_path.unwrap_or(build_path.join("target")), + source, + verbose: false, + release: false, + intel_sgx_sdk: match env::var("INTEL_SGX_SDK") { + Ok(value) => Some(Path::new(&value).to_path_buf()), + Err(_) => None, + }, + sgx_mode: match env::var("SGX_MODE") { + Ok(ref value) => if value == "HW" { + SgxMode::Hardware + } else { + SgxMode::Simulation + }, + _ => SgxMode::Simulation, + }, + signing_key: None, + }) + } + + /// Get crate name. + pub fn get_crate_name(&self) -> &str { + self.crate_name.as_str() + } + + /// Get output path. + pub fn get_output_path(&self) -> &PathBuf { + &self.output_path + } + + /// Get build path. + pub fn get_build_path(&self) -> &PathBuf { + &self.build_path + } + + /// Get contract target path. + pub fn get_contract_target_path(&self) -> PathBuf { + self.target_path.join(TARGET_CONTRACT_DIR) + } + + /// Set builder verbosity. + pub fn verbose(&mut self, verbose: bool) -> &mut Self { + self.verbose = verbose; + self + } + + /// Set release mode. + pub fn release(&mut self, mode: bool) -> &mut Self { + self.release = mode; + self + } + + /// Set path to Intel SGX SDK. + /// + /// By default this will be configured based on the `INTEL_SGX_SDK` environment + /// variable. + pub fn intel_sgx_sdk<P: Into<PathBuf>>(&mut self, path: P) -> &mut Self { + self.intel_sgx_sdk = Some(path.into()); + self + } + + /// Set SGX build mode. + /// + /// By default this will be configured based on the `SGX_MODE` environment + /// variable.
+ pub fn sgx_mode(&mut self, mode: SgxMode) -> &mut Self { + self.sgx_mode = mode; + self + } + + /// Set SGX enclave signing key. + /// + /// By default a pre-defined signing key is used. + pub fn signing_key<P: Into<PathBuf>>(&mut self, key: Option<P>
) -> &mut Self { + self.signing_key = match key { + Some(path) => Some(path.into()), + None => None, + }; + self + } + + /// Output progress update if verbose mode is enabled. + fn report_stage(&self, stage: &str) { + if !self.verbose { + return; + } + + println!( + "{} {}", + Green.bold().paint(format!("{:>12}", stage)), + self.crate_name, + ); + } + + /// Prepare and build the contract static library crate. + /// + /// This generates a temporary directory with a new crate that lists only + /// the source contract crate as a dependency. It is made so that it + /// generates a static library when built. + pub fn build_contract_crate(&self) -> Result<()> { + self.report_stage("Preparing"); + + // Prepare dummy crate. + let mut cargo_toml = File::create(&self.build_path.join("Cargo.toml"))?; + writeln!(&mut cargo_toml, "[package]")?; + writeln!(&mut cargo_toml, "name = \"contract_enclave\"")?; + writeln!(&mut cargo_toml, "version = \"0.0.0\"")?; + writeln!(&mut cargo_toml, "")?; + writeln!(&mut cargo_toml, "[lib]")?; + writeln!(&mut cargo_toml, "path = \"lib.rs\"")?; + writeln!(&mut cargo_toml, "crate-type = [\"staticlib\"]")?; + writeln!(&mut cargo_toml, "")?; + writeln!(&mut cargo_toml, "[dependencies]")?; + write!(&mut cargo_toml, "{} = ", self.crate_name)?; + self.source.write_location(&mut cargo_toml)?; + drop(cargo_toml); + + // Include Xargo configuration files. + let mut xargo_toml = File::create(&self.build_path.join("Xargo.toml"))?; + write!(&mut xargo_toml, "{}", XARGO_CONFIG)?; + drop(xargo_toml); + + let mut xargo_target = + File::create(&self.build_path.join("x86_64-unknown-linux-sgx.json"))?; + write!(&mut xargo_target, "{}", XARGO_TARGET)?; + drop(xargo_target); + + let mut lib_rs = File::create(&self.build_path.join("lib.rs"))?; + writeln!( + &mut lib_rs, + "extern crate {};", + self.crate_name.replace("-", "_") + )?; + + // Build the crate using Xargo to get the staticlib. + self.report_stage("Building"); + + let mut xargo = Command::new("xargo"); + xargo.arg("build"); + + if self.release { + xargo.arg("--release"); + } + + let xargo_status = xargo + .arg("--target") + .arg("x86_64-unknown-linux-sgx") + // TODO: Combine rustflags. + .env("RUSTFLAGS", "-Z force-unstable-if-unmarked") + .env("RUST_TARGET_PATH", &self.build_path) + .env("CARGO_TARGET_DIR", &self.target_path) + .current_dir(&self.build_path) + .status()?; + if !xargo_status.success() { + return Err(Error::new(format!( + "failed to build, xargo exited with status {}!", + xargo_status.code().unwrap() + ))); + } + + Ok(()) + } + + /// Link the generated static library with SGX libraries. + pub fn link_enclave(&self) -> Result<()> { + self.report_stage("Linking"); + + // Include linker version script. + let enclave_lds_path = self.build_path.join("enclave.lds"); + let mut enclave_lds = File::create(&enclave_lds_path)?; + write!(&mut enclave_lds, "{}", LINKER_VERSION_SCRIPT)?; + drop(enclave_lds); + + // Configure Intel SGX SDK path and library names. + let intel_sgx_sdk_lib_path = match self.intel_sgx_sdk { + Some(ref sdk) => sdk.join("lib64"), + None => return Err(Error::new("path to Intel SGX SDK not configured")), + }; + let (trts_library_name, service_library_name) = match self.sgx_mode { + SgxMode::Hardware => ("sgx_trts", "sgx_tservice"), + SgxMode::Simulation => ("sgx_trts_sim", "sgx_tservice_sim"), + }; + + // Determine enclave library path. 
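+ // (xargo places build artifacts under `<target dir>/<target triple>/<profile>`,
+ // so the staticlib built above ends up in e.g.
+ // `target/x86_64-unknown-linux-sgx/release/libcontract_enclave.a`.)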
+ let library_path = if self.release { + self.target_path.join("x86_64-unknown-linux-sgx/release") + } else { + self.target_path.join("x86_64-unknown-linux-sgx/debug") + }; + + // Ensure contract target path is available. + DirBuilder::new() + .recursive(true) + .create(&self.get_contract_target_path())?; + + let gcc_status = Command::new("g++") + .arg("-Wl,--no-undefined") + .arg("-nostdlib") + .arg("-nodefaultlibs") + .arg("-nostartfiles") + .arg(&format!("-L{}", intel_sgx_sdk_lib_path.to_str().unwrap())) + .arg(&format!("-L{}", library_path.to_str().unwrap())) + // Trusted runtime group. + .arg("-Wl,--whole-archive") + .arg(&format!("-l{}", trts_library_name)) + .arg("-Wl,--no-whole-archive") + // Enclave library group. + .arg("-Wl,--start-group") + .arg("-lsgx_tstdc") + .arg("-lsgx_tstdcxx") + .arg("-lsgx_tcrypto") + .arg("-lsgx_tkey_exchange") + .arg(&format!("-l{}", service_library_name)) + .arg("-lcontract_enclave") + .arg("-Wl,--end-group") + .arg("-Wl,-Bstatic") + .arg("-Wl,-Bsymbolic") + .arg("-Wl,--no-undefined") + .arg("-Wl,-pie,-eenclave_entry") + .arg("-Wl,--export-dynamic") + .arg("-Wl,--defsym,__ImageBase=0") + // Require __ekiden_enclave symbol to be defined. + .arg("-Wl,--require-defined,__ekiden_enclave") + .arg("-Wl,--gc-sections") + .arg(&format!("-Wl,--version-script={}", enclave_lds_path.to_str().unwrap())) + .arg("-O2") + .arg("-o") + .arg(self.get_contract_target_path() + .join(format!("{}.unsigned.so", self.crate_name)).to_str().unwrap()) + .current_dir(&self.build_path) + .status()?; + if !gcc_status.success() { + return Err(Error::new(format!( + "failed to link, g++ exited with status {}!", + gcc_status.code().unwrap() + ))); + } + + Ok(()) + } + + /// Sign the generated enclave library. + pub fn sign_enclave(&self) -> Result<()> { + self.report_stage("Signing"); + + // Include enclave configuration. + let enclave_config_path = self.build_path.join("enclave.xml"); + let mut enclave_config = File::create(&enclave_config_path)?; + write!(&mut enclave_config, "{}", ENCLAVE_CONFIG)?; + drop(enclave_config); + + let signer_path = match self.intel_sgx_sdk { + Some(ref sdk) => sdk.join("bin/x64/sgx_sign"), + None => return Err(Error::new("path to Intel SGX SDK not configured")), + }; + + // Determine signing key. + let key_path = match self.signing_key { + Some(ref key) => key.clone(), + None => { + // Include default enclave signing key. + let enclave_key_path = self.build_path.join("enclave.pem"); + let mut enclave_key = File::create(&enclave_key_path)?; + write!(&mut enclave_key, "{}", ENCLAVE_SIGNING_KEY)?; + + enclave_key_path + } + }; + + let signer_status = Command::new(signer_path) + .arg("sign") + .arg("-key") + .arg(&key_path) + .arg("-enclave") + .arg(&self.get_contract_target_path() + .join(format!("{}.unsigned.so", self.crate_name))) + .arg("-out") + .arg(&self.output_path.join(format!("{}.so", self.crate_name))) + .arg("-config") + .arg(&enclave_config_path) + .status()?; + if !signer_status.success() { + return Err(Error::new(format!( + "failed to sign, sgx_sign exited with status {}!", + signer_status.code().unwrap() + ))); + } + + Ok(()) + } + + /// Performs all the contract build steps. 
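+ ///
+ /// Runs `build_contract_crate`, `link_enclave` and `sign_enclave` in order,
+ /// stopping at the first failure; on success the signed enclave is written
+ /// to the output directory as `<crate-name>.so`.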
+ pub fn build(&self) -> Result<()> { + self.build_contract_crate()?; + self.link_enclave()?; + self.sign_enclave()?; + + Ok(()) + } +} diff --git a/tools/src/lib.rs b/tools/src/lib.rs new file mode 100644 index 00000000000..a3027df3a89 --- /dev/null +++ b/tools/src/lib.rs @@ -0,0 +1,23 @@ +#![feature(use_extern_macros)] + +extern crate ansi_term; +extern crate cc; +extern crate mktemp; +extern crate protobuf; +extern crate protoc; +extern crate protoc_rust; +extern crate serde; +#[macro_use] +extern crate serde_derive; +extern crate sgx_edl; +extern crate toml; + +extern crate ekiden_common; + +pub mod cargo; +pub mod contract; +pub mod utils; +pub use utils::*; + +// Re-export the define_edl macro from sgx_edl. +pub use sgx_edl::define_edl; diff --git a/tools/src/utils.rs b/tools/src/utils.rs new file mode 100644 index 00000000000..a26abe5484a --- /dev/null +++ b/tools/src/utils.rs @@ -0,0 +1,365 @@ +//! Ekiden build utilities. +use std::env; +use std::fs; +use std::io; +use std::io::prelude::*; +use std::path::Path; +use std::process::Command; + +use cc; +use mktemp; +use protobuf; +use protoc_rust; +use sgx_edl::EDL; + +use ekiden_common::error::{Error, Result}; + +/// Arguments for protoc. +#[derive(Debug, Default)] +pub struct ProtocArgs<'a> { + /// --lang_out= param + pub out_dir: &'a str, + /// -I args + pub includes: &'a [&'a str], + /// List of .proto files to compile + pub input: &'a [&'a str], +} + +/// SGX build mode. +pub enum SgxMode { + Hardware, + Simulation, +} + +/// Build part. +enum BuildPart { + Untrusted, + Trusted, +} + +/// Build configuration. +struct BuildConfiguration { + mode: SgxMode, + intel_sdk_dir: String, +} + +// Paths. +static EDGER8R_PATH: &'static str = "bin/x64/sgx_edger8r"; +static SGX_SDK_LIBRARY_PATH: &'static str = "lib64"; +static SGX_SDK_INCLUDE_PATH: &'static str = "include"; +static SGX_SDK_TLIBC_INCLUDE_PATH: &'static str = "include/tlibc"; +static SGX_SDK_STLPORT_INCLUDE_PATH: &'static str = "include/stlport"; +static SGX_SDK_EPID_INCLUDE_PATH: &'static str = "include/epid"; + +/// Get current build environment configuration. +fn get_build_configuration() -> BuildConfiguration { + // Ensure build script is restarted if any of the env variables changes. + println!("cargo:rerun-if-env-changed=SGX_MODE"); + println!("cargo:rerun-if-env-changed=INTEL_SGX_SDK"); + + BuildConfiguration { + mode: match env::var("SGX_MODE") + .expect("Please define SGX_MODE") + .as_ref() + { + "HW" => SgxMode::Hardware, + _ => SgxMode::Simulation, + }, + intel_sdk_dir: env::var("INTEL_SGX_SDK").expect("Please define INTEL_SGX_SDK"), + } +} + +/// Run edger8r tool from Intel SGX SDK. +fn edger8r( + config: &BuildConfiguration, + part: BuildPart, + output: &str, + edl: &Vec<EDL>, +) -> io::Result<()> { + let edger8r_bin = Path::new(&config.intel_sdk_dir).join(EDGER8R_PATH); + + // Create temporary files with all EDLs and import all of them in the core EDL.
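+ // For example, for EDL items (core, enclave) and (rpc, rpc) the generated
+ // enclave.edl would read (namespaces and names here are illustrative):
+ //
+ //   enclave {
+ //       from "core_enclave" import *;
+ //       from "rpc_rpc" import *;
+ //   };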
+ let edl_filename = Path::new(&output).join("enclave.edl"); + { + let mut enclave_edl_file = fs::File::create(&edl_filename)?; + writeln!(&mut enclave_edl_file, "enclave {{").unwrap(); + + for ref edl_item in edl { + let edl_item_filename = + Path::new(&output).join(format!("{}_{}", edl_item.namespace, edl_item.name)); + let mut edl_file = fs::File::create(&edl_item_filename)?; + edl_file.write_all(edl_item.data.as_bytes())?; + writeln!( + &mut enclave_edl_file, + "from \"{}_{}\" import *;", + edl_item.namespace, edl_item.name + ).unwrap(); + } + + writeln!(&mut enclave_edl_file, "}};").unwrap(); + } + + let status = Command::new(edger8r_bin.to_str().unwrap()) + .args(&["--search-path", output]) + .args(&[ + "--search-path", + Path::new(&config.intel_sdk_dir) + .join(SGX_SDK_INCLUDE_PATH) + .to_str() + .unwrap(), + ]) + .args(&match part { + BuildPart::Untrusted => ["--untrusted", "--untrusted-dir", &output], + BuildPart::Trusted => ["--trusted", "--trusted-dir", &output], + }) + .arg(edl_filename.to_str().unwrap()) + .status()?; + if !status.success() { + panic!("edger8r did not execute successfully."); + } + + Ok(()) +} + +/// Enable SGX features based on current mode. +pub fn detect_sgx_features() { + let config = get_build_configuration(); + + match config.mode { + SgxMode::Simulation => { + // Enable sgx-simulation feature. + println!("cargo:rustc-cfg=feature=\"sgx-simulation\""); + } + _ => {} + } +} + +/// Build the untrusted part of an Ekiden enclave. +pub fn build_untrusted(edl: Vec<EDL>) { + let config = get_build_configuration(); + + // Create temporary directory to hold the built libraries. + let temp_dir = mktemp::Temp::new_dir().expect("Failed to create temporary directory"); + let temp_dir_path = temp_dir.to_path_buf(); + let temp_dir_name = temp_dir_path.to_str().unwrap(); + + // Generate proxy for untrusted part. + edger8r(&config, BuildPart::Untrusted, &temp_dir_name, &edl).expect("Failed to run edger8r"); + + // Build proxy. + cc::Build::new() + .file(temp_dir_path.join("enclave_u.c")) + .flag_if_supported("-m64") + .flag_if_supported("-O2") // TODO: Should be based on debug/release builds. + .flag_if_supported("-fPIC") + .flag_if_supported("-Wno-attributes") + .include(Path::new(&config.intel_sdk_dir).join(SGX_SDK_INCLUDE_PATH)) + .include(&temp_dir_name) + .compile("enclave_u"); + + println!("cargo:rustc-link-lib=static=enclave_u"); + println!( + "cargo:rustc-link-search=native={}", + Path::new(&config.intel_sdk_dir) + .join(SGX_SDK_LIBRARY_PATH) + .to_str() + .unwrap() + ); +} + +/// Build the trusted Ekiden SGX enclave. +pub fn build_trusted(edl: Vec<EDL>) { + let config = get_build_configuration(); + + // Create temporary directory to hold the built libraries. + let temp_dir = mktemp::Temp::new_dir().expect("Failed to create temporary directory"); + let temp_dir_path = temp_dir.to_path_buf(); + let temp_dir_name = temp_dir_path.to_str().unwrap(); + + // Generate proxy for trusted part. + edger8r(&config, BuildPart::Trusted, &temp_dir_name, &edl).expect("Failed to run edger8r"); + + // Build proxy. + cc::Build::new() + .file(temp_dir_path.join("enclave_t.c")) + .flag_if_supported("-m64") + .flag_if_supported("-O2") // TODO: Should be based on debug/release builds.
+ .flag_if_supported("-nostdinc") + .flag_if_supported("-fvisibility=hidden") + .flag_if_supported("-fpie") + .flag_if_supported("-fstack-protector") + .include(Path::new(&config.intel_sdk_dir).join(SGX_SDK_INCLUDE_PATH)) + .include(Path::new(&config.intel_sdk_dir).join(SGX_SDK_TLIBC_INCLUDE_PATH)) + .include(Path::new(&config.intel_sdk_dir).join(SGX_SDK_STLPORT_INCLUDE_PATH)) + .include(Path::new(&config.intel_sdk_dir).join(SGX_SDK_EPID_INCLUDE_PATH)) + .include(&temp_dir_name) + .compile("enclave_t"); + + println!("cargo:rustc-link-lib=static=enclave_t"); +} + +/// Generate Rust code for Protocol Buffer messages. +pub fn protoc(args: ProtocArgs) { + // Run protoc. + protoc_rust::run(protoc_rust::Args { + out_dir: args.out_dir, + includes: args.includes, + input: args.input, + }).expect("Failed to run protoc"); + + // Output descriptor of the generated files into a temporary file. + let temp_dir = mktemp::Temp::new_dir().expect("Failed to create temporary directory"); + let temp_file = temp_dir.to_path_buf().join("descriptor.pbbin"); + let temp_file = temp_file.to_str().expect("utf-8 file name"); + + let protoc = super::protoc::Protoc::from_env_path(); + + protoc + .write_descriptor_set(super::protoc::DescriptorSetOutArgs { + out: temp_file, + includes: args.includes, + input: args.input, + include_imports: true, + }) + .unwrap(); + + let mut fds = Vec::new(); + let mut file = fs::File::open(temp_file).unwrap(); + file.read_to_end(&mut fds).unwrap(); + + drop(file); + drop(temp_dir); + + let fds: protobuf::descriptor::FileDescriptorSet = protobuf::parse_from_bytes(&fds).unwrap(); + + // Generate Ekiden-specific impls for all messages. + for file in fds.get_file() { + let out_filename = Path::new(&args.out_dir) + .join(file.get_name()) + .with_extension("rs"); + // Skip protos that we didn't generate, such as those imported from other packages. + if let Ok(mut out_file) = fs::OpenOptions::new().append(true).open(out_filename) { + writeln!(&mut out_file, "").unwrap(); + writeln!(&mut out_file, "// Ekiden-specific implementations.").unwrap(); + + for message_type in file.get_message_type() { + writeln!( + &mut out_file, + "impl_serializable_protobuf!({});", + message_type.get_name() + ).unwrap(); + } + } + } +} + +/// Build local contract API files. +pub fn build_api() { + protoc(ProtocArgs { + out_dir: "src/generated/", + input: &["src/api.proto"], + includes: &["src/"], + }); +} + +/// Generates a module file with specified exported submodules. +pub fn generate_mod(output_dir: &str, modules: &[&str]) { + // Create directory if not exist + fs::create_dir_all(output_dir).unwrap(); + + // Create mod.rs + let output_mod_file = Path::new(&output_dir).join("mod.rs"); + let mut file = fs::File::create(output_mod_file).expect("Failed to create module file"); + + for module in modules { + writeln!(&mut file, "pub mod {};", module).unwrap(); + } + + // Create .gitignore + let output_gitignore_file = Path::new(&output_dir).join(".gitignore"); + let mut file = + fs::File::create(output_gitignore_file).expect("Failed to create .gitignore file"); + writeln!(&mut file, "*").unwrap(); +} + +/// Generates a module file with specified imported modules and exported submodules. 
+pub fn generate_mod_with_imports(output_dir: &str, imports: &[&str], modules: &[&str]) { + // Create directory if not exist + fs::create_dir_all(output_dir).unwrap(); + + // Create mod.rs + let output_mod_file = Path::new(&output_dir).join("mod.rs"); + let mut file = fs::File::create(output_mod_file).expect("Failed to create module file"); + + for import in imports { + writeln!(&mut file, "use {};", import).unwrap(); + } + + for module in modules { + writeln!(&mut file, "pub mod {};", module).unwrap(); + } + + // Create .gitignore + let output_gitignore_file = Path::new(&output_dir).join(".gitignore"); + let mut file = + fs::File::create(output_gitignore_file).expect("Failed to create .gitignore file"); + writeln!(&mut file, "*").unwrap(); +} + +/// Extract contract identity from a compiled contract. +pub fn get_contract_identity<P: AsRef<Path>>(contract: P) -> Result<Vec<u8>> { + // Sigstruct headers in bundled enclave. + const SIGSTRUCT_HEADER_1: &[u8] = + b"\x06\x00\x00\x00\xe1\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00"; + const SIGSTRUCT_HEADER_2: &[u8] = + b"\x01\x01\x00\x00\x60\x00\x00\x00\x60\x00\x00\x00\x01\x00\x00\x00"; + + let contract_file = fs::File::open(contract)?; + let mut reader = io::BufReader::new(contract_file); + loop { + // Update current offset. + let current_offset = reader.seek(io::SeekFrom::Current(0)).unwrap(); + + // Read the buffer. + let mut buffer = vec![0; SIGSTRUCT_HEADER_1.len()]; + reader.read_exact(&mut buffer)?; + + if buffer == SIGSTRUCT_HEADER_1 { + // Skip 8 bytes and expect to find the second header there. + reader.seek(io::SeekFrom::Current(8))?; + + let mut buffer = vec![0u8; SIGSTRUCT_HEADER_2.len()]; + reader.read_exact(&mut buffer)?; + + if buffer == SIGSTRUCT_HEADER_2 { + // Found SIGSTRUCT header at current offset. + break; + } + + return Err(Error::new("Failed to find SIGSTRUCT header in contract")); + } else { + // Structure not found at current offset, move to next offset. + reader.seek(io::SeekFrom::Start(current_offset + 1))?; + } + } + + // Read ENCLAVEHASH field at offset 920 from second header (32 bytes). + let mut mr_enclave = vec![0u8; 32]; + reader.seek(io::SeekFrom::Current(920)).unwrap(); + reader.read_exact(&mut mr_enclave)?; + + Ok(mr_enclave) +} + +/// Extract contract identity from a compiled contract and write it to an output file. +pub fn generate_contract_identity(output: &str, contract: &str) { + let mr_enclave = get_contract_identity(contract).expect("Failed to get contract identity"); + + // Write ENCLAVEHASH to given output file.
+ let mut output_file = fs::File::create(output).expect("Failed to create output file"); + output_file + .write_all(&mr_enclave) + .expect("Failed to write contract ENCLAVEHASH"); + + println!("cargo:rerun-if-changed={}", contract); +} diff --git a/xargo/Xargo.toml.template b/xargo/Xargo.toml.template new file mode 100644 index 00000000000..82b458ab1ad --- /dev/null +++ b/xargo/Xargo.toml.template @@ -0,0 +1,30 @@ +[dependencies] +alloc = {} +panic_unwind = {} +panic_abort = {} + +[dependencies.compiler_builtins] +features = ["c", "compiler-builtins"] +git = "https://github.com/rust-lang-nursery/compiler-builtins" +stage = 1 + +[dependencies.std] +features = ["stdio", "untrusted_time"] +git = "https://github.com/ekiden/rust-sgx-sdk" +tag = "v0.9.7-ekiden1" +stage = 2 + +[dependencies.xargo_sgx_rand] +git = "https://github.com/ekiden/rust-sgx-sdk" +tag = "v0.9.7-ekiden1" +stage = 3 + +[dependencies.xargo_sgx_serialize] +git = "https://github.com/ekiden/rust-sgx-sdk" +tag = "v0.9.7-ekiden1" +stage = 3 + +[dependencies.xargo_sgx_tunittest] +git = "https://github.com/ekiden/rust-sgx-sdk" +tag = "v0.9.7-ekiden1" +stage = 3 diff --git a/xargo/x86_64-unknown-linux-sgx.json b/xargo/x86_64-unknown-linux-sgx.json new file mode 100644 index 00000000000..6cbb524f443 --- /dev/null +++ b/xargo/x86_64-unknown-linux-sgx.json @@ -0,0 +1,31 @@ +{ + "arch": "x86_64", + "cpu": "x86-64", + "data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128", + "dynamic-linking": true, + "env": "sgx", + "exe-allocation-crate": "alloc_system", + "executables": true, + "has-elf-tls": true, + "has-rpath": true, + "linker-flavor": "gcc", + "linker-is-gnu": true, + "llvm-target": "x86_64-unknown-linux-gnu", + "max-atomic-width": 64, + "os": "linux", + "position-independent-executables": true, + "pre-link-args": { + "gcc": [ + "-Wl,--as-needed", + "-Wl,-z,noexecstack", + "-m64" + ] + }, + "relro-level": "full", + "stack-probes": true, + "target-c-int-width": "32", + "target-endian": "little", + "target-family": "unix", + "target-pointer-width": "64", + "vendor": "unknown" +}