diff --git a/.changelog/5324.trivial.md b/.changelog/5324.trivial.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/go/consensus/cometbft/full/full.go b/go/consensus/cometbft/full/full.go index f00c6f82b43..7f6cdf283e4 100644 --- a/go/consensus/cometbft/full/full.go +++ b/go/consensus/cometbft/full/full.go @@ -580,40 +580,40 @@ func (t *fullService) lazyInit() error { // nolint: gocyclo } // Create CometBFT node. - tenderConfig := cmtconfig.DefaultConfig() - _ = viper.Unmarshal(&tenderConfig) - tenderConfig.SetRoot(cometbftDataDir) + cometConfig := cmtconfig.DefaultConfig() + _ = viper.Unmarshal(&cometConfig) + cometConfig.SetRoot(cometbftDataDir) timeoutCommit := t.genesis.Consensus.Parameters.TimeoutCommit emptyBlockInterval := t.genesis.Consensus.Parameters.EmptyBlockInterval - tenderConfig.Consensus.TimeoutCommit = timeoutCommit - tenderConfig.Consensus.SkipTimeoutCommit = t.genesis.Consensus.Parameters.SkipTimeoutCommit - tenderConfig.Consensus.CreateEmptyBlocks = true - tenderConfig.Consensus.CreateEmptyBlocksInterval = emptyBlockInterval - tenderConfig.Consensus.DebugUnsafeReplayRecoverCorruptedWAL = config.GlobalConfig.Consensus.Debug.UnsafeReplayRecoverCorruptedWAL && cmflags.DebugDontBlameOasis() - tenderConfig.Mempool.Version = cmtconfig.MempoolV1 - tenderConfig.Instrumentation.Prometheus = true - tenderConfig.Instrumentation.PrometheusListenAddr = "" - tenderConfig.TxIndex.Indexer = "null" - tenderConfig.P2P.ListenAddress = config.GlobalConfig.Consensus.ListenAddress - tenderConfig.P2P.ExternalAddress = config.GlobalConfig.Consensus.ExternalAddress - tenderConfig.P2P.PexReactor = !config.GlobalConfig.Consensus.P2P.DisablePeerExchange - tenderConfig.P2P.MaxNumInboundPeers = config.GlobalConfig.Consensus.P2P.MaxNumInboundPeers - tenderConfig.P2P.MaxNumOutboundPeers = config.GlobalConfig.Consensus.P2P.MaxNumOutboundPeers - tenderConfig.P2P.SendRate = config.GlobalConfig.Consensus.P2P.SendRate - tenderConfig.P2P.RecvRate = config.GlobalConfig.Consensus.P2P.RecvRate - tenderConfig.P2P.PersistentPeers = strings.Join(persistentPeers, ",") - tenderConfig.P2P.PersistentPeersMaxDialPeriod = config.GlobalConfig.Consensus.P2P.PersistenPeersMaxDialPeriod - tenderConfig.P2P.UnconditionalPeerIDs = strings.Join(unconditionalPeers, ",") - tenderConfig.P2P.Seeds = strings.Join(seeds, ",") - tenderConfig.P2P.AddrBookStrict = !(config.GlobalConfig.Consensus.Debug.P2PAddrBookLenient && cmflags.DebugDontBlameOasis()) - tenderConfig.P2P.AllowDuplicateIP = config.GlobalConfig.Consensus.Debug.P2PAllowDuplicateIP && cmflags.DebugDontBlameOasis() - tenderConfig.RPC.ListenAddress = "" + cometConfig.Consensus.TimeoutCommit = timeoutCommit + cometConfig.Consensus.SkipTimeoutCommit = t.genesis.Consensus.Parameters.SkipTimeoutCommit + cometConfig.Consensus.CreateEmptyBlocks = true + cometConfig.Consensus.CreateEmptyBlocksInterval = emptyBlockInterval + cometConfig.Consensus.DebugUnsafeReplayRecoverCorruptedWAL = config.GlobalConfig.Consensus.Debug.UnsafeReplayRecoverCorruptedWAL && cmflags.DebugDontBlameOasis() + cometConfig.Mempool.Version = cmtconfig.MempoolV1 + cometConfig.Instrumentation.Prometheus = true + cometConfig.Instrumentation.PrometheusListenAddr = "" + cometConfig.TxIndex.Indexer = "null" + cometConfig.P2P.ListenAddress = config.GlobalConfig.Consensus.ListenAddress + cometConfig.P2P.ExternalAddress = config.GlobalConfig.Consensus.ExternalAddress + cometConfig.P2P.PexReactor = !config.GlobalConfig.Consensus.P2P.DisablePeerExchange + cometConfig.P2P.MaxNumInboundPeers = 
config.GlobalConfig.Consensus.P2P.MaxNumInboundPeers + cometConfig.P2P.MaxNumOutboundPeers = config.GlobalConfig.Consensus.P2P.MaxNumOutboundPeers + cometConfig.P2P.SendRate = config.GlobalConfig.Consensus.P2P.SendRate + cometConfig.P2P.RecvRate = config.GlobalConfig.Consensus.P2P.RecvRate + cometConfig.P2P.PersistentPeers = strings.Join(persistentPeers, ",") + cometConfig.P2P.PersistentPeersMaxDialPeriod = config.GlobalConfig.Consensus.P2P.PersistenPeersMaxDialPeriod + cometConfig.P2P.UnconditionalPeerIDs = strings.Join(unconditionalPeers, ",") + cometConfig.P2P.Seeds = strings.Join(seeds, ",") + cometConfig.P2P.AddrBookStrict = !(config.GlobalConfig.Consensus.Debug.P2PAddrBookLenient && cmflags.DebugDontBlameOasis()) + cometConfig.P2P.AllowDuplicateIP = config.GlobalConfig.Consensus.Debug.P2PAllowDuplicateIP && cmflags.DebugDontBlameOasis() + cometConfig.RPC.ListenAddress = "" if len(sentryUpstreamAddrs) > 0 { t.Logger.Info("Acting as a cometbft sentry", "addrs", sentryUpstreamAddrs) // Append upstream addresses to persistent, private and unconditional peers. - tenderConfig.P2P.PersistentPeers += "," + strings.Join(sentryUpstreamAddrs, ",") + cometConfig.P2P.PersistentPeers += "," + strings.Join(sentryUpstreamAddrs, ",") var sentryUpstreamIDs []string for _, addr := range sentryUpstreamAddrs { @@ -625,11 +625,11 @@ func (t *fullService) lazyInit() error { // nolint: gocyclo } sentryUpstreamIDsStr := strings.Join(sentryUpstreamIDs, ",") - tenderConfig.P2P.PrivatePeerIDs += "," + sentryUpstreamIDsStr - tenderConfig.P2P.UnconditionalPeerIDs += "," + sentryUpstreamIDsStr + cometConfig.P2P.PrivatePeerIDs += "," + sentryUpstreamIDsStr + cometConfig.P2P.UnconditionalPeerIDs += "," + sentryUpstreamIDsStr } - if !tenderConfig.P2P.PexReactor { + if !cometConfig.P2P.PexReactor { t.Logger.Info("pex reactor disabled", logging.LogEvent, api.LogEventPeerExchangeDisabled, ) @@ -717,8 +717,8 @@ func (t *fullService) lazyInit() error { // nolint: gocyclo t.Unlock() // Enable state sync in the configuration. - tenderConfig.StateSync.Enable = true - tenderConfig.StateSync.TrustHash = config.GlobalConfig.Consensus.StateSync.TrustHash + cometConfig.StateSync.Enable = true + cometConfig.StateSync.TrustHash = config.GlobalConfig.Consensus.StateSync.TrustHash // Create new state sync state provider. 
cfg := lightAPI.ClientConfig{ @@ -726,7 +726,7 @@ func (t *fullService) lazyInit() error { // nolint: gocyclo TrustOptions: cmtlight.TrustOptions{ Period: config.GlobalConfig.Consensus.StateSync.TrustPeriod, Height: int64(config.GlobalConfig.Consensus.StateSync.TrustHeight), - Hash: tenderConfig.StateSync.TrustHashBytes(), + Hash: cometConfig.StateSync.TrustHashBytes(), }, } if stateProvider, err = newStateProvider(t.ctx, t.genesis.ChainContext(), cfg, t.p2p); err != nil { @@ -737,13 +737,13 @@ func (t *fullService) lazyInit() error { // nolint: gocyclo } } - t.node, err = cmtnode.NewNode(tenderConfig, + t.node, err = cmtnode.NewNode(cometConfig, cometbftPV, &cmtp2p.NodeKey{PrivKey: crypto.SignerToCometBFT(t.identity.P2PSigner)}, cmtproxy.NewLocalClientCreator(t.mux.Mux()), cometbftGenesisProvider, wrapDbProvider, - cmtnode.DefaultMetricsProvider(tenderConfig.Instrumentation), + cmtnode.DefaultMetricsProvider(cometConfig.Instrumentation), tmcommon.NewLogAdapter(!config.GlobalConfig.Consensus.LogDebug), cmtnode.StateProvider(stateProvider), ) diff --git a/go/oasis-node/cmd/debug/fixgenesis/fixgenesis.go b/go/oasis-node/cmd/debug/fixgenesis/fixgenesis.go index 861c3392347..ef025593c27 100644 --- a/go/oasis-node/cmd/debug/fixgenesis/fixgenesis.go +++ b/go/oasis-node/cmd/debug/fixgenesis/fixgenesis.go @@ -26,7 +26,8 @@ import ( staking "github.com/oasisprotocol/oasis-core/go/staking/api" ) -const cfgNewGenesis = "genesis.new_file" +// CfgNewGenesisFile is the flag used to specify a new genesis file. +const CfgNewGenesisFile = "genesis.new_file" var ( fixGenesisCmd = &cobra.Command{ @@ -83,7 +84,7 @@ func doFixGenesis(cmd *cobra.Command, args []string) { } // Write out the new genesis document. - w, shouldClose, err := cmdCommon.GetOutputWriter(cmd, cfgNewGenesis) + w, shouldClose, err := cmdCommon.GetOutputWriter(cmd, CfgNewGenesisFile) if err != nil { logger.Error("failed to get writer for fixed genesis file", "err", err, @@ -449,6 +450,6 @@ func Register(parentCmd *cobra.Command) { } func init() { - newGenesisFlag.String(cfgNewGenesis, "genesis_fixed.json", "path to fixed genesis document") + newGenesisFlag.String(CfgNewGenesisFile, "genesis_fixed.json", "path to fixed genesis document") _ = viper.BindPFlags(newGenesisFlag) } diff --git a/go/oasis-node/cmd/debug/txsource/workload/runtime.go b/go/oasis-node/cmd/debug/txsource/workload/runtime.go index f1306e9ae18..61ce3af845d 100644 --- a/go/oasis-node/cmd/debug/txsource/workload/runtime.go +++ b/go/oasis-node/cmd/debug/txsource/workload/runtime.go @@ -195,7 +195,7 @@ func (r *runtime) validateEvents(ctx context.Context, rtc runtimeClient.RuntimeC return nil } -func (r *runtime) submitRuntimeRquest(ctx context.Context, rtc runtimeClient.RuntimeClient, req *TxnCall) (*TxnOutput, uint64, error) { +func (r *runtime) submitRuntimeRequest(ctx context.Context, rtc runtimeClient.RuntimeClient, req *TxnCall) (*TxnOutput, uint64, error) { var rsp TxnOutput rtx := &runtimeClient.SubmitTxRequest{ RuntimeID: r.runtimeID, @@ -243,7 +243,7 @@ func (r *runtime) doInsertRequest(ctx context.Context, rng *rand.Rand, rtc runti Value: value, }, } - rsp, round, err := r.submitRuntimeRquest(ctx, rtc, req) + rsp, round, err := r.submitRuntimeRequest(ctx, rtc, req) if err != nil { r.Logger.Error("Submit insert request failure", "request", req, @@ -293,7 +293,7 @@ func (r *runtime) doGetRequest(ctx context.Context, rng *rand.Rand, rtc runtimeC Key: key, }, } - rsp, round, err := r.submitRuntimeRquest(ctx, rtc, req) + rsp, round, err := r.submitRuntimeRequest(ctx, 
rtc, req) if err != nil { r.Logger.Error("Submit get request failure", "request", req, @@ -340,7 +340,7 @@ func (r *runtime) doRemoveRequest(ctx context.Context, rng *rand.Rand, rtc runti Key: key, }, } - rsp, round, err := r.submitRuntimeRquest(ctx, rtc, req) + rsp, round, err := r.submitRuntimeRequest(ctx, rtc, req) if err != nil { r.Logger.Error("Submit remove request failure", "request", req, @@ -577,7 +577,7 @@ func (r *runtime) doWithdrawRequest(ctx context.Context, rng *rand.Rand, rtc run }, }, } - rsp, round, err := r.submitRuntimeRquest(ctx, rtc, req) + rsp, round, err := r.submitRuntimeRequest(ctx, rtc, req) if err != nil { r.Logger.Error("Submit withdraw request failure", "request", req, @@ -621,7 +621,7 @@ func (r *runtime) doTransferRequest(ctx context.Context, rng *rand.Rand, rtc run }, }, } - rsp, round, err := r.submitRuntimeRquest(ctx, rtc, req) + rsp, round, err := r.submitRuntimeRequest(ctx, rtc, req) if err != nil { r.Logger.Error("Submit transfer request failure", "request", req, @@ -665,7 +665,7 @@ func (r *runtime) doAddEscrowRequest(ctx context.Context, rng *rand.Rand, rtc ru }, }, } - rsp, round, err := r.submitRuntimeRquest(ctx, rtc, req) + rsp, round, err := r.submitRuntimeRequest(ctx, rtc, req) if err != nil { r.Logger.Error("Submit add escrow request failure", "request", req, @@ -711,7 +711,7 @@ func (r *runtime) doReclaimEscrowRequest(ctx context.Context, rng *rand.Rand, rt }, }, } - rsp, round, err := r.submitRuntimeRquest(ctx, rtc, req) + rsp, round, err := r.submitRuntimeRequest(ctx, rtc, req) if err != nil { r.Logger.Error("Submit reclaim escrow request failure", "request", req, diff --git a/go/oasis-test-runner/oasis/cli/cli.go b/go/oasis-test-runner/oasis/cli/cli.go index f699d53a58a..4eae8fb955d 100644 --- a/go/oasis-test-runner/oasis/cli/cli.go +++ b/go/oasis-test-runner/oasis/cli/cli.go @@ -53,6 +53,8 @@ type Helpers struct { Consensus *ConsensusHelpers Registry *RegistryHelpers Keymanager *KeymanagerHelpers + Debug *DebugHelpers + Genesis *GenesisHelpers } // New creates new oasis-node cli helpers. @@ -68,9 +70,16 @@ func New(env *env.Env, factory Factory, logger *logging.Logger) *Helpers { Consensus: &ConsensusHelpers{base}, Registry: &RegistryHelpers{base}, Keymanager: &KeymanagerHelpers{base}, + Debug: &DebugHelpers{base}, + Genesis: &GenesisHelpers{base}, } } +// SetConfig sets the configuration for the oasis-node cli helpers. +func (h *Helpers) SetConfig(cfg Config) { + h.cfg = cfg +} + // UnsafeReset launches the unsafe-reset subcommand, clearing all consensus and (optionally) // runtime state. func (h *Helpers) UnsafeReset(dataDir string, preserveRuntimeStorage, preserveLocalStorage, force bool) error { diff --git a/go/oasis-test-runner/oasis/cli/debug.go b/go/oasis-test-runner/oasis/cli/debug.go new file mode 100644 index 00000000000..6196485aaf6 --- /dev/null +++ b/go/oasis-test-runner/oasis/cli/debug.go @@ -0,0 +1,36 @@ +package cli + +import ( + "fmt" + + "github.com/oasisprotocol/oasis-core/go/oasis-node/cmd/common" + "github.com/oasisprotocol/oasis-core/go/oasis-node/cmd/common/flags" + "github.com/oasisprotocol/oasis-core/go/oasis-node/cmd/debug/fixgenesis" +) + +// DebugHelpers contains the oasis-node debug CLI helpers. +type DebugHelpers struct { + *helpersBase +} + +// FixGenesis is a wrapper for "debug fix-genesis" subcommand. 
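+// It reads the genesis document from genesisFilePath and writes the fixed document to fixedGenesisFilePath.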
+func (d *DebugHelpers) FixGenesis( + genesisFilePath string, + fixedGenesisFilePath string, +) error { + d.logger.Info("fixing genesis file") + + args := []string{ + "debug", "fix-genesis", + "--" + flags.CfgGenesisFile, genesisFilePath, + "--" + fixgenesis.CfgNewGenesisFile, fixedGenesisFilePath, + "--" + flags.CfgDebugDontBlameOasis, + "--" + common.CfgDebugAllowTestKeys, + } + + if out, err := d.runSubCommandWithOutput("debug-fix-genesis", args); err != nil { + return fmt.Errorf("failed to run 'debug fix-genesis': error: %w output: %s", err, out.String()) + } + + return nil +} diff --git a/go/oasis-test-runner/oasis/cli/genesis.go b/go/oasis-test-runner/oasis/cli/genesis.go new file mode 100644 index 00000000000..f60cf506a6d --- /dev/null +++ b/go/oasis-test-runner/oasis/cli/genesis.go @@ -0,0 +1,56 @@ +package cli + +import ( + "fmt" + + "github.com/oasisprotocol/oasis-core/go/oasis-node/cmd/common" + "github.com/oasisprotocol/oasis-core/go/oasis-node/cmd/common/flags" + "github.com/oasisprotocol/oasis-core/go/oasis-node/cmd/common/grpc" + "github.com/oasisprotocol/oasis-core/go/oasis-node/cmd/stake" +) + +// GenesisHelpers contains the oasis-node genesis CLI helpers. +type GenesisHelpers struct { + *helpersBase +} + +// Check is a wrapper for "genesis check" subcommand. +func (g *GenesisHelpers) Check( + genesisFilePath string, +) (string, error) { + g.logger.Info("checking genesis file") + + args := []string{ + "genesis", "check", + "--" + flags.CfgGenesisFile, genesisFilePath, + "--" + flags.CfgDebugDontBlameOasis, + "--" + common.CfgDebugAllowTestKeys, + } + + out, err := g.runSubCommandWithOutput("genesis-check", args) + if err != nil { + return "", fmt.Errorf("failed to run 'genesis check': error: %w output: %s", err, out.String()) + } + + return out.String(), nil +} + +// Dump is a wrapper for "genesis dump" subcommand. +func (g *GenesisHelpers) Dump( + genesisFilePath string, +) error { + g.logger.Info("dumping network state to genesis file") + + args := []string{ + "genesis", "dump", + "--" + stake.CfgHeight, "0", + "--" + flags.CfgGenesisFile, genesisFilePath, + "--" + grpc.CfgAddress, "unix:" + g.cfg.NodeSocketPath, + } + + if out, err := g.runSubCommandWithOutput("genesis-dump", args); err != nil { + return fmt.Errorf("failed to run 'genesis dump': error: %w output: %s", err, out.String()) + } + + return nil +} diff --git a/go/oasis-test-runner/oasis/runtime.go b/go/oasis-test-runner/oasis/runtime.go index fc2593894a7..de405987571 100644 --- a/go/oasis-test-runner/oasis/runtime.go +++ b/go/oasis-test-runner/oasis/runtime.go @@ -149,25 +149,38 @@ func (rt *Runtime) BundlePaths() []string { return paths } -// RefreshRuntimeBundles makes sure the generated runtime bundles are refreshed. -func (rt *Runtime) RefreshRuntimeBundles() error { - for i, deployCfg := range rt.cfgSave.deployments { - fn := rt.bundlePath(i) +// RefreshRuntimeBundle makes sure the generated runtime bundle is refreshed. +func (rt *Runtime) RefreshRuntimeBundle(deploymentIndex int) error { + if deploymentIndex < 0 || deploymentIndex >= len(rt.cfgSave.deployments) { + return fmt.Errorf("invalid deployment index") + } - // Remove the generated bundle (if any). - if err := os.Remove(fn); err != nil && !errors.Is(err, os.ErrNotExist) { - return err - } + fn := rt.bundlePath(deploymentIndex) - deployCfg.bundle = nil - deployCfg.mrEnclave = nil + // Remove the generated bundle (if any). 
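+ // A missing bundle is not treated as an error; there is simply nothing to remove.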
+ if err := os.Remove(fn); err != nil && !errors.Is(err, os.ErrNotExist) { + return err } + deployCfg := rt.cfgSave.deployments[deploymentIndex] + deployCfg.bundle = nil + deployCfg.mrEnclave = nil + // Generate a fresh bundle. - _, err := rt.ToRuntimeBundles() + _, err := rt.toRuntimeBundle(deploymentIndex) return err } +// RefreshRuntimeBundles makes sure the generated runtime bundles are refreshed. +func (rt *Runtime) RefreshRuntimeBundles() error { + for i := range rt.cfgSave.deployments { + if err := rt.RefreshRuntimeBundle(i); err != nil { + return err + } + } + return nil +} + // ToRuntimeBundles serializes the runtime to disk and returns the bundle. func (rt *Runtime) ToRuntimeBundles() ([]*bundle.Bundle, error) { var bundles []*bundle.Bundle @@ -183,6 +196,10 @@ func (rt *Runtime) ToRuntimeBundles() ([]*bundle.Bundle, error) { } func (rt *Runtime) toRuntimeBundle(deploymentIndex int) (*bundle.Bundle, error) { + if deploymentIndex < 0 || deploymentIndex >= len(rt.cfgSave.deployments) { + return nil, fmt.Errorf("invalid deployment index") + } + deployCfg := rt.cfgSave.deployments[deploymentIndex] fn := rt.bundlePath(deploymentIndex) switch _, err := os.Stat(fn); err { diff --git a/go/oasis-test-runner/scenario/e2e/genesis_file.go b/go/oasis-test-runner/scenario/e2e/genesis_file.go index 14d2559d670..5f198d9ce3f 100644 --- a/go/oasis-test-runner/scenario/e2e/genesis_file.go +++ b/go/oasis-test-runner/scenario/e2e/genesis_file.go @@ -66,6 +66,8 @@ func (s *genesisFileImpl) Fixture() (*oasis.NetworkFixture, error) { } func (s *genesisFileImpl) Run(ctx context.Context, childEnv *env.Env) error { + cli := cli.New(childEnv, s.Net, s.Logger) + // Manually provision genesis file. s.Logger.Info("manually provisioning genesis file before starting the network") if err := s.Net.MakeGenesis(); err != nil { @@ -75,7 +77,7 @@ func (s *genesisFileImpl) Run(ctx context.Context, childEnv *env.Env) error { cfg := s.Net.Config() cfg.GenesisFile = s.Net.GenesisPath() - if _, err := s.runGenesisCheckCmd(childEnv, s.Net.GenesisPath()); err != nil { + if _, err := cli.Genesis.Check(s.Net.GenesisPath()); err != nil { return fmt.Errorf("e2e/genesis-file: running genesis check failed: %w", err) } s.Logger.Info("manually provisioned genesis file passed genesis check command") @@ -92,25 +94,18 @@ func (s *genesisFileImpl) Run(ctx context.Context, childEnv *env.Env) error { // Dump network state to a genesis file. s.Logger.Info("dumping network state to genesis file") dumpPath := filepath.Join(childEnv.Dir(), "genesis_dump.json") - args := []string{ - "genesis", "dump", - "--height", "0", - "--genesis.file", dumpPath, - "--address", "unix:" + s.Net.Validators()[0].SocketPath(), - } - out, err := cli.RunSubCommandWithOutput(childEnv, s.Logger, "genesis-file", s.Net.Config().NodeBinary, args) - if err != nil { - return fmt.Errorf("e2e/genesis-file: failed to dump state: error: %w output: %s", err, out.String()) + if err := cli.Genesis.Dump(dumpPath); err != nil { + return fmt.Errorf("e2e/genesis-file: failed to dump state: %w", err) } - if _, err = s.runGenesisCheckCmd(childEnv, dumpPath); err != nil { + if _, err := cli.Genesis.Check(dumpPath); err != nil { return fmt.Errorf("e2e/genesis-file: running genesis check failed: %w", err) } s.Logger.Info("genesis file from dumped network state passed genesis check command") // Check if the latest Mainnet genesis file passes genesis check. 
latestMainnetGenesis := filepath.Join(childEnv.Dir(), "genesis_mainnet.json") - if err = s.downloadGenesisFile(childEnv, latestMainnetGenesis); err != nil { + if err := s.downloadGenesisFile(childEnv, latestMainnetGenesis); err != nil { return fmt.Errorf("e2e/genesis-file: failed to download latest Mainnet genesis "+ "file at '%s': %w", genesisURL, err) } @@ -121,14 +116,14 @@ func (s *genesisFileImpl) Run(ctx context.Context, childEnv *env.Env) error { if genesisNeedsUpgrade { // When upgrade is needed, run fix-genesis. latestMainnetGenesisFixed = filepath.Join(childEnv.Dir(), "genesis_mainnet_fixed.json") - if err = s.RunFixGenesisCmd(childEnv, latestMainnetGenesis, latestMainnetGenesisFixed); err != nil { + if err := cli.Debug.FixGenesis(latestMainnetGenesis, latestMainnetGenesisFixed); err != nil { return fmt.Errorf("e2e/genesis-file: failed to run fix-genesis on latest Mainnet genesis "+ "file at '%s': %w", genesisURL, err) } } else { latestMainnetGenesisFixed = latestMainnetGenesis } - checkOut, err := s.runGenesisCheckCmd(childEnv, latestMainnetGenesisFixed) + checkOut, err := cli.Genesis.Check(latestMainnetGenesisFixed) switch { case err != nil: return fmt.Errorf("e2e/genesis-file: running genesis check for the latest Mainnet"+ @@ -152,7 +147,7 @@ func (s *genesisFileImpl) Run(ctx context.Context, childEnv *env.Env) error { if err = s.createUncanonicalGenesisFile(childEnv, uncanonicalGenesis); err != nil { return fmt.Errorf("e2e/genesis-file: creating uncanonical genesis file failed: %w", err) } - _, err = s.runGenesisCheckCmd(childEnv, uncanonicalGenesis) + _, err = cli.Genesis.Check(uncanonicalGenesis) expectedError := "genesis file is not in canonical form, see the diff on stderr" switch { case err == nil: @@ -173,36 +168,6 @@ func (s *genesisFileImpl) Run(ctx context.Context, childEnv *env.Env) error { return nil } -func (s *genesisFileImpl) runGenesisCheckCmd(childEnv *env.Env, genesisFilePath string) (string, error) { - args := []string{ - "genesis", "check", - "--genesis.file", genesisFilePath, - "--debug.dont_blame_oasis", - "--debug.allow_test_keys", - } - out, err := cli.RunSubCommandWithOutput(childEnv, s.Logger, "genesis-file", s.Net.Config().NodeBinary, args) - if err != nil { - return "", fmt.Errorf("genesis check failed: error: %w output: %s", err, out.String()) - } - return out.String(), nil -} - -// RunFixGenesisCmd runs the 'fix-genesis' command. -func (s *Scenario) RunFixGenesisCmd(childEnv *env.Env, genesisFilePath, fixedGenesisFilePath string) error { - args := []string{ - "debug", "fix-genesis", - "--genesis.file", genesisFilePath, - "--genesis.new_file", fixedGenesisFilePath, - "--debug.dont_blame_oasis", - "--debug.allow_test_keys", - } - out, err := cli.RunSubCommandWithOutput(childEnv, s.Logger, "genesis-file", s.Net.Config().NodeBinary, args) - if err != nil { - return fmt.Errorf("debug fix-genesis failed: error: %w output: %s", err, out.String()) - } - return nil -} - func (s *genesisFileImpl) downloadGenesisFile(childEnv *env.Env, path string) error { // Get the data.
resp, err := http.Get(genesisURL) diff --git a/go/oasis-test-runner/scenario/e2e/helpers_consensus.go b/go/oasis-test-runner/scenario/e2e/helpers_consensus.go new file mode 100644 index 00000000000..9247f577689 --- /dev/null +++ b/go/oasis-test-runner/scenario/e2e/helpers_consensus.go @@ -0,0 +1,249 @@ +package e2e + +import ( + "bytes" + "context" + "crypto/sha256" + "fmt" + "hash" + "io" + "os" + "path/filepath" + "strconv" + + beacon "github.com/oasisprotocol/oasis-core/go/beacon/api" + "github.com/oasisprotocol/oasis-core/go/common/crypto/signature" + "github.com/oasisprotocol/oasis-core/go/common/entity" + consensus "github.com/oasisprotocol/oasis-core/go/consensus/api" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis/cli" + registry "github.com/oasisprotocol/oasis-core/go/registry/api" + staking "github.com/oasisprotocol/oasis-core/go/staking/api" +) + +// TrustRoot is a consensus trust root. +type TrustRoot struct { + Height string + Hash string + ChainContext string +} + +// WaitBlocks waits for the specified number of blocks. +func (sc *Scenario) WaitBlocks(ctx context.Context, n int) (*consensus.Block, error) { + sc.Logger.Info("waiting for blocks", "n", n) + + blockCh, blockSub, err := sc.Net.Controller().Consensus.WatchBlocks(ctx) + if err != nil { + return nil, err + } + defer blockSub.Close() + + var blk *consensus.Block + for i := 0; i < n; i++ { + select { + case blk = <-blockCh: + sc.Logger.Info("new block", + "height", blk.Height, + ) + case <-ctx.Done(): + return nil, fmt.Errorf("timed out waiting for blocks") + } + } + + return blk, nil } + +// WaitEpochs waits for the specified number of epochs. +func (sc *Scenario) WaitEpochs(ctx context.Context, n beacon.EpochTime) error { + sc.Logger.Info("waiting for a few epochs", "n", n) + + epoch, err := sc.Net.ClientController().Beacon.GetEpoch(ctx, consensus.HeightLatest) + if err != nil { + return err + } + if err := sc.Net.ClientController().Beacon.WaitEpoch(ctx, epoch+n); err != nil { + return err + } + return nil } + +// ChainContext returns the consensus chain context. +func (sc *Scenario) ChainContext(ctx context.Context) (string, error) { + sc.Logger.Info("fetching consensus chain context") + + cc, err := sc.Net.Controller().Consensus.GetChainContext(ctx) + if err != nil { + return "", err + } + return cc, nil } + +// TrustRoot returns a suitable trust root after running the network for a few blocks. +func (sc *Scenario) TrustRoot(ctx context.Context) (*TrustRoot, error) { + sc.Logger.Info("preparing trust root") + + block, err := sc.WaitBlocks(ctx, 3) + if err != nil { + return nil, err + } + + chainContext, err := sc.ChainContext(ctx) + if err != nil { + return nil, err + } + + return &TrustRoot{ + Height: strconv.FormatInt(block.Height, 10), + Hash: block.Hash.Hex(), + ChainContext: chainContext, + }, nil } + +// TestEntityNonce returns the nonce of the test entity. +func (sc *Scenario) TestEntityNonce(ctx context.Context) (uint64, error) { + ent, _, err := entity.TestEntity() + if err != nil { + return 0, err + } + return sc.EntityNonce(ctx, ent) } + +// EntityNonce returns the nonce of the specified entity. 
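+// The nonce is queried from the consensus layer using the entity's staking account address.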
+func (sc *Scenario) EntityNonce(ctx context.Context, ent *entity.Entity) (uint64, error) { + addr := staking.NewAddress(ent.ID) + return sc.Net.ClientController().Consensus.GetSignerNonce(ctx, &consensus.GetSignerNonceRequest{ + Height: consensus.HeightLatest, + AccountAddress: addr, + }) +} + +// EntityNonceByID returns the nonce of the entity with the specified ID. +func (sc *Scenario) EntityNonceByID(ctx context.Context, id signature.PublicKey) (uint64, error) { + ent, err := sc.Net.ClientController().Registry.GetEntity(ctx, &registry.IDQuery{ + Height: consensus.HeightLatest, + ID: id, + }) + if err != nil { + return 0, err + } + return sc.EntityNonce(ctx, ent) +} + +// ExportedGenesisFiles gathers exported genesis files and ensures they all match. +func (sc *Scenario) ExportedGenesisFiles(skipCompute bool) ([]string, error) { + dumpGlob := "genesis-*.json" + + // Gather all nodes. + var nodes []interface { + ExportsPath() string + } + for _, v := range sc.Net.Validators() { + nodes = append(nodes, v) + } + if !skipCompute { + for _, n := range sc.Net.ComputeWorkers() { + nodes = append(nodes, n) + } + } + for _, n := range sc.Net.Keymanagers() { + nodes = append(nodes, n) + } + + // Gather all genesis files. + var files []string + for _, node := range nodes { + dumpGlobPath := filepath.Join(node.ExportsPath(), dumpGlob) + globMatch, err := filepath.Glob(dumpGlobPath) + if err != nil { + return nil, fmt.Errorf("glob failed: %s: %w", dumpGlobPath, err) + } + if len(globMatch) == 0 { + return nil, fmt.Errorf("genesis file not found in: %s", dumpGlobPath) + } + if len(globMatch) > 1 { + return nil, fmt.Errorf("more than one genesis file found in: %s", dumpGlobPath) + } + files = append(files, globMatch[0]) + } + + // Assert all exported files match. + var firstHash hash.Hash + for _, file := range files { + // Compute hash. + f, err := os.Open(file) + if err != nil { + return nil, fmt.Errorf("failed to open file: %s: %w", file, err) + } + defer f.Close() + hnew := sha256.New() + if _, err := io.Copy(hnew, f); err != nil { + return nil, fmt.Errorf("sha256 failed on: %s: %w", file, err) + } + if firstHash == nil { + firstHash = hnew + } + + // Compare hash with first hash. + if !bytes.Equal(firstHash.Sum(nil), hnew.Sum(nil)) { + return nil, fmt.Errorf("exported genesis files do not match %s, %s", files[0], file) + } + } + + return files, nil +} + +// RegisterEntity registers the specified entity. +func (sc *Scenario) RegisterEntity(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, ent *oasis.Entity, nonce uint64) error { + txPath := uniqueFilepath(filepath.Join(childEnv.Dir(), "register_entity.json")) + if err := cli.Registry.GenerateRegisterEntityTx(ent.Dir(), nonce, txPath); err != nil { + return fmt.Errorf("failed to generate register entity tx: %w", err) + } + if err := cli.Consensus.SubmitTx(txPath); err != nil { + return fmt.Errorf("failed to submit register entity tx: %w", err) + } + + return nil +} + +// RegisterRuntime registers the specified runtime. 
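+// The registration transaction is written to a unique file under childEnv and submitted via the consensus CLI helpers.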
+func (sc *Scenario) RegisterRuntime(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, rt registry.Runtime, nonce uint64) error { + txPath := uniqueFilepath(childEnv.Dir(), fmt.Sprintf("register_runtime_%s.json", rt.ID)) + if err := cli.Registry.GenerateRegisterRuntimeTx(childEnv.Dir(), rt, nonce, txPath); err != nil { + return fmt.Errorf("failed to generate register runtime tx: %w", err) + } + + if err := cli.Consensus.SubmitTx(txPath); err != nil { + return fmt.Errorf("failed to register runtime: %w", err) + } + + return nil +} + +// uniqueFilepath joins any number of path elements into a single path, checks if a file exists +// at that path, and if it does, appends a unique suffix to the filename to ensure the returned +// path is not already in use. +func uniqueFilepath(elem ...string) string { + path := filepath.Join(elem...) + if !fileExists(path) { + return path + } + + dir, filename := filepath.Split(path) + extension := filepath.Ext(filename) + prefix := filename[:len(filename)-len(extension)] + + for suffix := 1; ; suffix++ { + newFilename := fmt.Sprintf("%s_%d%s", prefix, suffix, extension) + newPath := filepath.Join(dir, newFilename) + if !fileExists(newPath) { + return newPath + } + } +} + +// fileExists returns true iff the named file exists. +func fileExists(name string) bool { + _, err := os.Stat(name) + return err == nil +} diff --git a/go/oasis-test-runner/scenario/e2e/e2e.go b/go/oasis-test-runner/scenario/e2e/helpers_network.go similarity index 53% rename from go/oasis-test-runner/scenario/e2e/e2e.go rename to go/oasis-test-runner/scenario/e2e/helpers_network.go index 0e0b4598c78..7646f9ac1c3 100644 --- a/go/oasis-test-runner/scenario/e2e/e2e.go +++ b/go/oasis-test-runner/scenario/e2e/helpers_network.go @@ -1,221 +1,19 @@ -// Package e2e implements the Oasis e2e test scenarios. package e2e import ( "bytes" - "context" - "crypto/sha256" "encoding/json" "fmt" - goHash "hash" - "io" "os" "path/filepath" - flag "github.com/spf13/pflag" - - "github.com/oasisprotocol/oasis-core/go/common/crypto/signature" - "github.com/oasisprotocol/oasis-core/go/common/entity" - "github.com/oasisprotocol/oasis-core/go/common/logging" - consensus "github.com/oasisprotocol/oasis-core/go/consensus/api" - "github.com/oasisprotocol/oasis-core/go/consensus/api/transaction" - consensusGenesis "github.com/oasisprotocol/oasis-core/go/consensus/genesis" genesis "github.com/oasisprotocol/oasis-core/go/genesis/api" genesisFile "github.com/oasisprotocol/oasis-core/go/genesis/file" - "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/cmd" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis/cli" - "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario" - registry "github.com/oasisprotocol/oasis-core/go/registry/api" - staking "github.com/oasisprotocol/oasis-core/go/staking/api" ) -const ( - // cfgNodeBinary is the path to oasis-node executable. - cfgNodeBinary = "node.binary" -) - -// ParamsDummyScenario is a dummy instance of E2E scenario used to register global E2E flags. -var ParamsDummyScenario = NewScenario("") - -// Scenario is a base scenario for oasis-node end-to-end tests. -type Scenario struct { - Net *oasis.Network - Flags *env.ParameterFlagSet - Logger *logging.Logger - - name string -} - -// NewScenario creates a new base scenario for oasis-node end-to-end tests. 
-func NewScenario(name string) *Scenario { - // Empty scenario name is used for registering global parameters only. - fullName := "e2e" - if name != "" { - fullName += "/" + name - } - - sc := &Scenario{ - name: fullName, - Logger: logging.GetLogger("scenario/" + fullName), - Flags: env.NewParameterFlagSet(fullName, flag.ContinueOnError), - } - sc.Flags.String(cfgNodeBinary, "oasis-node", "path to the node binary") - - return sc -} - -// Clone implements scenario.Scenario. -func (sc *Scenario) Clone() Scenario { - return Scenario{ - Net: sc.Net, - Flags: sc.Flags.Clone(), - Logger: sc.Logger, - name: sc.name, - } -} - -// Name implements scenario.Scenario. -func (sc *Scenario) Name() string { - return sc.name -} - -// Parameters implements scenario.Scenario. -func (sc *Scenario) Parameters() *env.ParameterFlagSet { - return sc.Flags -} - -// PreInit implements scenario.Scenario. -func (sc *Scenario) PreInit(childEnv *env.Env) error { - return nil -} - -// Fixture implements scenario.Scenario. -func (sc *Scenario) Fixture() (*oasis.NetworkFixture, error) { - nodeBinary, _ := sc.Flags.GetString(cfgNodeBinary) - - return &oasis.NetworkFixture{ - Network: oasis.NetworkCfg{ - NodeBinary: nodeBinary, - Consensus: consensusGenesis.Genesis{ - Parameters: consensusGenesis.Parameters{ - GasCosts: transaction.Costs{ - consensusGenesis.GasOpTxByte: 1, - }, - }, - }, - }, - Entities: []oasis.EntityCfg{ - {IsDebugTestEntity: true}, - {}, - }, - Validators: []oasis.ValidatorFixture{ - {Entity: 1, Consensus: oasis.ConsensusFixture{SupplementarySanityInterval: 1}}, - {Entity: 1}, - {Entity: 1}, - }, - Seeds: []oasis.SeedFixture{{}}, - }, nil -} - -// Init implements scenario.Scenario. -func (sc *Scenario) Init(childEnv *env.Env, net *oasis.Network) error { - sc.Net = net - return nil -} - -// GetExportedGenesisFiles gathers exported genesis files and ensures -// all exported genesis files match. -func (sc *Scenario) GetExportedGenesisFiles(skipCompute bool) ([]string, error) { - dumpGlob := "genesis-*.json" - - // Gather all nodes. - var nodes []interface { - ExportsPath() string - } - for _, v := range sc.Net.Validators() { - nodes = append(nodes, v) - } - if !skipCompute { - for _, n := range sc.Net.ComputeWorkers() { - nodes = append(nodes, n) - } - } - for _, n := range sc.Net.Keymanagers() { - nodes = append(nodes, n) - } - - // Gather all genesis files. - var files []string - for _, node := range nodes { - dumpGlobPath := filepath.Join(node.ExportsPath(), dumpGlob) - globMatch, err := filepath.Glob(dumpGlobPath) - if err != nil { - return nil, fmt.Errorf("glob failed: %s: %w", dumpGlobPath, err) - } - if len(globMatch) == 0 { - return nil, fmt.Errorf("genesis file not found in: %s", dumpGlobPath) - } - if len(globMatch) > 1 { - return nil, fmt.Errorf("more than one genesis file found in: %s", dumpGlobPath) - } - files = append(files, globMatch[0]) - } - - // Assert all exported files match. - var firstHash goHash.Hash - for _, file := range files { - // Compute hash. - f, err := os.Open(file) - if err != nil { - return nil, fmt.Errorf("failed to open file: %s: %w", file, err) - } - defer f.Close() - hnew := sha256.New() - if _, err := io.Copy(hnew, f); err != nil { - return nil, fmt.Errorf("sha256 failed on: %s: %w", file, err) - } - if firstHash == nil { - firstHash = hnew - } - - // Compare hash with first hash. 
- if !bytes.Equal(firstHash.Sum(nil), hnew.Sum(nil)) { - return nil, fmt.Errorf("exported genesis files do not match %s, %s", files[0], file) - } - } - - return files, nil -} - -func (sc *Scenario) GetTestEntityNonce(ctx context.Context) (uint64, error) { - ent, _, err := entity.TestEntity() - if err != nil { - return 0, err - } - return sc.GetEntityNonce(ctx, ent) -} - -func (sc *Scenario) GetEntityNonce(ctx context.Context, ent *entity.Entity) (uint64, error) { - addr := staking.NewAddress(ent.ID) - return sc.Net.ClientController().Consensus.GetSignerNonce(ctx, &consensus.GetSignerNonceRequest{ - Height: consensus.HeightLatest, - AccountAddress: addr, - }) -} - -func (sc *Scenario) GetEntityNonceByID(ctx context.Context, id signature.PublicKey) (uint64, error) { - ent, err := sc.Net.ClientController().Registry.GetEntity(ctx, &registry.IDQuery{ - Height: consensus.HeightLatest, - ID: id, - }) - if err != nil { - return 0, err - } - return sc.GetEntityNonce(ctx, ent) -} - // Flag for consensus state reset. const ( PreserveValidatorRuntimeStorage uint8 = iota @@ -476,57 +274,3 @@ func (sc *Scenario) finishWithoutChild() error { return sc.Net.CheckLogWatchers() } } - -// RegisterScenarios registers all end-to-end scenarios. -func RegisterScenarios() error { - // Register non-scenario-specific parameters. - cmd.RegisterScenarioParams(ParamsDummyScenario.Name(), ParamsDummyScenario.Parameters()) - - // Register default scenarios which are executed, if no test names provided. - for _, s := range []scenario.Scenario{ - // Registry CLI test. - RegistryCLI, - // Stake CLI test. - StakeCLI, - // Gas fees tests. - GasFeesStaking, - GasFeesStakingDumpRestore, - // Identity CLI test. - IdentityCLI, - // Genesis file test. - GenesisFile, - // Node upgrade tests. - NodeUpgradeDummy, - NodeUpgradeMaxAllowances, - NodeUpgradeV62, - NodeUpgradeEmpty, - NodeUpgradeCancel, - // Debonding entries from genesis test. - Debond, - // Early query test. - EarlyQuery, - EarlyQueryInitHeight, - // Consensus state sync. - ConsensusStateSync, - // Multiple seeds test. - MultipleSeeds, - // Seed API test. - SeedAPI, - // ValidatorEquivocation test. - ValidatorEquivocation, - // Byzantine VRF beacon tests. - ByzantineVRFBeaconHonest, - ByzantineVRFBeaconEarly, - ByzantineVRFBeaconMissing, - // Minimum transact balance test. - MinTransactBalance, - // Consensus governance update parameters tests. - ChangeParametersMinCommissionRate, - } { - if err := cmd.Register(s); err != nil { - return err - } - } - - return nil -} diff --git a/go/oasis-test-runner/scenario/e2e/registry_cli.go b/go/oasis-test-runner/scenario/e2e/registry_cli.go index 4852eacc081..dac8e2cbd0a 100644 --- a/go/oasis-test-runner/scenario/e2e/registry_cli.go +++ b/go/oasis-test-runner/scenario/e2e/registry_cli.go @@ -660,15 +660,9 @@ func (sc *registryCLIImpl) testRuntime(ctx context.Context, childEnv *env.Env, c // Empty genesis state root. testRuntime.Genesis.StateRoot.Empty() - // Generate register runtime transaction. - registerTxPath := filepath.Join(childEnv.Dir(), "registry_runtime_register.json") - if err = cli.Registry.GenerateRegisterRuntimeTx(childEnv.Dir(), testRuntime, 0, registerTxPath); err != nil { - return fmt.Errorf("failed to generate runtime register tx: %w", err) - } - - // Submit register runtime transaction. - if err = cli.Consensus.SubmitTx(registerTxPath); err != nil { - return fmt.Errorf("failed to submit runtime register tx: %w", err) + // Register runtime. 
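+ // Generate and submit the register runtime transaction via the shared scenario helper.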
+ if err = sc.RegisterRuntime(ctx, childEnv, cli, testRuntime, 0); err != nil { + return err } // List runtimes. diff --git a/go/oasis-test-runner/scenario/e2e/runtime/archive_api.go b/go/oasis-test-runner/scenario/e2e/runtime/archive_api.go index 983ad0eadd7..2610b53d9d0 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/archive_api.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/archive_api.go @@ -20,7 +20,7 @@ import ( var ArchiveAPI scenario.Scenario = &archiveAPI{ Scenario: *NewScenario( "archive-api", - NewKVTestClient().WithScenario(InsertTransferKeyValueScenario), + NewTestClient().WithScenario(InsertTransferKeyValueScenario), ), } @@ -240,7 +240,7 @@ func (sc *archiveAPI) testArchiveAPI(ctx context.Context, archiveCtrl *oasis.Con // Test runtime queries. rtClient := archiveCtrl.RuntimeClient sc.Logger.Info("testing runtime GetBlock") - blk, err := rtClient.GetBlock(ctx, &api.GetBlockRequest{RuntimeID: runtimeID, Round: api.RoundLatest}) + blk, err := rtClient.GetBlock(ctx, &api.GetBlockRequest{RuntimeID: KeyValueRuntimeID, Round: api.RoundLatest}) if err != nil { return fmt.Errorf("runtime GetBlock: %w", err) } @@ -249,31 +249,31 @@ func (sc *archiveAPI) testArchiveAPI(ctx context.Context, archiveCtrl *oasis.Con } sc.Logger.Info("testing runtime GetEvents") - _, err = rtClient.GetEvents(ctx, &api.GetEventsRequest{RuntimeID: runtimeID, Round: api.RoundLatest}) + _, err = rtClient.GetEvents(ctx, &api.GetEventsRequest{RuntimeID: KeyValueRuntimeID, Round: api.RoundLatest}) if err != nil { return fmt.Errorf("runtime GetEvents: %w", err) } sc.Logger.Info("testing runtime GetLastRetainedBlock") - _, err = rtClient.GetLastRetainedBlock(ctx, runtimeID) + _, err = rtClient.GetLastRetainedBlock(ctx, KeyValueRuntimeID) if err != nil { return fmt.Errorf("runtime GetLastRetainedBlock: %w", err) } sc.Logger.Info("testing runtime GetTransactions") - _, err = rtClient.GetTransactions(ctx, &api.GetTransactionsRequest{RuntimeID: runtimeID, Round: api.RoundLatest}) + _, err = rtClient.GetTransactions(ctx, &api.GetTransactionsRequest{RuntimeID: KeyValueRuntimeID, Round: api.RoundLatest}) if err != nil { return fmt.Errorf("runtime GetTransactions: %w", err) } sc.Logger.Info("testing runtime GetTransactionsWithResults") - _, err = rtClient.GetTransactionsWithResults(ctx, &api.GetTransactionsRequest{RuntimeID: runtimeID, Round: api.RoundLatest}) + _, err = rtClient.GetTransactionsWithResults(ctx, &api.GetTransactionsRequest{RuntimeID: KeyValueRuntimeID, Round: api.RoundLatest}) if err != nil { return fmt.Errorf("runtime GetTransactionsWithResults: %w", err) } sc.Logger.Info("testing runtime WatchBlocks") - _, sub, err := rtClient.WatchBlocks(ctx, runtimeID) + _, sub, err := rtClient.WatchBlocks(ctx, KeyValueRuntimeID) if err != nil { return fmt.Errorf("runtime WatchBlocks: %w", err) } @@ -298,7 +298,7 @@ func (sc *archiveAPI) Run(ctx context.Context, childEnv *env.Env) error { // Wait for the client to exit. sc.Logger.Info("waiting for test client to exit") - if err = sc.WaitTestClientOnly(); err != nil { + if err = sc.WaitTestClient(); err != nil { return err } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/byzantine.go b/go/oasis-test-runner/scenario/e2e/runtime/byzantine.go index 3b1f7d1a0d9..b6c9fd304f7 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/byzantine.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/byzantine.go @@ -464,7 +464,7 @@ func (sc *byzantineImpl) Run(ctx context.Context, childEnv *env.Env) error { } // Start watching for runtime blocks. 
- blkCh, blkSub, err := sc.Net.ClientController().RuntimeClient.WatchBlocks(ctx, runtimeID) + blkCh, blkSub, err := sc.Net.ClientController().RuntimeClient.WatchBlocks(ctx, KeyValueRuntimeID) if err != nil { return fmt.Errorf("failed to watch blocks: %w", err) } @@ -477,7 +477,7 @@ func (sc *byzantineImpl) Run(ctx context.Context, childEnv *env.Env) error { sc.Logger.Info("getting genesis block") - genesisBlk, err := sc.Net.ClientController().RuntimeClient.GetGenesisBlock(ctx, runtimeID) + genesisBlk, err := sc.Net.ClientController().RuntimeClient.GetGenesisBlock(ctx, KeyValueRuntimeID) if err != nil { return fmt.Errorf("failed to get genesis block: %w", err) } @@ -547,7 +547,7 @@ WatchBlocksLoop: // Wait for all compute nodes to be synced. blk, err := sc.Net.ClientController().RuntimeClient.GetBlock(ctx, &runtimeClient.GetBlockRequest{ - RuntimeID: runtimeID, + RuntimeID: KeyValueRuntimeID, Round: runtimeClient.RoundLatest, }) if err != nil { diff --git a/go/oasis-test-runner/scenario/e2e/runtime/dump_restore.go b/go/oasis-test-runner/scenario/e2e/runtime/dump_restore.go index fe5dba2d744..f324b6b8e23 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/dump_restore.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/dump_restore.go @@ -54,7 +54,7 @@ func newDumpRestoreImpl( sc := &dumpRestoreImpl{ Scenario: *NewScenario( name, - NewKVTestClient().WithScenario(InsertKeyValueScenario), + NewTestClient().WithScenario(InsertKeyValueScenario), ), mapGenesisDocumentFn: mapGenesisDocumentFn, } @@ -154,7 +154,7 @@ func (sc *dumpRestoreImpl) Run(ctx context.Context, childEnv *env.Env) error { } // Wait for the client to exit. - if err = sc.WaitTestClientOnly(); err != nil { + if err = sc.WaitTestClient(); err != nil { return err } @@ -188,6 +188,6 @@ func (sc *dumpRestoreImpl) Run(ctx context.Context, childEnv *env.Env) error { } // Check that everything works with restored state. - sc.Scenario.testClient = NewKVTestClient().WithSeed("seed2").WithScenario(RemoveKeyValueScenario) + sc.Scenario.TestClient = NewTestClient().WithSeed("seed2").WithScenario(RemoveKeyValueScenario) return sc.Scenario.Run(ctx, childEnv) } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/gas_fees.go b/go/oasis-test-runner/scenario/e2e/runtime/gas_fees.go index 0d22a8a231e..3e8d7f5f377 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/gas_fees.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/gas_fees.go @@ -118,13 +118,13 @@ func (sc *gasFeesRuntimesImpl) Run(ctx context.Context, childEnv *env.Env) error } // Wait for all nodes to be synced before we proceed. - if err := sc.waitNodesSynced(ctx); err != nil { + if err := sc.WaitNodesSynced(ctx); err != nil { return err } // Submit a runtime transaction to check whether transaction processing works. 
sc.Logger.Info("submitting transaction to runtime") - if _, err := sc.submitKeyValueRuntimeInsertTx(ctx, runtimeID, 0, "hello", "non-free world", false, 0); err != nil { + if _, err := sc.submitKeyValueRuntimeInsertTx(ctx, KeyValueRuntimeID, 0, "hello", "non-free world", false, 0); err != nil { return err } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/governance_upgrade.go b/go/oasis-test-runner/scenario/e2e/runtime/governance_upgrade.go index f202887a8a6..47aae7c67b5 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/governance_upgrade.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/governance_upgrade.go @@ -61,7 +61,7 @@ func newGovernanceConsensusUpgradeImpl(correctUpgradeVersion, cancelUpgrade bool sc := &governanceConsensusUpgradeImpl{ Scenario: *NewScenario( name, - NewKVTestClient().WithScenario(InsertTransferKeyValueScenario), + NewTestClient().WithScenario(InsertTransferKeyValueScenario), ), correctUpgradeVersion: correctUpgradeVersion, shouldCancelUpgrade: cancelUpgrade, @@ -316,7 +316,7 @@ func (sc *governanceConsensusUpgradeImpl) Run(ctx context.Context, childEnv *env } // Wait for the client to exit. - if err = sc.WaitTestClientOnly(); err != nil { + if err = sc.WaitTestClient(); err != nil { return err } @@ -435,7 +435,7 @@ func (sc *governanceConsensusUpgradeImpl) Run(ctx context.Context, childEnv *env // Ensure genesis was exported and matches on all nodes. sc.Logger.Info("gathering exported genesis files") - _, err = sc.GetExportedGenesisFiles(false) + _, err = sc.ExportedGenesisFiles(false) if err != nil { return fmt.Errorf("failure getting exported genesis files: %w", err) } @@ -465,6 +465,6 @@ func (sc *governanceConsensusUpgradeImpl) Run(ctx context.Context, childEnv *env } // Check that runtime still works after the upgrade. - sc.Scenario.testClient = NewKVTestClient().WithSeed("seed2").WithScenario(RemoveKeyValueScenario) + sc.Scenario.TestClient = NewTestClient().WithSeed("seed2").WithScenario(RemoveKeyValueScenario) return sc.Scenario.Run(ctx, childEnv) } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/halt_restore.go b/go/oasis-test-runner/scenario/e2e/runtime/halt_restore.go index 30ebddc0291..556450f5ce0 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/halt_restore.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/halt_restore.go @@ -41,7 +41,7 @@ func newHaltRestoreImpl(suspended bool) scenario.Scenario { return &haltRestoreImpl{ Scenario: *NewScenario( name, - NewKVTestClient().WithScenario(InsertTransferKeyValueScenario), + NewTestClient().WithScenario(InsertTransferKeyValueScenario), ), haltEpoch: beacon.EpochTime(haltEpoch), suspendRuntime: suspended, @@ -85,7 +85,7 @@ func (sc *haltRestoreImpl) Run(ctx context.Context, childEnv *env.Env) error { / nextEpoch++ // Next, after initial transitions. // Wait for the client to exit. 
- if err = sc.WaitTestClientOnly(); err != nil { + if err = sc.WaitTestClient(); err != nil { return err } @@ -150,7 +150,7 @@ func (sc *haltRestoreImpl) Run(ctx context.Context, childEnv *env.Env) error { / _, _, _ = reflect.Select(exitChs) sc.Logger.Info("gathering exported genesis files") - files, err := sc.GetExportedGenesisFiles(true) + files, err := sc.ExportedGenesisFiles(true) if err != nil { return fmt.Errorf("failure getting exported genesis files: %w", err) } @@ -220,7 +220,7 @@ func (sc *haltRestoreImpl) Run(ctx context.Context, childEnv *env.Env) error { / return err } - sc.Scenario.testClient = NewKVTestClient().WithSeed("seed2").WithScenario(RemoveKeyValueScenario) + sc.Scenario.TestClient = NewTestClient().WithSeed("seed2").WithScenario(RemoveKeyValueScenario) // Start the new network again and run the test client. if err = sc.StartNetworkAndWaitForClientSync(ctx); err != nil { @@ -229,8 +229,5 @@ func (sc *haltRestoreImpl) Run(ctx context.Context, childEnv *env.Env) error { / if _, err = sc.initialEpochTransitionsWith(ctx, fixture, genesisDoc.Beacon.Base); err != nil { return err } - if err = sc.startTestClientOnly(ctx, childEnv); err != nil { - return err - } - return sc.WaitTestClientOnly() + return sc.RunTestClientAndCheckLogs(ctx, childEnv) } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/halt_restore_nonmock.go b/go/oasis-test-runner/scenario/e2e/runtime/halt_restore_nonmock.go index 8c90d8aab6a..c5f355c4bff 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/halt_restore_nonmock.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/halt_restore_nonmock.go @@ -25,7 +25,7 @@ func newHaltRestoreNonMockImpl() scenario.Scenario { return &haltRestoreNonMockImpl{ Scenario: *NewScenario( name, - NewKVTestClient().WithScenario(InsertTransferKeyValueScenario), + NewTestClient().WithScenario(InsertTransferKeyValueScenario), ), haltEpoch: 8, } @@ -61,7 +61,7 @@ func (sc *haltRestoreNonMockImpl) Run(ctx context.Context, childEnv *env.Env) er } // Wait for the client to exit. - if err = sc.WaitTestClientOnly(); err != nil { + if err = sc.WaitTestClient(); err != nil { return err } @@ -79,7 +79,7 @@ func (sc *haltRestoreNonMockImpl) Run(ctx context.Context, childEnv *env.Env) er _, _, _ = reflect.Select(exitChs) sc.Logger.Info("gathering exported genesis files") - files, err := sc.GetExportedGenesisFiles(true) + files, err := sc.ExportedGenesisFiles(true) if err != nil { return fmt.Errorf("failure getting exported genesis files: %w", err) } @@ -133,11 +133,11 @@ func (sc *haltRestoreNonMockImpl) Run(ctx context.Context, childEnv *env.Env) er return err } - sc.Scenario.testClient = NewKVTestClient().WithSeed("seed2").WithScenario(RemoveKeyValueScenario) + sc.Scenario.TestClient = NewTestClient().WithSeed("seed2").WithScenario(RemoveKeyValueScenario) // Start the new network again and run the test client. if err = sc.StartNetworkAndTestClient(ctx, childEnv); err != nil { return err } - return sc.WaitTestClientOnly() + return sc.WaitTestClient() } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/helpers_config.go b/go/oasis-test-runner/scenario/e2e/runtime/helpers_config.go new file mode 100644 index 00000000000..75fbf2262e2 --- /dev/null +++ b/go/oasis-test-runner/scenario/e2e/runtime/helpers_config.go @@ -0,0 +1,27 @@ +package runtime + +import ( + "fmt" + + "github.com/oasisprotocol/oasis-core/go/common/node" +) + +// TEEHardware returns the configured TEE hardware. 
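+// The value is parsed from the scenario's TEE hardware flag.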
+func (sc *Scenario) TEEHardware() (node.TEEHardware, error) { + teeStr, _ := sc.Flags.GetString(cfgTEEHardware) + var tee node.TEEHardware + if err := tee.FromString(teeStr); err != nil { + return node.TEEHardwareInvalid, err + } + return tee, nil +} + +// BuildTargetDirs returns the configured build and target directories. +func (sc *Scenario) BuildTargetDirs() (string, string, error) { + buildDir, _ := sc.Flags.GetString(cfgRuntimeSourceDir) + targetDir, _ := sc.Flags.GetString(cfgRuntimeTargetDir) + if buildDir == "" || targetDir == "" { + return "", "", fmt.Errorf("runtime build dir and/or target dir not configured") + } + return buildDir, targetDir, nil +} diff --git a/go/oasis-test-runner/scenario/e2e/runtime/helpers_consensus.go b/go/oasis-test-runner/scenario/e2e/runtime/helpers_consensus.go new file mode 100644 index 00000000000..1f46789a70b --- /dev/null +++ b/go/oasis-test-runner/scenario/e2e/runtime/helpers_consensus.go @@ -0,0 +1,135 @@ +package runtime + +import ( + "context" + "crypto/rand" + "fmt" + + beacon "github.com/oasisprotocol/oasis-core/go/beacon/api" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" +) + +func (sc *Scenario) initialEpochTransitions(ctx context.Context, fixture *oasis.NetworkFixture) (beacon.EpochTime, error) { + return sc.initialEpochTransitionsWith(ctx, fixture, 0) +} + +func (sc *Scenario) initialEpochTransitionsWith(ctx context.Context, fixture *oasis.NetworkFixture, baseEpoch beacon.EpochTime) (beacon.EpochTime, error) { + epoch := baseEpoch + 1 + advanceEpoch := func() error { + sc.Logger.Info("triggering epoch transition", + "epoch", epoch, + ) + if err := sc.Net.Controller().SetEpoch(ctx, epoch); err != nil { + return fmt.Errorf("failed to set epoch: %w", err) + } + sc.Logger.Info("epoch transition done", + "epoch", epoch, + ) + + epoch++ + + return nil + } + + if len(sc.Net.Keymanagers()) > 0 { + // First wait for validator and key manager nodes to register. Then perform an epoch + // transition which will cause the compute and storage nodes to register. + sc.Logger.Info("waiting for validators to initialize", + "num_validators", len(sc.Net.Validators()), + ) + for i, n := range sc.Net.Validators() { + if fixture.Validators[i].NoAutoStart { + // Skip nodes that don't auto start. + continue + } + if err := n.WaitReady(ctx); err != nil { + return epoch, fmt.Errorf("failed to wait for a validator: %w", err) + } + } + sc.Logger.Info("waiting for key managers to initialize", + "num_keymanagers", len(sc.Net.Keymanagers()), + ) + for i, n := range sc.Net.Keymanagers() { + if fixture.Keymanagers[i].NoAutoStart { + // Skip nodes that don't auto start. + continue + } + if err := n.WaitReady(ctx); err != nil { + return epoch, fmt.Errorf("failed to wait for a key manager: %w", err) + } + } + } + + if err := advanceEpoch(); err != nil { // Epoch 1 + return epoch, err + } + + // Wait for compute workers to become ready. + sc.Logger.Info("waiting for compute workers to initialize", + "num_compute_workers", len(sc.Net.ComputeWorkers()), + ) + for i, n := range sc.Net.ComputeWorkers() { + if fixture.ComputeWorkers[i].NoAutoStart { + // Skip nodes that don't auto start. + continue + } + if err := n.WaitReady(ctx); err != nil { + return epoch, fmt.Errorf("failed to wait for a compute worker: %w", err) + } + } + + // Byzantine nodes can only register. If any are defined, wait + // for all nodes to become registered, since we cannot control them directly. 
+ if len(sc.Net.Byzantine()) > 0 { + sc.Logger.Info("waiting for (all) nodes to register", + "num_nodes", sc.Net.NumRegisterNodes(), + ) + if err := sc.Net.Controller().WaitNodesRegistered(ctx, sc.Net.NumRegisterNodes()); err != nil { + return epoch, fmt.Errorf("failed to wait for nodes: %w", err) + } + } + + // Then perform epoch transition(s) to elect the committees. + if err := advanceEpoch(); err != nil { // Epoch 2 + return epoch, err + } + switch sc.Net.Config().Beacon.Backend { + case "", beacon.BackendVRF: + // The byzantine node gets jammed into a committee first thing, which + // breaks everything because our test case failure detection log watcher + // can't cope with expected failures. So once we elect, if the byzantine + // node is active, we need to immediately transition into doing interesting + // things. + if !sc.debugWeakAlphaOk { + // Committee elections won't happen the first round. + if err := advanceEpoch(); err != nil { // Epoch 3 + return epoch, err + } + // And nodes are ineligible to be elected till their registration + // epoch + 2. + if err := advanceEpoch(); err != nil { // Epoch 4 (or 3 if byzantine test) + return epoch, err + } + } + if !sc.debugNoRandomInitialEpoch { + // To prevent people from writing tests that depend on very precise + // timekeeping by epoch, randomize the start epoch slightly. + // + // If this causes your test to fail, it is not this code that is + // wrong, it is the test that is wrong. + var randByte [1]byte + _, _ = rand.Read(randByte[:]) + numSkips := (int)(randByte[0]&3) + 1 + sc.Logger.Info("advancing the epoch to prevent hardcoding time assumptions in tests", + "num_advances", numSkips, + ) + for i := 0; i < numSkips; i++ { + if err := advanceEpoch(); err != nil { + return epoch, err + } + } + } + } + + return epoch, nil +} diff --git a/go/oasis-test-runner/scenario/e2e/runtime/helpers_keymanager.go b/go/oasis-test-runner/scenario/e2e/runtime/helpers_keymanager.go new file mode 100644 index 00000000000..c6492ac2d98 --- /dev/null +++ b/go/oasis-test-runner/scenario/e2e/runtime/helpers_keymanager.go @@ -0,0 +1,428 @@ +package runtime + +import ( + "bytes" + "context" + "fmt" + "path/filepath" + + "github.com/oasisprotocol/curve25519-voi/primitives/x25519" + + beacon "github.com/oasisprotocol/oasis-core/go/beacon/api" + "github.com/oasisprotocol/oasis-core/go/common" + "github.com/oasisprotocol/oasis-core/go/common/cbor" + "github.com/oasisprotocol/oasis-core/go/common/sgx" + "github.com/oasisprotocol/oasis-core/go/common/version" + consensus "github.com/oasisprotocol/oasis-core/go/consensus/api" + keymanager "github.com/oasisprotocol/oasis-core/go/keymanager/api" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis/cli" + registry "github.com/oasisprotocol/oasis-core/go/registry/api" +) + +// KeyManagerStatus returns the latest key manager status. +func (sc *Scenario) KeyManagerStatus(ctx context.Context) (*keymanager.Status, error) { + return sc.Net.Controller().Keymanager.GetStatus(ctx, &registry.NamespaceQuery{ + Height: consensus.HeightLatest, + ID: KeyManagerRuntimeID, + }) +} + +// MasterSecret returns the key manager master secret. 
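+// It returns nil (and no error) when no master secret has been generated yet.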
+func (sc *Scenario) MasterSecret(ctx context.Context) (*keymanager.SignedEncryptedMasterSecret, error) { + secret, err := sc.Net.Controller().Keymanager.GetMasterSecret(ctx, &registry.NamespaceQuery{ + Height: consensus.HeightLatest, + ID: KeyManagerRuntimeID, + }) + if err == keymanager.ErrNoSuchMasterSecret { + return nil, nil + } + return secret, err +} + +// WaitMasterSecret waits until the specified generation of the master secret is generated. +func (sc *Scenario) WaitMasterSecret(ctx context.Context, generation uint64) (*keymanager.Status, error) { + sc.Logger.Info("waiting for master secret", "generation", generation) + + mstCh, mstSub, err := sc.Net.Controller().Keymanager.WatchMasterSecrets(ctx) + if err != nil { + return nil, err + } + defer mstSub.Close() + + stCh, stSub, err := sc.Net.Controller().Keymanager.WatchStatuses(ctx) + if err != nil { + return nil, err + } + defer stSub.Close() + + var last *keymanager.Status + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case secret := <-mstCh: + if !secret.Secret.ID.Equal(&KeyManagerRuntimeID) { + continue + } + + sc.Logger.Info("master secret proposed", + "generation", secret.Secret.Generation, + "epoch", secret.Secret.Epoch, + "num_ciphertexts", len(secret.Secret.Secret.Ciphertexts), + ) + case status := <-stCh: + if !status.ID.Equal(&KeyManagerRuntimeID) { + continue + } + if status.NextGeneration() == 0 { + continue + } + if last != nil && status.Generation == last.Generation { + last = status + continue + } + + sc.Logger.Info("master secret rotation", + "generation", status.Generation, + "rotation_epoch", status.RotationEpoch, + ) + + if status.Generation >= generation { + return status, nil + } + last = status + } + } +} + +// WaitEphemeralSecrets waits for the specified number of ephemeral secrets to be generated. +func (sc *Scenario) WaitEphemeralSecrets(ctx context.Context, n int) (*keymanager.SignedEncryptedEphemeralSecret, error) { + sc.Logger.Info("waiting for ephemeral secrets", "n", n) + + ephCh, ephSub, err := sc.Net.Controller().Keymanager.WatchEphemeralSecrets(ctx) + if err != nil { + return nil, err + } + defer ephSub.Close() + + var secret *keymanager.SignedEncryptedEphemeralSecret + for i := 0; i < n; i++ { + select { + case secret = <-ephCh: + sc.Logger.Info("ephemeral secret published", + "epoch", secret.Secret.Epoch, + ) + case <-ctx.Done(): + return nil, fmt.Errorf("timed out waiting for ephemeral secrets") + } + } + return secret, nil +} + +// UpdateRotationInterval updates the master secret rotation interval in the key manager policy. +func (sc *Scenario) UpdateRotationInterval(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, rotationInterval beacon.EpochTime, nonce uint64) error { + sc.Logger.Info("updating master secret rotation interval in the key manager policy", + "interval", rotationInterval, + ) + + status, err := sc.KeyManagerStatus(ctx) + if err != nil && err != keymanager.ErrNoSuchStatus { + return err + } + + var policies map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX + if status != nil && status.Policy != nil { + policies = status.Policy.Policy.Enclaves + } + + if err := sc.ApplyKeyManagerPolicy(ctx, childEnv, cli, rotationInterval, policies, nonce); err != nil { + return err + } + + return nil +} + +// CompareLongtermPublicKeys compares long-term public keys generated by the specified +// key manager nodes.
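+// +// (Illustrative usage sketch, not part of the change; the index slice refers +// to positions in sc.Net.Keymanagers(): +// +// if err := sc.CompareLongtermPublicKeys(ctx, []int{0, 1}); err != nil { +// return err +// } +// )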
+func (sc *Scenario) CompareLongtermPublicKeys(ctx context.Context, idxs []int) error { + chainContext, err := sc.Net.Controller().Consensus.GetChainContext(ctx) + if err != nil { + return err + } + + status, err := sc.KeyManagerStatus(ctx) + if err != nil { + return err + } + + var generation uint64 + if status.Generation > 0 { + // Avoid verification problems when the consensus verifier is one block behind. + generation = status.Generation - 1 + } + + sc.Logger.Info("comparing long-term public keys generated by the key managers", + "ids", idxs, + "generation", generation, + ) + + keys := make(map[uint64]*x25519.PublicKey) + kms := sc.Net.Keymanagers() + for _, idx := range idxs { + km := kms[idx] + + // Prepare an RPC client which will be used to query key manager nodes + // for public ephemeral keys. + rpcClient, err := newKeyManagerRPCClient(chainContext) + if err != nil { + return err + } + peerID, err := rpcClient.addKeyManagerAddrToHost(km) + if err != nil { + return err + } + + for gen := uint64(0); gen <= generation; gen++ { + sc.Logger.Info("fetching public key", "generation", gen, "node", km.Name) + + var key *x25519.PublicKey + key, err = rpcClient.fetchPublicKey(ctx, gen, peerID) + switch { + case err != nil: + return err + case key == nil: + return fmt.Errorf("master secret generation %d not found", gen) + } + + if expected, ok := keys[gen]; ok && !bytes.Equal(expected[:], key[:]) { + return fmt.Errorf("derived keys don't match: expected %+X, given %+X", expected, key) + } + keys[gen] = key + + sc.Logger.Info("public key fetched", "key", fmt.Sprintf("%+X", key)) + } + if err != nil { + return err + } + } + if expected, size := int(generation)+1, len(keys); expected != size { + return fmt.Errorf("the number of derived keys doesn't match: expected %d, found %d", expected, size) + } + + return nil +} + +// KeymanagerInitResponse returns the InitResponse of the specified key manager node. +func (sc *Scenario) KeymanagerInitResponse(ctx context.Context, idx int) (*keymanager.InitResponse, error) { + kms := sc.Net.Keymanagers() + if kmLen := len(kms); kmLen <= idx { + return nil, fmt.Errorf("expected more than %d key managers, have: %v", idx, kmLen) + } + km := kms[idx] + + ctrl, err := oasis.NewController(km.SocketPath()) + if err != nil { + return nil, err + } + + // Extract ExtraInfo. + node, err := ctrl.Registry.GetNode( + ctx, + &registry.IDQuery{ + ID: km.NodeID, + }, + ) + if err != nil { + return nil, err + } + rt := node.GetRuntime(KeyManagerRuntimeID, version.Version{}) + if rt == nil { + return nil, fmt.Errorf("key manager is missing keymanager runtime from descriptor") + } + var signedInitResponse keymanager.SignedInitResponse + if err = cbor.Unmarshal(rt.ExtraInfo, &signedInitResponse); err != nil { + return nil, fmt.Errorf("failed to unmarshal extrainfo") + } + + return &signedInitResponse.InitResponse, nil +} + +// UpdateEnclavePolicies updates enclave policies with a new runtime deployment. +func (sc *Scenario) UpdateEnclavePolicies(rt *oasis.Runtime, deploymentIndex int, policies map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX) { + enclaveID := rt.GetEnclaveIdentity(deploymentIndex) + if enclaveID == nil { + return + } + + switch rt.Kind() { + case registry.KindKeyManager: + // Allow key manager runtime to replicate from all existing key managers. + for _, policy := range policies { + policy.MayReplicate = append(policy.MayReplicate, *enclaveID) + } + + // Allow all runtimes to query the new key manager runtime.
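+ // (Editorial note: the newPolicy below unions the MayQuery lists of every + // existing policy; duplicate enclave entries are tolerated, as noted inline.)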
+ newPolicy := keymanager.EnclavePolicySGX{ + MayQuery: make(map[common.Namespace][]sgx.EnclaveIdentity), + MayReplicate: make([]sgx.EnclaveIdentity, 0), + } + for _, policy := range policies { + for rt, enclaves := range policy.MayQuery { + // Allowing duplicates, not important. + newPolicy.MayQuery[rt] = append(newPolicy.MayQuery[rt], enclaves...) + } + } + + policies[*enclaveID] = &newPolicy + case registry.KindCompute: + // Allow compute runtime to query all existing key managers. + for _, policy := range policies { + policy.MayQuery[rt.ID()] = append(policy.MayQuery[rt.ID()], *enclaveID) + } + default: + // Skip other kinds. + } +} + +// BuildAllEnclavePolicies builds enclave policies for all key manager runtimes. +// +// Policies are built from the fixture and adhere to the following rules: +// - Each SGX runtime must have only one deployment and a distinct enclave identity. +// - Key manager enclaves are not allowed to replicate the master secrets. +// - All compute runtime enclaves are allowed to query key manager enclaves. +func (sc *Scenario) BuildAllEnclavePolicies(childEnv *env.Env) (map[common.Namespace]map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX, error) { + sc.Logger.Info("building key manager SGX policy enclave policies map") + + kmPolicies := make(map[common.Namespace]map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX) + + // Each SGX runtime must have only one deployment. + for _, rt := range sc.Net.Runtimes() { + if len(rt.ToRuntimeDescriptor().Deployments) != 1 { + return nil, fmt.Errorf("runtime should have only one deployment") + } + } + + // Each SGX runtime must have a distinct enclave identity. + enclaveIDs := make(map[string]struct{}) + for _, rt := range sc.Net.Runtimes() { + enclaveID := rt.GetEnclaveIdentity(0) + if enclaveID == nil { + continue + } + enclaveIDText, err := enclaveID.MarshalText() + if err != nil { + return nil, fmt.Errorf("failed to marshal enclave identity: %w", err) + } + if _, ok := enclaveIDs[string(enclaveIDText)]; ok { + return nil, fmt.Errorf("enclave identities are not unique") + } + enclaveIDs[string(enclaveIDText)] = struct{}{} + } + + // Prepare empty policies for all key managers. + for _, rt := range sc.Net.Runtimes() { + if rt.Kind() != registry.KindKeyManager { + continue + } + + enclaveID := rt.GetEnclaveIdentity(0) + if enclaveID == nil { + continue + } + + if _, ok := kmPolicies[rt.ID()]; ok { + return nil, fmt.Errorf("duplicate key manager runtime: %s", rt.ID()) + } + + kmPolicies[rt.ID()] = map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX{ + *enclaveID: { + MayQuery: make(map[common.Namespace][]sgx.EnclaveIdentity), + MayReplicate: make([]sgx.EnclaveIdentity, 0), + }, + } + } + + // Allow all compute runtime enclaves to query key manager enclave. + for _, rt := range sc.Net.Runtimes() { + if rt.Kind() != registry.KindCompute { + continue + } + + enclaveID := rt.GetEnclaveIdentity(0) + if enclaveID == nil { + continue + } + + // Skip if the key manager runtime is not available. + kmRtID := rt.ToRuntimeDescriptor().KeyManager + policies, ok := kmPolicies[*kmRtID] + if !ok { + continue + } + + for _, policy := range policies { + policy.MayQuery[rt.ID()] = append(policy.MayQuery[rt.ID()], *enclaveID) + } + } + + return kmPolicies, nil +} + +// BuildEnclavePolicies builds enclave policies for the simple key manager runtime. +// +// If the simple key manager runtime does not exist or is not running on an SGX platform, +// it returns nil. 
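+// +// (Illustrative usage sketch, not part of the change: +// +// policies, err := sc.BuildEnclavePolicies(childEnv) +// if err != nil { +// return err +// } +// if policies == nil { +// // Non-SGX run; no key manager policy needs to be applied. +// } +// )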
+func (sc *Scenario) BuildEnclavePolicies(childEnv *env.Env) (map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX, error) { + policies, err := sc.BuildAllEnclavePolicies(childEnv) + if err != nil { + return nil, err + } + return policies[KeyManagerRuntimeID], nil +} + +// ApplyKeyManagerPolicy applies the given policy to the simple key manager runtime. +func (sc *Scenario) ApplyKeyManagerPolicy(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, rotationInterval beacon.EpochTime, policies map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX, nonce uint64) error { + status, err := sc.KeyManagerStatus(ctx) + if err != nil && err != keymanager.ErrNoSuchStatus { + return err + } + + serial := uint32(1) + if status != nil && status.Policy != nil { + serial = status.Policy.Policy.Serial + 1 + } + + dir := childEnv.Dir() + policyPath := filepath.Join(dir, "km_policy.cbor") + sig1Path := filepath.Join(dir, "km_policy_sig1.pem") + sig2Path := filepath.Join(dir, "km_policy_sig2.pem") + sig3Path := filepath.Join(dir, "km_policy_sig3.pem") + txPath := filepath.Join(dir, "km_gen_update.json") + + sc.Logger.Info("generating key manager policy") + if err := cli.Keymanager.InitPolicy(KeyManagerRuntimeID, serial, rotationInterval, policies, policyPath); err != nil { + return err + } + sc.Logger.Info("signing key manager policy") + if err := cli.Keymanager.SignPolicy("1", policyPath, sig1Path); err != nil { + return err + } + if err := cli.Keymanager.SignPolicy("2", policyPath, sig2Path); err != nil { + return err + } + if err := cli.Keymanager.SignPolicy("3", policyPath, sig3Path); err != nil { + return err + } + + sc.Logger.Info("updating key manager policy") + if err := cli.Keymanager.GenUpdate(nonce, policyPath, []string{sig1Path, sig2Path, sig3Path}, txPath); err != nil { + return err + } + if err := cli.Consensus.SubmitTx(txPath); err != nil { + return fmt.Errorf("failed to update key manager policy: %w", err) + } + + return nil +} diff --git a/go/oasis-test-runner/scenario/e2e/runtime/helpers_network.go b/go/oasis-test-runner/scenario/e2e/runtime/helpers_network.go new file mode 100644 index 00000000000..15ef4cc8d0d --- /dev/null +++ b/go/oasis-test-runner/scenario/e2e/runtime/helpers_network.go @@ -0,0 +1,214 @@ +package runtime + +import ( + "context" + "fmt" + "time" + + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" +) + +// StartNetworkAndWaitForClientSync starts the network and waits for the client node to sync. +func (sc *Scenario) StartNetworkAndWaitForClientSync(ctx context.Context) error { + if err := sc.Net.Start(); err != nil { + return err + } + + return sc.WaitForClientSync(ctx) +} + +// StartNetworkAndTestClient starts the network and the runtime test client. +func (sc *Scenario) StartNetworkAndTestClient(ctx context.Context, childEnv *env.Env) error { + if err := sc.StartNetworkAndWaitForClientSync(ctx); err != nil { + return fmt.Errorf("failed to initialize network: %w", err) + } + + return sc.StartTestClient(ctx, childEnv) +} + +// StartTestClient initializes and starts the runtime test client. 
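+// +// (Illustrative usage sketch, not part of the change; RunTestClientAndCheckLogs +// below combines these steps: +// +// if err := sc.StartTestClient(ctx, childEnv); err != nil { +// return err +// } +// return sc.WaitTestClientAndCheckLogs() +// )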
+func (sc *Scenario) StartTestClient(ctx context.Context, childEnv *env.Env) error { + if err := sc.TestClient.Init(sc); err != nil { + return fmt.Errorf("failed to initialize test client: %w", err) + } + + if err := sc.TestClient.Start(ctx, childEnv); err != nil { + return fmt.Errorf("failed to start test client: %w", err) + } + + return nil +} + +// RunTestClientAndCheckLogs initializes and starts the runtime test client, +// waits for the runtime test client to finish its work and then verifies the logs. +func (sc *Scenario) RunTestClientAndCheckLogs(ctx context.Context, childEnv *env.Env) error { + if err := sc.StartTestClient(ctx, childEnv); err != nil { + return err + } + + return sc.WaitTestClientAndCheckLogs() +} + +// WaitNodesSynced waits for all the nodes to sync. +func (sc *Scenario) WaitNodesSynced(ctx context.Context) error { + checkSynced := func(n *oasis.Node) error { + c, err := oasis.NewController(n.SocketPath()) + if err != nil { + return fmt.Errorf("failed to create node controller: %w", err) + } + defer c.Close() + + if err = c.WaitSync(ctx); err != nil { + return fmt.Errorf("failed to wait for node to sync: %w", err) + } + return nil + } + + sc.Logger.Info("waiting for all nodes to be synced") + + for _, n := range sc.Net.Validators() { + if err := checkSynced(n.Node); err != nil { + return err + } + } + for _, n := range sc.Net.Keymanagers() { + if err := checkSynced(n.Node); err != nil { + return err + } + } + for _, n := range sc.Net.ComputeWorkers() { + if err := checkSynced(n.Node); err != nil { + return err + } + } + for _, n := range sc.Net.Clients() { + if err := checkSynced(n.Node); err != nil { + return err + } + } + + sc.Logger.Info("nodes synced") + return nil +} + +// WaitForClientSync waits for the first client to sync. +func (sc *Scenario) WaitForClientSync(ctx context.Context) error { + clients := sc.Net.Clients() + if len(clients) == 0 { + return fmt.Errorf("scenario/e2e: network has no client nodes") + } + + sc.Logger.Info("ensuring client node is synced") + ctrl, err := oasis.NewController(clients[0].SocketPath()) + if err != nil { + return fmt.Errorf("failed to create controller for client: %w", err) + } + if err = ctrl.WaitSync(ctx); err != nil { + return fmt.Errorf("client-0 failed to sync: %w", err) + } + + return nil +} + +// WaitTestClient waits for the runtime test client to finish its work. +func (sc *Scenario) WaitTestClient() error { + sc.Logger.Info("waiting for test client to exit") + return sc.TestClient.Wait() +} + +// WaitTestClientAndCheckLogs waits for the runtime test client to finish its work +// and then verifies the logs. +func (sc *Scenario) WaitTestClientAndCheckLogs() error { + if err := sc.WaitTestClient(); err != nil { + return err + } + return sc.checkTestClientLogs() +} + +func (sc *Scenario) checkTestClientLogs() error { + sc.Logger.Info("checking test client logs") + + // Wait for logs to be fully processed before checking them. When + // the client exits very quickly the log watchers may not have + // processed the relevant logs yet. + // + // TODO: Find a better way to synchronize log watchers. + time.Sleep(1 * time.Second) + + return sc.Net.CheckLogWatchers() +} + +// StartKeymanagers starts the specified key manager nodes. 
+func (sc *Scenario) StartKeymanagers(ctx context.Context, idxs []int) error { + sc.Logger.Info("starting the key managers", "ids", fmt.Sprintf("%+v", idxs)) + + kms := sc.Net.Keymanagers() + for _, idx := range idxs { + if err := kms[idx].Start(); err != nil { + return err + } + } + return nil +} + +// StopKeymanagers stops the specified key manager nodes. +func (sc *Scenario) StopKeymanagers(ctx context.Context, idxs []int) error { + sc.Logger.Info("stopping the key managers", "ids", fmt.Sprintf("%+v", idxs)) + + kms := sc.Net.Keymanagers() + for _, idx := range idxs { + if err := kms[idx].Stop(); err != nil { + return err + } + } + return nil +} + +// RestartKeymanagers restarts the specified key manager nodes. +func (sc *Scenario) RestartKeymanagers(ctx context.Context, idxs []int) error { + sc.Logger.Info("restarting the key managers", "ids", fmt.Sprintf("%+v", idxs)) + + kms := sc.Net.Keymanagers() + for _, idx := range idxs { + if err := kms[idx].Restart(ctx); err != nil { + return err + } + } + return nil +} + +// WaitKeymanagers waits for the specified key manager nodes to become ready. +func (sc *Scenario) WaitKeymanagers(ctx context.Context, idxs []int) error { + sc.Logger.Info("waiting for the key managers to become ready", "ids", fmt.Sprintf("%+v", idxs)) + + kms := sc.Net.Keymanagers() + for _, idx := range idxs { + kmCtrl, err := oasis.NewController(kms[idx].SocketPath()) + if err != nil { + return err + } + if err = kmCtrl.WaitReady(ctx); err != nil { + return err + } + } + return nil +} + +// StartAndWaitKeymanagers starts the specified key manager nodes and waits +// for them to become ready. +func (sc *Scenario) StartAndWaitKeymanagers(ctx context.Context, idxs []int) error { + if err := sc.StartKeymanagers(ctx, idxs); err != nil { + return err + } + return sc.WaitKeymanagers(ctx, idxs) +} + +// RestartAndWaitKeymanagers restarts the specified key manager nodes and waits +// for them to become ready. 
+func (sc *Scenario) RestartAndWaitKeymanagers(ctx context.Context, idxs []int) error { + if err := sc.RestartKeymanagers(ctx, idxs); err != nil { + return err + } + return sc.WaitKeymanagers(ctx, idxs) +} diff --git a/go/oasis-test-runner/scenario/e2e/runtime/helpers_runtime.go b/go/oasis-test-runner/scenario/e2e/runtime/helpers_runtime.go new file mode 100644 index 00000000000..bbe6b4a7452 --- /dev/null +++ b/go/oasis-test-runner/scenario/e2e/runtime/helpers_runtime.go @@ -0,0 +1,568 @@ +package runtime + +import ( + "bytes" + "context" + "fmt" + "path/filepath" + "time" + + "github.com/hashicorp/go-multierror" + + "github.com/oasisprotocol/oasis-core/go/common" + "github.com/oasisprotocol/oasis-core/go/common/cbor" + "github.com/oasisprotocol/oasis-core/go/common/node" + "github.com/oasisprotocol/oasis-core/go/common/sgx" + "github.com/oasisprotocol/oasis-core/go/common/version" + consensus "github.com/oasisprotocol/oasis-core/go/consensus/api" + keymanager "github.com/oasisprotocol/oasis-core/go/keymanager/api" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis/cli" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/rust" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario/e2e" + registry "github.com/oasisprotocol/oasis-core/go/registry/api" + commonWorker "github.com/oasisprotocol/oasis-core/go/worker/common/api" +) + +var ( + // KeyValueRuntimeBinary is the name of the simple key/value runtime binary. + KeyValueRuntimeBinary = "simple-keyvalue" + // KeyValueRuntimeUpgradeBinary is the name of the upgraded simple key/value runtime binary. + KeyValueRuntimeUpgradeBinary = "simple-keyvalue-upgrade" + // KeyManagerRuntimeBinary is the name of the simple key manager runtime binary. + KeyManagerRuntimeBinary = "simple-keymanager" + // KeyManagerRuntimeUpgradeBinary is the name of the upgraded simple key manager runtime binary. + KeyManagerRuntimeUpgradeBinary = "simple-keymanager-upgrade" + + // KeyValueRuntimeID is the ID of the simple key/value runtime. + KeyValueRuntimeID common.Namespace + // KeyManagerRuntimeID is the ID of the key manager runtime. + KeyManagerRuntimeID common.Namespace + + _ = KeyManagerRuntimeID.UnmarshalHex("c000000000000000ffffffffffffffffffffffffffffffffffffffffffffffff") + _ = KeyValueRuntimeID.UnmarshalHex("8000000000000000000000000000000000000000000000000000000000000000") +) + +// ResolveRuntimeBinaries returns the paths to the runtime binaries. +func (sc *Scenario) ResolveRuntimeBinaries(baseRuntimeBinary string) map[node.TEEHardware]string { + binaries := make(map[node.TEEHardware]string) + for _, tee := range []node.TEEHardware{ + node.TEEHardwareInvalid, + node.TEEHardwareIntelSGX, + } { + binaries[tee] = sc.ResolveRuntimeBinary(baseRuntimeBinary, tee) + } + return binaries +} + +// ResolveRuntimeBinary returns the path to the runtime binary. +func (sc *Scenario) ResolveRuntimeBinary(runtimeBinary string, tee node.TEEHardware) string { + var runtimeExt, path string + switch tee { + case node.TEEHardwareInvalid: + runtimeExt = "" + path, _ = sc.Flags.GetString(cfgRuntimeBinaryDirDefault) + case node.TEEHardwareIntelSGX: + runtimeExt = ".sgxs" + path, _ = sc.Flags.GetString(cfgRuntimeBinaryDirIntelSGX) + } + + return filepath.Join(path, runtimeBinary+runtimeExt) +} + +// BuildRuntimes builds the specified runtime binaries using the provided trust root, if given. 
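+// +// (Illustrative usage sketch, not part of the change; passing a nil trust root +// builds the binaries without an embedded root: +// +// runtimes := map[common.Namespace]string{ +// KeyValueRuntimeID: KeyValueRuntimeBinary, +// } +// if err := sc.BuildRuntimes(ctx, childEnv, runtimes, nil); err != nil { +// return err +// } +// )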
+func (sc *Scenario) BuildRuntimes(ctx context.Context, childEnv *env.Env, runtimes map[common.Namespace]string, trustRoot *e2e.TrustRoot) error { + // Determine the required directories for building the runtime with an embedded trust root. + buildDir, targetDir, err := sc.BuildTargetDirs() + if err != nil { + return err + } + + // Determine TEE hardware. + teeHardware, err := sc.TEEHardware() + if err != nil { + return err + } + + // Prepare the builder. + builder := rust.NewBuilder(childEnv, buildDir, targetDir, teeHardware) + + // Build runtimes one by one. + var errs *multierror.Error + for runtimeID, runtimeBinary := range runtimes { + switch trustRoot { + case nil: + sc.Logger.Info("building runtime without embedded trust root", + "runtime_id", runtimeID, + "runtime_binary", runtimeBinary, + ) + default: + sc.Logger.Info("building runtime with embedded trust root", + "runtime_id", runtimeID, + "runtime_binary", runtimeBinary, + "trust_root_height", trustRoot.Height, + "trust_root_hash", trustRoot.Hash, + "trust_root_chain_context", trustRoot.ChainContext, + ) + + // Prepare environment. + builder.SetEnv("OASIS_TESTS_CONSENSUS_TRUST_HEIGHT", trustRoot.Height) + builder.SetEnv("OASIS_TESTS_CONSENSUS_TRUST_HASH", trustRoot.Hash) + builder.SetEnv("OASIS_TESTS_CONSENSUS_TRUST_CHAIN_CONTEXT", trustRoot.ChainContext) + builder.SetEnv("OASIS_TESTS_CONSENSUS_TRUST_RUNTIME_ID", runtimeID.String()) + } + + // Build a new runtime with the given trust root embedded. + if err = builder.Build(runtimeBinary); err != nil { + errs = multierror.Append(errs, err) + } + } + if err = errs.ErrorOrNil(); err != nil { + return fmt.Errorf("failed to build runtimes: %w", err) + } + + return nil +} + +// BuildAllRuntimes builds all runtime binaries, i.e. the key/value and the key manager runtime. +func (sc *Scenario) BuildAllRuntimes(ctx context.Context, childEnv *env.Env, trustRoot *e2e.TrustRoot) error { + runtimes := map[common.Namespace]string{ + KeyValueRuntimeID: KeyValueRuntimeBinary, + KeyManagerRuntimeID: KeyManagerRuntimeBinary, + } + + return sc.BuildRuntimes(ctx, childEnv, runtimes, trustRoot) +} + +// EnsureActiveVersionForComputeWorker ensures that the specified compute worker +// has the correct active version of the given runtime. +func (sc *Scenario) EnsureActiveVersionForComputeWorker(ctx context.Context, node *oasis.Compute, rt *oasis.Runtime, v version.Version) error { + ctx, cancel := context.WithTimeout(ctx, versionActivationTimeout) + defer cancel() + + sc.Logger.Info("ensuring that the compute worker has the correct active version", + "node", node.Name, + "runtime_id", rt.ID(), + "version", v, + ) + + nodeCtrl, err := oasis.NewController(node.SocketPath()) + if err != nil { + return fmt.Errorf("%s: failed to create controller: %w", node.Name, err) + } + + // Wait for the version to become active and ensure no suspension observed. + for { + status, err := nodeCtrl.GetStatus(ctx) + if err != nil { + return fmt.Errorf("%s: failed to query status: %w", node.Name, err) + } + + provisioner := status.Runtimes[rt.ID()].Provisioner + if provisioner != "sandbox" && provisioner != "sgx" { + return fmt.Errorf("%s: unexpected runtime provisioner for runtime '%s': %s", node.Name, rt.ID(), provisioner) + } + + cs := status.Runtimes[rt.ID()].Committee + if cs == nil { + return fmt.Errorf("%s: missing status for runtime '%s'", node.Name, rt.ID()) + } + + if cs.ActiveVersion == nil { + return fmt.Errorf("%s: no version is active", node.Name) + } + // Retry if not yet activated. 
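+ // (Editorial note: this poll is bounded by the versionActivationTimeout applied + // to ctx at the top of the function; each retry sleeps one second before + // re-querying the node status.)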
+ if cs.ActiveVersion.ToU64() < v.ToU64() { + time.Sleep(1 * time.Second) + continue + } + if *cs.ActiveVersion != v { + return fmt.Errorf("%s: unexpected active version (expected: %s got: %s)", node.Name, v, cs.ActiveVersion) + } + if cs.Status != commonWorker.StatusStateReady { + return fmt.Errorf("%s: runtime is not ready (got: %s)", node.Name, cs.Status) + } + break + } + + return nil +} + +// EnsureActiveVersionForComputeWorkers ensures that all compute workers +// have the correct active version of the given runtime. +func (sc *Scenario) EnsureActiveVersionForComputeWorkers(ctx context.Context, rt *oasis.Runtime, v version.Version) error { + sc.Logger.Info("ensuring that all compute workers have the correct active version", + "runtime_id", rt.ID(), + "version", v, + ) + + for _, node := range sc.Net.ComputeWorkers() { + if err := sc.EnsureActiveVersionForComputeWorker(ctx, node, rt, v); err != nil { + return err + } + } + return nil +} + +// EnsureActiveVersionForKeyManager ensures that the specified key manager +// has the correct active version of the given runtime. +func (sc *Scenario) EnsureActiveVersionForKeyManager(ctx context.Context, node *oasis.Keymanager, id common.Namespace, v version.Version) error { + ctx, cancel := context.WithTimeout(ctx, versionActivationTimeout) + defer cancel() + + sc.Logger.Info("ensuring that the key manager has the correct active version", + "node", node.Name, + "runtime_id", id, + "version", v, + ) + + nodeCtrl, err := oasis.NewController(node.SocketPath()) + if err != nil { + return fmt.Errorf("%s: failed to create controller: %w", node.Name, err) + } + + // Wait for the version to become active. + for { + status, err := nodeCtrl.GetStatus(ctx) + if err != nil { + return fmt.Errorf("%s: failed to query status: %w", node.Name, err) + } + + if status.Keymanager == nil { + return fmt.Errorf("%s: missing key manager status", node.Name) + } + + ws := status.Keymanager.WorkerStatus + if !id.Equal(ws.RuntimeID) { + return fmt.Errorf("%s: unsupported runtime (expected: %s got: %s)", node.Name, ws.RuntimeID, id) + } + + if ws.ActiveVersion == nil { + return fmt.Errorf("%s: no version is active", node.Name) + } + // Retry if not yet activated. + if ws.ActiveVersion.ToU64() < v.ToU64() { + time.Sleep(1 * time.Second) + continue + } + if *ws.ActiveVersion != v { + return fmt.Errorf("%s: unexpected active version (expected: %s got: %s)", node.Name, v, ws.ActiveVersion) + } + break + } + + return nil +} + +// EnsureActiveVersionForKeyManagers ensures that all key managers +// have the correct active version of the given runtime. +func (sc *Scenario) EnsureActiveVersionForKeyManagers(ctx context.Context, id common.Namespace, v version.Version) error { + sc.Logger.Info("ensuring that all key managers have the correct active version", + "runtime_id", id, + "version", v, + ) + + for _, node := range sc.Net.Keymanagers() { + if err := sc.EnsureActiveVersionForKeyManager(ctx, node, id, v); err != nil { + return err + } + } + + return nil +} + +// EnableRuntimeDeployment registers the specified runtime deployment, updates the key manager +// policy, and waits until the deployment becomes active. +func (sc *Scenario) EnableRuntimeDeployment(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, rt *oasis.Runtime, deploymentIndex int, nonce uint64) error { + sc.Logger.Info("enabling runtime deployment", + "runtime_id", rt.ID(), + "deployment", deploymentIndex, + ) + + // Update the key manager policy. 
+ status, err := sc.KeyManagerStatus(ctx) + if err != nil && err != keymanager.ErrNoSuchStatus { + return err + } + var policies map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX + if status != nil && status.Policy != nil { + policies = status.Policy.Policy.Enclaves + } + switch policies { + case nil: + sc.Logger.Info("no SGX runtimes, skipping policy update") + default: + sc.UpdateEnclavePolicies(rt, deploymentIndex, policies) + if err = sc.ApplyKeyManagerPolicy(ctx, childEnv, cli, 0, policies, nonce); err != nil { + return fmt.Errorf("updating policies: %w", err) + } + nonce++ + } + + // Fetch current epoch. + epoch, err := sc.Net.Controller().Beacon.GetEpoch(ctx, consensus.HeightLatest) + if err != nil { + return fmt.Errorf("failed to get current epoch: %w", err) + } + + // The upgrade epoch should be set to at least 3 to provide compute workers with enough time + // to prepare for the upgrade. If it is set too low, the runtime will be suspended due to + // a lack of eligible compute workers. + upgradeEpoch := epoch + 3 + + // Fetch old deployment. + oldRtDsc, err := sc.Net.Controller().Registry.GetRuntime(ctx, &registry.GetRuntimeQuery{ + Height: consensus.HeightLatest, + ID: rt.ID(), + }) + if err != nil { + return fmt.Errorf("failed to get runtime descriptor: %w", err) + } + + // Update runtime to include the new enclave identity. + newRtDsc := rt.ToRuntimeDescriptor() + newRtDpl := newRtDsc.Deployments[deploymentIndex] + newRtDpl.ValidFrom = upgradeEpoch + newRtDsc.Deployments = append(oldRtDsc.Deployments, newRtDpl) + + sc.Logger.Info("updating runtime descriptor", + "runtime_id", rt.ID(), + "version", newRtDpl.Version, + "valid_from", newRtDpl.ValidFrom, + ) + + if err = sc.RegisterRuntime(ctx, childEnv, cli, newRtDsc, nonce); err != nil { + return err + } + nonce++ // nolint: ineffassign + + // Wait for activation epoch. + sc.Logger.Info("waiting for runtime upgrade epoch", + "runtime_id", rt.ID(), + "epoch", upgradeEpoch, + ) + if err := sc.Net.Controller().Beacon.WaitEpoch(ctx, upgradeEpoch); err != nil { + return fmt.Errorf("failed to wait for epoch: %w", err) + } + + return nil +} + +// UpgradeComputeRuntimeFixture selects the first compute runtime and prepares it for the upgrade. +func (sc *Scenario) UpgradeComputeRuntimeFixture(f *oasis.NetworkFixture) (int, error) { + // Select the first compute runtime for upgrade. + idx := -1 + for i := range f.Runtimes { + if f.Runtimes[i].Kind == registry.KindCompute { + idx = i + break + } + } + if idx == -1 { + return 0, fmt.Errorf("expected at least one compute runtime in the fixture, none found") + } + + // Load the upgraded runtime binary. + newRuntimeBinaries := sc.ResolveRuntimeBinaries(KeyValueRuntimeUpgradeBinary) + + // Create a duplicate runtime, which will be added to the genesis. + f.Runtimes = append(f.Runtimes, f.Runtimes[idx]) + + // The original runtime will be excluded from the genesis and registered later. + // Note that if the runtime bundles already exist (e.g. after the dump-restore upgrade), + // they will be retained. + f.Runtimes[idx].ExcludeFromGenesis = true + f.Runtimes[idx].Deployments = append(f.Runtimes[idx].Deployments, oasis.DeploymentCfg{ + Version: version.Version{Major: 0, Minor: 1, Patch: 0}, + Binaries: newRuntimeBinaries, + }) + + return idx, nil +} + +// UpgradeComputeRuntime upgrades the specified compute runtime.
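+// +// (Illustrative usage sketch, not part of the change; rtIdx is a hypothetical +// variable holding the index returned by UpgradeComputeRuntimeFixture: +// +// if err := sc.UpgradeComputeRuntime(ctx, childEnv, cli, rtIdx, nonce); err != nil { +// return err +// } +// )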
+func (sc *Scenario) UpgradeComputeRuntime(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, idx int, nonce uint64) error { + newRt := sc.Net.Runtimes()[idx] + + // Make sure the old version is active on all compute nodes. + if err := sc.EnsureActiveVersionForComputeWorkers(ctx, newRt, version.MustFromString("0.0.0")); err != nil { + return err + } + + // Transition to the new version. + if err := sc.EnableRuntimeDeployment(ctx, childEnv, cli, newRt, 1, nonce); err != nil { + return err + } + + // Make sure the new version is active. + if err := sc.EnsureActiveVersionForComputeWorkers(ctx, newRt, version.MustFromString("0.1.0")); err != nil { + return err + } + + return nil +} + +// UpgradeKeyManagerFixture selects the first key manager runtime and prepares it for the upgrade. +func (sc *Scenario) UpgradeKeyManagerFixture(f *oasis.NetworkFixture) (int, error) { + // Select the first key manager for upgrade. + idx := -1 + for i := range f.Runtimes { + if f.Runtimes[i].Kind == registry.KindKeyManager { + idx = i + break + } + } + if idx == -1 { + return 0, fmt.Errorf("expected at least one key manager in the fixture, none found") + } + + // Load the upgraded key manager binary. + newRuntimeBinaries := sc.ResolveRuntimeBinaries(KeyManagerRuntimeUpgradeBinary) + + // Create a duplicate runtime, which will be added to the genesis later. + newRt := f.Runtimes[idx] + newRt.ExcludeFromGenesis = true + newRt.Deployments = []oasis.DeploymentCfg{ + { + Version: version.Version{Major: 0, Minor: 1, Patch: 0}, + Binaries: newRuntimeBinaries, + }, + } + f.Runtimes = append(f.Runtimes, newRt) + + // Keep the original runtime intact, and return the index of the new one. + idx = len(f.Runtimes) - 1 + + // Add the upgraded key manager, which will be started later. + f.Keymanagers = append(f.Keymanagers, oasis.KeymanagerFixture{ + NodeFixture: oasis.NodeFixture{ + NoAutoStart: true, + }, + Runtime: idx, + Entity: 1, + }) + + // Allow keymanager-0 to exit after replication is done. + f.Keymanagers[0].AllowEarlyTermination = true + + return idx, nil +} + +func (sc *Scenario) UpgradeKeyManager(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, idx int, nonce uint64) error { + oldKm := sc.Net.Keymanagers()[0] + newKm := sc.Net.Keymanagers()[1] + newRt := sc.Net.Runtimes()[idx] + + // Make sure the old version is active on the first key manager node. + if err := sc.EnsureActiveVersionForKeyManager(ctx, oldKm, newRt.ID(), version.MustFromString("0.0.0")); err != nil { + return err + } + + // Transition to the new version. + if err := sc.EnableRuntimeDeployment(ctx, childEnv, cli, newRt, 0, nonce); err != nil { + return err + } + + // Start the new keymanager. + sc.Logger.Info("starting new keymanager") + if err := newKm.Start(); err != nil { + return fmt.Errorf("starting new key manager: %w", err) + } + + // Wait for the new node to register. + sc.Logger.Info("waiting for new keymanager node to register", + "num_nodes", sc.Net.NumRegisterNodes(), + ) + if err := newKm.WaitReady(ctx); err != nil { + return fmt.Errorf("error waiting for new keymanager to be ready: %w", err) + } + + // Ensure replication succeeded. + if err := sc.ensureReplicationWorked(ctx, newKm, newRt); err != nil { + return err + } + + nodeCh, nodeSub, err := sc.Net.Controller().Registry.WatchNodes(ctx) + if err != nil { + return fmt.Errorf("failed to watch nodes: %w", err) + } + defer nodeSub.Close() + + // Shutdown old keymanager and make sure it de-registers.
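+ // (Editorial note: the node watch above is subscribed before the shutdown + // request, so the de-registration event below cannot be missed.)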
+ sc.Logger.Info("shutting down old keymanager") + + if err := oldKm.RequestShutdown(ctx, true); err != nil { + return fmt.Errorf("failed to request shutdown: %w", err) + } + + // Ensure keymanager de-registers. +OUTER: + for { + select { + case ev := <-nodeCh: + if !ev.IsRegistration && ev.Node.ID.Equal(oldKm.NodeID) { + break OUTER + } + case <-time.After(10 * time.Second): + return fmt.Errorf("failed to wait for keymanager to de-register") + } + } + + // Make sure the new version is active on the second key manager node. + if err := sc.EnsureActiveVersionForKeyManager(ctx, newKm, newRt.ID(), version.MustFromString("0.1.0")); err != nil { + return err + } + + return nil +} + +func (sc *Scenario) ensureReplicationWorked(ctx context.Context, km *oasis.Keymanager, rt *oasis.Runtime) error { + ctrl, err := oasis.NewController(km.SocketPath()) + if err != nil { + return err + } + node, err := ctrl.Registry.GetNode( + ctx, + ®istry.IDQuery{ + ID: km.NodeID, + }, + ) + if err != nil { + return err + } + nodeRt := node.GetRuntime(rt.ID(), version.Version{Major: 0, Minor: 1, Patch: 0}) + if nodeRt == nil { + return fmt.Errorf("node is missing keymanager runtime from descriptor") + } + var signedInitResponse keymanager.SignedInitResponse + if err = cbor.Unmarshal(nodeRt.ExtraInfo, &signedInitResponse); err != nil { + return fmt.Errorf("failed to unmarshal replica extrainfo") + } + + // Grab a state dump and ensure all keymanager nodes have a matching + // checksum. + doc, err := ctrl.Consensus.StateToGenesis(ctx, 0) + if err != nil { + return fmt.Errorf("failed to obtain consensus state: %w", err) + } + if err = func() error { + for _, status := range doc.KeyManager.Statuses { + if !status.ID.Equal(&nodeRt.ID) { + continue + } + if !status.IsInitialized { + return fmt.Errorf("key manager failed to initialize") + } + if !bytes.Equal(status.Checksum, signedInitResponse.InitResponse.Checksum) { + return fmt.Errorf("key manager failed to replicate, checksum mismatch") + } + return nil + } + return fmt.Errorf("consensus state missing km status") + }(); err != nil { + return err + } + + return nil +} diff --git a/go/oasis-test-runner/scenario/e2e/runtime/history_reindex.go b/go/oasis-test-runner/scenario/e2e/runtime/history_reindex.go index 437312e268c..bdd4ab18fad 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/history_reindex.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/history_reindex.go @@ -3,7 +3,6 @@ package runtime import ( "context" "fmt" - "path/filepath" consensus "github.com/oasisprotocol/oasis-core/go/consensus/api" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" @@ -30,7 +29,7 @@ func newHistoryReindexImpl() scenario.Scenario { return &historyReindexImpl{ Scenario: *NewScenario( "history-reindex", - NewKVTestClient().WithScenario(InsertRemoveKeyValueEncScenario), + NewTestClient().WithScenario(InsertRemoveKeyValueEncScenario), ), } } @@ -141,14 +140,10 @@ func (sc *historyReindexImpl) Run(ctx context.Context, childEnv *env.Env) error // Register runtime. 
compRt := sc.Net.Runtimes()[rtIdx] - txPath := filepath.Join(childEnv.Dir(), "register_compute_runtime.json") rtDsc := compRt.ToRuntimeDescriptor() rtDsc.Deployments[0].ValidFrom = epoch + 1 - if err = cli.Registry.GenerateRegisterRuntimeTx(childEnv.Dir(), compRt.ToRuntimeDescriptor(), 0, txPath); err != nil { - return fmt.Errorf("failed to generate register compute runtime tx: %w", err) - } - if err = cli.Consensus.SubmitTx(txPath); err != nil { - return fmt.Errorf("failed to register compute runtime: %w", err) + if err = sc.RegisterRuntime(ctx, childEnv, cli, rtDsc, 0); err != nil { + return err } // Wait for the compute worker to be ready. @@ -163,9 +158,5 @@ func (sc *historyReindexImpl) Run(ctx context.Context, childEnv *env.Env) error // Run client to ensure runtime works. sc.Logger.Info("Starting the basic client") - if err = sc.startTestClientOnly(ctx, childEnv); err != nil { - return err - } - - return sc.waitTestClient() + return sc.RunTestClientAndCheckLogs(ctx, childEnv) } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_client.go b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_client.go index 9587d881f9e..fd5a47fc201 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_client.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_client.go @@ -51,7 +51,7 @@ func newKeyManagerRPCClient(chainContext string) (*keyManagerRPCClient, error) { return nil, err } - pid := protocol.NewRuntimeProtocolID(chainContext, keymanagerID, kmp2p.KeyManagerProtocolID, kmp2p.KeyManagerProtocolVersion) + pid := protocol.NewRuntimeProtocolID(chainContext, KeyManagerRuntimeID, kmp2p.KeyManagerProtocolID, kmp2p.KeyManagerProtocolVersion) client := rpc.NewClient(host, pid) return &keyManagerRPCClient{ @@ -84,7 +84,7 @@ func (c *keyManagerRPCClient) addKeyManagerAddrToHost(km *oasis.Keymanager) (pee func (c *keyManagerRPCClient) fetchPublicKey(ctx context.Context, generation uint64, peerID peer.ID) (*x25519.PublicKey, error) { args := keymanager.LongTermKeyRequest{ Height: nil, - ID: keymanagerID, + ID: KeyManagerRuntimeID, KeyPairID: keymanager.KeyPairID{1, 2, 3}, Generation: generation, } @@ -129,7 +129,7 @@ func (c *keyManagerRPCClient) fetchPublicKey(ctx context.Context, generation uin func (c *keyManagerRPCClient) fetchEphemeralPublicKey(ctx context.Context, epoch beacon.EpochTime, peerID peer.ID) (*x25519.PublicKey, error) { args := keymanager.EphemeralKeyRequest{ Height: nil, - ID: keymanagerID, + ID: KeyManagerRuntimeID, KeyPairID: keymanager.KeyPairID{1, 2, 3}, Epoch: epoch, } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_dump_restore.go b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_dump_restore.go index 15bfd59aa84..0f079024207 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_dump_restore.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_dump_restore.go @@ -7,6 +7,7 @@ import ( beacon "github.com/oasisprotocol/oasis-core/go/beacon/api" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis/cli" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario" ) @@ -20,15 +21,13 @@ var KeymanagerDumpRestore scenario.Scenario = newKmDumpRestoreImpl() type kmDumpRestoreImpl struct { Scenario - - nonce uint64 } func newKmDumpRestoreImpl() scenario.Scenario { return &kmDumpRestoreImpl{ Scenario: *NewScenario( "keymanager-dump-restore", - 
NewKVTestClient().WithScenario(InsertRemoveKeyValueEncScenario), + NewTestClient().WithScenario(InsertRemoveKeyValueEncScenario), ), } } @@ -64,13 +63,15 @@ func (sc *kmDumpRestoreImpl) Clone() scenario.Scenario { } func (sc *kmDumpRestoreImpl) Run(ctx context.Context, childEnv *env.Env) (err error) { // nolint: gocyclo + cli := cli.New(childEnv, sc.Net, sc.Logger) + // Start the network. if err = sc.StartNetworkAndWaitForClientSync(ctx); err != nil { return err } // Wait until the first master secret is generated. - if _, err = sc.waitMasterSecret(ctx, 0); err != nil { + if _, err = sc.WaitMasterSecret(ctx, 0); err != nil { return err } @@ -86,13 +87,15 @@ func (sc *kmDumpRestoreImpl) Run(ctx context.Context, childEnv *env.Env) (err er return err } + cli.SetConfig(sc.Net.GetCLIConfig()) + // Start the network. if err = sc.StartNetworkAndWaitForClientSync(ctx); err != nil { return err } // Make sure the last secret was not preserved. - secret, err := sc.keymanagerMasterSecret(ctx) + secret, err := sc.MasterSecret(ctx) if err != nil { return err } @@ -101,7 +104,7 @@ func (sc *kmDumpRestoreImpl) Run(ctx context.Context, childEnv *env.Env) (err er } // Make sure the manager is initialized. - status, err := sc.keymanagerStatus(ctx) + status, err := sc.KeyManagerStatus(ctx) if err != nil { return err } @@ -110,21 +113,20 @@ func (sc *kmDumpRestoreImpl) Run(ctx context.Context, childEnv *env.Env) (err er } // Start both key manager nodes. - if err = sc.startAndWaitKeymanagers(ctx, []int{0, 1}); err != nil { + if err = sc.StartAndWaitKeymanagers(ctx, []int{0, 1}); err != nil { return err } // Test master secret rotations. To enable them, update the rotation interval in the policy. - if err = sc.updateRotationInterval(ctx, sc.nonce, childEnv, 1); err != nil { + if err = sc.UpdateRotationInterval(ctx, childEnv, cli, 1, 0); err != nil { return err } - sc.nonce++ - if _, err = sc.waitMasterSecret(ctx, 3); err != nil { + if _, err = sc.WaitMasterSecret(ctx, 3); err != nil { return err } // Test if all key managers can derive keys from all master secrets. - if err = sc.compareLongtermPublicKeys(ctx, []int{0, 1}); err != nil { + if err = sc.CompareLongtermPublicKeys(ctx, []int{0, 1}); err != nil { return err } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_ephemeral_secrets.go b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_ephemeral_secrets.go index eb9f56ccf44..5fcdb94c97a 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_ephemeral_secrets.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_ephemeral_secrets.go @@ -44,7 +44,7 @@ func newKmEphemeralSecretsImpl() scenario.Scenario { return &kmEphemeralSecretsImpl{ Scenario: *NewScenario( "keymanager-ephemeral-secrets", - NewKVTestClient().WithScenario(InsertRemoveKeyValueEncScenario), + NewTestClient().WithScenario(InsertRemoveKeyValueEncScenario), ), } } @@ -81,7 +81,7 @@ func (sc *kmEphemeralSecretsImpl) Run(ctx context.Context, childEnv *env.Env) er // Fetch runtime to know on which TEE platform the key manager is running. rt, err := sc.Net.ClientController().Registry.GetRuntime(ctx, &registry.GetRuntimeQuery{ Height: consensus.HeightLatest, - ID: keymanagerID, + ID: KeyManagerRuntimeID, }) if err != nil { return err @@ -112,14 +112,14 @@ func (sc *kmEphemeralSecretsImpl) Run(ctx context.Context, childEnv *env.Env) er } // Wait until the first key manager is ready.
- if err = sc.waitKeymanagers(ctx, []int{0}); err != nil { + if err = sc.WaitKeymanagers(ctx, []int{0}); err != nil { return err } // Wait until the first ephemeral secret is published. sc.Logger.Info("waiting for the first ephemeral secret") - sigSecret, err := sc.waitEphemeralSecrets(ctx, 1) + sigSecret, err := sc.WaitEphemeralSecrets(ctx, 1) if err != nil { return err } @@ -165,7 +165,7 @@ func (sc *kmEphemeralSecretsImpl) Run(ctx context.Context, childEnv *env.Env) er } // Restart the first key manager. - if err = sc.restartAndWaitKeymanagers(ctx, []int{0}); err != nil { + if err = sc.RestartAndWaitKeymanagers(ctx, []int{0}); err != nil { return err } @@ -184,7 +184,7 @@ func (sc *kmEphemeralSecretsImpl) Run(ctx context.Context, childEnv *env.Env) er // Wait until the next ephemeral secret is published. sc.Logger.Info("waiting for the first ephemeral secret") - sigSecret, err = sc.waitEphemeralSecrets(ctx, 1) + sigSecret, err = sc.WaitEphemeralSecrets(ctx, 1) if err != nil { return err } @@ -217,7 +217,7 @@ func (sc *kmEphemeralSecretsImpl) Run(ctx context.Context, childEnv *env.Env) er } // Start other key managers. - if err = sc.startAndWaitKeymanagers(ctx, []int{1, 2}); err != nil { + if err = sc.StartAndWaitKeymanagers(ctx, []int{1, 2}); err != nil { return err } @@ -340,7 +340,7 @@ func (sc *kmEphemeralSecretsImpl) Run(ctx context.Context, childEnv *env.Env) er sc.Logger.Info("encrypting plaintext") ciphertext, err := sc.submitKeyValueRuntimeEncryptTx( ctx, - runtimeID, + KeyValueRuntimeID, rng.Uint64(), epoch, keyPairID, @@ -356,7 +356,7 @@ func (sc *kmEphemeralSecretsImpl) Run(ctx context.Context, childEnv *env.Env) er sc.Logger.Info("decrypting ciphertext") decrypted, err := sc.submitKeyValueRuntimeDecryptTx( ctx, - runtimeID, + KeyValueRuntimeID, rng.Uint64(), epoch, keyPairID, @@ -375,7 +375,7 @@ func (sc *kmEphemeralSecretsImpl) Run(ctx context.Context, childEnv *env.Env) er sc.Logger.Info("decrypting ciphertext with wrong epoch") decrypted, err = sc.submitKeyValueRuntimeDecryptTx( ctx, - runtimeID, + KeyValueRuntimeID, rng.Uint64(), epoch-1, keyPairID, @@ -391,7 +391,7 @@ func (sc *kmEphemeralSecretsImpl) Run(ctx context.Context, childEnv *env.Env) er sc.Logger.Info("decrypting ciphertext with wrong key pair id") decrypted, err = sc.submitKeyValueRuntimeDecryptTx( ctx, - runtimeID, + KeyValueRuntimeID, rng.Uint64(), epoch, "wrong key pair id", @@ -412,7 +412,7 @@ func (sc *kmEphemeralSecretsImpl) Run(ctx context.Context, childEnv *env.Env) er sc.Logger.Info("encrypting plaintext with invalid epoch") _, err = sc.submitKeyValueRuntimeEncryptTx( ctx, - runtimeID, + KeyValueRuntimeID, rng.Uint64(), epoch, keyPairID, @@ -428,7 +428,7 @@ func (sc *kmEphemeralSecretsImpl) Run(ctx context.Context, childEnv *env.Env) er sc.Logger.Info("decrypting ciphertext with invalid epoch") _, err = sc.submitKeyValueRuntimeDecryptTx( ctx, - runtimeID, + KeyValueRuntimeID, rng.Uint64(), epoch, keyPairID, @@ -455,7 +455,7 @@ func (sc *kmEphemeralSecretsImpl) submitKeyValueRuntimeEncryptTx( keyPairID string, plaintext []byte, ) ([]byte, error) { - rawRsp, err := sc.submitRuntimeTx(ctx, runtimeID, nonce, "encrypt", struct { + rawRsp, err := sc.submitRuntimeTx(ctx, KeyValueRuntimeID, nonce, "encrypt", struct { Epoch uint64 `json:"epoch"` KeyPairID string `json:"key_pair_id"` Plaintext []byte `json:"plaintext"` @@ -484,7 +484,7 @@ func (sc *kmEphemeralSecretsImpl) submitKeyValueRuntimeDecryptTx( keyPairID string, ciphertext []byte, ) ([]byte, error) { - rawRsp, err := sc.submitRuntimeTx(ctx, 
runtimeID, nonce, "decrypt", struct { + rawRsp, err := sc.submitRuntimeTx(ctx, KeyValueRuntimeID, nonce, "decrypt", struct { Epoch uint64 `json:"epoch"` KeyPairID string `json:"key_pair_id"` Ciphertext []byte `json:"ciphertext"` @@ -508,7 +508,7 @@ func (sc *kmEphemeralSecretsImpl) submitKeyValueRuntimeDecryptTx( func (sc *kmEphemeralSecretsImpl) checkNumberOfKeyManagers(ctx context.Context, n int) error { status, err := sc.Net.Controller().Keymanager.GetStatus(ctx, ®istry.NamespaceQuery{ Height: consensus.HeightLatest, - ID: keymanagerID, + ID: KeyManagerRuntimeID, }) if err != nil { return err diff --git a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_master_secrets.go b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_master_secrets.go index 439be62ef4b..8e5d99cfda2 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_master_secrets.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_master_secrets.go @@ -9,6 +9,7 @@ import ( keymanager "github.com/oasisprotocol/oasis-core/go/keymanager/api" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis/cli" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario" ) @@ -25,7 +26,7 @@ func newKmMasterSecretsImpl() scenario.Scenario { return &kmMasterSecretsImpl{ Scenario: *NewScenario( "keymanager-master-secrets", - NewKVTestClient().WithScenario(InsertRemoveKeyValueEncScenario), + NewTestClient().WithScenario(InsertRemoveKeyValueEncScenario), ), } } @@ -59,6 +60,8 @@ func (sc *kmMasterSecretsImpl) Clone() scenario.Scenario { } func (sc *kmMasterSecretsImpl) Run(ctx context.Context, childEnv *env.Env) (err error) { // nolint: gocyclo + cli := cli.New(childEnv, sc.Net, sc.Logger) + // Start the network. if err = sc.Net.Start(); err != nil { return err @@ -76,23 +79,23 @@ func (sc *kmMasterSecretsImpl) Run(ctx context.Context, childEnv *env.Env) (err }() // Test that only one master secret is generated if rotations are disabled. - if _, err = sc.waitMasterSecret(ctx, 0); err != nil { + if _, err = sc.WaitMasterSecret(ctx, 0); err != nil { return fmt.Errorf("master secret not generated: %w", err) } - if err = sc.waitEpochs(ctx, 5); err != nil { + if err = sc.WaitEpochs(ctx, 5); err != nil { return err } sc.Logger.Info("verifying that exactly one master secret has been generated") - status, err := sc.keymanagerStatus(ctx) + status, err := sc.KeyManagerStatus(ctx) if err != nil { return err } if !status.IsInitialized || len(status.Checksum) == 0 || status.Generation != 0 { return fmt.Errorf("exactly one master secret should be generated if rotation is disabled %+v", status) } - secret, err := sc.keymanagerMasterSecret(ctx) + secret, err := sc.MasterSecret(ctx) if err != nil { return err } @@ -101,44 +104,44 @@ func (sc *kmMasterSecretsImpl) Run(ctx context.Context, childEnv *env.Env) (err } // Enable master secret rotations. - if err = sc.updateRotationInterval(ctx, sc.nonce, childEnv, 1); err != nil { + if err = sc.UpdateRotationInterval(ctx, childEnv, cli, 1, sc.nonce); err != nil { return err } sc.nonce++ - if _, err = sc.waitMasterSecret(ctx, 3); err != nil { + if _, err = sc.WaitMasterSecret(ctx, 3); err != nil { return err } // Test if all key managers can derive keys from all master secrets. 
- if err = sc.compareLongtermPublicKeys(ctx, []int{0, 1, 2}); err != nil { + if err = sc.CompareLongtermPublicKeys(ctx, []int{0, 1, 2}); err != nil { return err } // Test master secrets if only two/one manager is running. - if err = sc.stopKeymanagers(ctx, []int{2}); err != nil { + if err = sc.StopKeymanagers(ctx, []int{2}); err != nil { return err } - if _, err = sc.waitMasterSecret(ctx, 4); err != nil { + if _, err = sc.WaitMasterSecret(ctx, 4); err != nil { return err } - if err = sc.stopKeymanagers(ctx, []int{1}); err != nil { + if err = sc.StopKeymanagers(ctx, []int{1}); err != nil { return err } - if _, err = sc.waitMasterSecret(ctx, 6); err != nil { + if _, err = sc.WaitMasterSecret(ctx, 6); err != nil { return err } // Check how frequently secrets are rotated. interval := beacon.EpochTime(3) - if err = sc.updateRotationInterval(ctx, sc.nonce, childEnv, interval); err != nil { + if err = sc.UpdateRotationInterval(ctx, childEnv, cli, interval, sc.nonce); err != nil { return err } sc.nonce++ - prev, err := sc.waitMasterSecret(ctx, 7) + prev, err := sc.WaitMasterSecret(ctx, 7) if err != nil { return err } - next, err := sc.waitMasterSecret(ctx, 8) + next, err := sc.WaitMasterSecret(ctx, 8) if err != nil { return err } @@ -147,16 +150,16 @@ func (sc *kmMasterSecretsImpl) Run(ctx context.Context, childEnv *env.Env) (err } // Disable master secret rotations. - if err = sc.updateRotationInterval(ctx, sc.nonce, childEnv, 0); err != nil { + if err = sc.UpdateRotationInterval(ctx, childEnv, cli, 0, sc.nonce); err != nil { return err } sc.nonce++ - if err = sc.waitEpochs(ctx, 3); err != nil { + if err = sc.WaitEpochs(ctx, 3); err != nil { return err } // No more secrets should be generated. - status, err = sc.keymanagerStatus(ctx) + status, err = sc.KeyManagerStatus(ctx) if err != nil { return err } @@ -216,7 +219,7 @@ func (sc *kmMasterSecretsImpl) monitorMasterSecrets(ctx context.Context) (func() case next = <-mstCh: } - if next.Secret.ID != keymanagerID { + if next.Secret.ID != KeyManagerRuntimeID { continue } @@ -273,7 +276,7 @@ func (sc *kmMasterSecretsImpl) monitorMasterSecrets(ctx context.Context) (func() case next = <-stCh: } - if next.ID != keymanagerID { + if next.ID != KeyManagerRuntimeID { continue } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_replicate.go b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_replicate.go index e1d3598db2d..6bdefd9398c 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_replicate.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_replicate.go @@ -8,6 +8,7 @@ import ( "golang.org/x/exp/slices" beacon "github.com/oasisprotocol/oasis-core/go/beacon/api" + keymanager "github.com/oasisprotocol/oasis-core/go/keymanager/api" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario" @@ -24,7 +25,7 @@ func newKmReplicateImpl() scenario.Scenario { return &kmReplicateImpl{ Scenario: *NewScenario( "keymanager-replication", - NewKVTestClient().WithScenario(InsertRemoveKeyValueEncScenario), + NewTestClient().WithScenario(InsertRemoveKeyValueEncScenario), ), } } @@ -71,12 +72,12 @@ func (sc *kmReplicateImpl) Run(ctx context.Context, childEnv *env.Env) error { } // Wait until 3 master secrets are generated. 
- if _, err := sc.waitMasterSecret(ctx, 2); err != nil { + if _, err := sc.WaitMasterSecret(ctx, 2); err != nil { return fmt.Errorf("master secret not generated: %w", err) } // Make sure exactly two key managers were generating secrets. - status, err := sc.keymanagerStatus(ctx) + status, err := sc.KeyManagerStatus(ctx) if err != nil { return err } @@ -91,12 +92,12 @@ func (sc *kmReplicateImpl) Run(ctx context.Context, childEnv *env.Env) error { } // Generate another 3 master secrets. - if _, err = sc.waitMasterSecret(ctx, 5); err != nil { + if _, err = sc.WaitMasterSecret(ctx, 5); err != nil { return fmt.Errorf("master secret not generated: %w", err) } // Make sure the first key manager was generating secrets. - status, err = sc.keymanagerStatus(ctx) + status, err = sc.KeyManagerStatus(ctx) if err != nil { return err } @@ -106,7 +107,7 @@ func (sc *kmReplicateImpl) Run(ctx context.Context, childEnv *env.Env) error { // Start key managers that are not running and wait until they replicate // master secrets from the first one. - if err = sc.startAndWaitKeymanagers(ctx, []int{1, 2, 3}); err != nil { + if err = sc.StartAndWaitKeymanagers(ctx, []int{1, 2, 3}); err != nil { return err } @@ -129,13 +130,13 @@ func (sc *kmReplicateImpl) Run(ctx context.Context, childEnv *env.Env) error { // Wait few blocks so that the key managers transition to the new secret and register // with the latest checksum. The latter can take some time. - if _, err = sc.waitBlocks(ctx, 8); err != nil { + if _, err = sc.WaitBlocks(ctx, 8); err != nil { return err } // Check if checksums match. for idx := range sc.Net.Keymanagers() { - initRsp, err := sc.keymanagerInitResponse(ctx, idx) + initRsp, err := sc.KeymanagerInitResponse(ctx, idx) if err != nil { return err } @@ -147,9 +148,34 @@ func (sc *kmReplicateImpl) Run(ctx context.Context, childEnv *env.Env) error { // If we came this far than all key managers should have the same state. // Let's test if they replicated the same secrets by fetching long-term // public keys for all generations. - if err := sc.compareLongtermPublicKeys(ctx, []int{0, 1, 2, 3}); err != nil { + if err := sc.CompareLongtermPublicKeys(ctx, []int{0, 1, 2, 3}); err != nil { return err } return nil } + +func (sc *kmReplicateImpl) waitKeymanagerStatuses(ctx context.Context, n int) (*keymanager.Status, error) { + sc.Logger.Info("waiting for key manager status", "n", n) + + stCh, stSub, err := sc.Net.Controller().Keymanager.WatchStatuses(ctx) + if err != nil { + return nil, err + } + defer stSub.Close() + + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case status := <-stCh: + if !status.ID.Equal(&KeyManagerRuntimeID) { + continue + } + n-- + if n <= 0 { + return status, nil + } + } + } +} diff --git a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_replicate_many.go b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_replicate_many.go index 7ecc460b785..edaf7ebbe33 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_replicate_many.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_replicate_many.go @@ -97,19 +97,19 @@ func (sc *kmReplicateManyImpl) Run(ctx context.Context, childEnv *env.Env) error } // Wait until master secrets are generated. - if _, err := sc.waitMasterSecret(ctx, generation); err != nil { + if _, err := sc.WaitMasterSecret(ctx, generation); err != nil { return err } // Start the last two key managers. 
- if err := sc.startKeymanagers(ctx, []int{2, 3}); err != nil {
+ if err := sc.StartKeymanagers(ctx, []int{2, 3}); err != nil {
 return err
 }

 // Wait until all secrets are replicated.
 start := time.Now()
- if err := sc.waitKeymanagers(ctx, []int{2, 3}); err != nil {
+ if err := sc.WaitKeymanagers(ctx, []int{2, 3}); err != nil {
 return err
 }
@@ -118,16 +118,16 @@ func (sc *kmReplicateManyImpl) Run(ctx context.Context, childEnv *env.Env) error
 )

 // Compare public keys.
- if err := sc.compareLongtermPublicKeys(ctx, []int{0, 1, 2, 3}); err != nil {
+ if err := sc.CompareLongtermPublicKeys(ctx, []int{0, 1, 2, 3}); err != nil {
 return err
 }

 // Verify that a secret can be generated after replication.
- status, err := sc.keymanagerStatus(ctx)
+ status, err := sc.KeyManagerStatus(ctx)
 if err != nil {
 return err
 }
- status, err = sc.waitMasterSecret(ctx, status.Generation+2)
+ status, err = sc.WaitMasterSecret(ctx, status.Generation+2)
 if err != nil {
 return err
 }
diff --git a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_restart.go b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_restart.go
index a43ce437cb1..f069e1850d0 100644
--- a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_restart.go
+++ b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_restart.go
@@ -21,7 +21,7 @@ func newKmRestartImpl() scenario.Scenario {
 return &kmRestartImpl{
 Scenario: *NewScenario(
 "keymanager-restart",
- NewKVTestClient().WithScenario(InsertRemoveKeyValueEncScenario),
+ NewTestClient().WithScenario(InsertRemoveKeyValueEncScenario),
 ),
 }
 }
@@ -66,31 +66,28 @@ func (sc *kmRestartImpl) Run(ctx context.Context, childEnv *env.Env) error {
 }

 // Wait for the client to exit.
- if err := sc.WaitTestClientOnly(); err != nil {
+ if err := sc.WaitTestClient(); err != nil {
 return err
 }

 // Wait until 3 master secrets are generated.
- if _, err := sc.waitMasterSecret(ctx, 2); err != nil {
+ if _, err := sc.WaitMasterSecret(ctx, 2); err != nil {
 return fmt.Errorf("master secret not generated: %w", err)
 }

 // Restart the key managers.
- if err := sc.restartAndWaitKeymanagers(ctx, []int{0, 1, 2}); err != nil {
+ if err := sc.RestartAndWaitKeymanagers(ctx, []int{0, 1, 2}); err != nil {
 return err
 }

 // Test if rotations still work.
- if _, err := sc.waitMasterSecret(ctx, 5); err != nil {
+ if _, err := sc.WaitMasterSecret(ctx, 5); err != nil {
 return err
 }

 // Run the second client on a different key so that it will require
 // a second trip to the keymanager.
 sc.Logger.Info("starting a second client to check if key manager works")
- sc.Scenario.testClient = NewKVTestClient().WithSeed("seed2").WithScenario(InsertRemoveKeyValueEncScenarioV2)
- if err := sc.startTestClientOnly(ctx, childEnv); err != nil {
- return err
- }
- return sc.waitTestClient()
+ sc.Scenario.TestClient = NewTestClient().WithSeed("seed2").WithScenario(InsertRemoveKeyValueEncScenarioV2)
+ return sc.RunTestClientAndCheckLogs(ctx, childEnv)
 }
diff --git a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_rotation_failure.go b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_rotation_failure.go
index c915b6459f5..e7273e69ed6 100644
--- a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_rotation_failure.go
+++ b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_rotation_failure.go
@@ -83,13 +83,13 @@ func (sc *kmRotationFailureImpl) Run(ctx context.Context, childEnv *env.Env) err
 for i := 0; i < 3; i++ {
 // Start the second and third key managers.
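// StartKeymanagers, WaitKeymanagers, and the *AndWait* variants used in these
// scenarios are the exported successors of the private helpers that this diff
// deletes from keymanager_util.go. A sketch of the start-and-wait pattern they
// implement, assuming the oasis test-runner API used by the deleted code;
// illustrative only, not the exact new implementation:
func startAndWaitKeymanagersSketch(ctx context.Context, sc *Scenario, idxs []int) error {
	kms := sc.Net.Keymanagers()
	// Launch the selected key manager node processes first.
	for _, idx := range idxs {
		if err := kms[idx].Start(); err != nil {
			return err
		}
	}
	// Then block until each node reports itself ready over its control socket.
	for _, idx := range idxs {
		ctrl, err := oasis.NewController(kms[idx].SocketPath())
		if err != nil {
			return err
		}
		if err = ctrl.WaitReady(ctx); err != nil {
			return err
		}
	}
	return nil
}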
- if err := sc.startKeymanagers(ctx, []int{1, 2}); err != nil {
+ if err := sc.StartKeymanagers(ctx, []int{1, 2}); err != nil {
 return err
 }

 // Verify that master secret generation works.
 generation := uint64(2*i + 1)
- status, err := sc.waitMasterSecret(ctx, generation)
+ status, err := sc.WaitMasterSecret(ctx, generation)
 if err != nil {
 return fmt.Errorf("master secret was not generated: %w", err)
 }
@@ -102,12 +102,12 @@ func (sc *kmRotationFailureImpl) Run(ctx context.Context, childEnv *env.Env) err

 // Give key managers enough time to apply the last proposal and register with the latest
 // checksum. This process can take several blocks.
- if _, err := sc.waitBlocks(ctx, 5); err != nil {
+ if _, err := sc.WaitBlocks(ctx, 5); err != nil {
 return err
 }

 // Stop two key managers, leaving only 33% of the committee members to be active.
- if err := sc.stopKeymanagers(ctx, []int{1, 2}); err != nil {
+ if err := sc.StopKeymanagers(ctx, []int{1, 2}); err != nil {
 return err
 }
@@ -125,7 +125,7 @@ func (sc *kmRotationFailureImpl) Run(ctx context.Context, childEnv *env.Env) err
 }

 // Verify that master secret generation works after the third key manager is deregistered.
- status, err := sc.waitMasterSecret(ctx, 6)
+ status, err := sc.WaitMasterSecret(ctx, 6)
 if err != nil {
 return err
 }
diff --git a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_upgrade.go b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_upgrade.go
index f58483ebf50..15f7b587531 100644
--- a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_upgrade.go
+++ b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_upgrade.go
@@ -1,23 +1,12 @@
 package runtime

 import (
- "bytes"
 "context"
- "fmt"
- "path/filepath"
- "time"
-
- "github.com/oasisprotocol/oasis-core/go/common"
- "github.com/oasisprotocol/oasis-core/go/common/cbor"
- "github.com/oasisprotocol/oasis-core/go/common/sgx"
- "github.com/oasisprotocol/oasis-core/go/common/version"
- consensus "github.com/oasisprotocol/oasis-core/go/consensus/api"
- keymanager "github.com/oasisprotocol/oasis-core/go/keymanager/api"
 "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env"
 "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis"
 "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis/cli"
 "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario"
- registry "github.com/oasisprotocol/oasis-core/go/registry/api"
 )

 // KeymanagerUpgrade is the keymanager upgrade scenario.
@@ -27,7 +16,7 @@ var KeymanagerUpgrade scenario.Scenario = NewKmUpgradeImpl()
 type KmUpgradeImpl struct {
 Scenario

- nonce uint64
+ upgradedKeyManagerIndex int
 }

 // NewKmUpgradeImpl creates a new base scenario for oasis-node keymanager upgrade end-to-end tests.
@@ -35,7 +24,7 @@ func NewKmUpgradeImpl() scenario.Scenario {
 return &KmUpgradeImpl{
 Scenario: *NewScenario(
 "keymanager-upgrade",
- NewKVTestClient().WithScenario(InsertRemoveKeyValueEncScenario),
+ NewTestClient().WithScenario(InsertRemoveKeyValueEncScenario),
 ),
 }
 }
@@ -46,34 +35,9 @@ func (sc *KmUpgradeImpl) Fixture() (*oasis.NetworkFixture, error) {
 return nil, err
 }

- // Load the upgraded keymanager binary.
- newKmBinaries := sc.resolveRuntimeBinaries("simple-keymanager-upgrade")
 // Set up the upgraded runtime.
- kmRuntimeFix := f.Runtimes[0]
- if kmRuntimeFix.Kind != registry.KindKeyManager {
- return nil, fmt.Errorf("expected first runtime in fixture to be keymanager runtime, got: %s", kmRuntimeFix.Kind)
- }
- kmRuntimeFix.Deployments = []oasis.DeploymentCfg{
- {
- Binaries: newKmBinaries,
- Version: version.Version{Major: 0, Minor: 1, Patch: 0},
- },
+ if sc.upgradedKeyManagerIndex, err = sc.UpgradeKeyManagerFixture(f); err != nil {
+ return nil, err
 }
- // The upgraded runtime will be registered later.
- kmRuntimeFix.ExcludeFromGenesis = true
- f.Runtimes = append(f.Runtimes, kmRuntimeFix)
-
- // Allow keymanager-0 to exit after replication is done.
- f.Keymanagers[0].AllowEarlyTermination = true
-
- // Add the upgraded keymanager, will be started later.
- f.Keymanagers = append(f.Keymanagers, oasis.KeymanagerFixture{
- NodeFixture: oasis.NodeFixture{
- NoAutoStart: true,
- },
- Runtime: 2,
- Entity: 1,
- })

 return f, nil
 }
@@ -84,253 +48,24 @@ func (sc *KmUpgradeImpl) Clone() scenario.Scenario {
 }
 }

-func (sc *KmUpgradeImpl) applyUpgradePolicy(childEnv *env.Env) error {
- cli := cli.New(childEnv, sc.Net, sc.Logger)
-
- kmPolicyPath := filepath.Join(childEnv.Dir(), "km_policy.cbor")
- kmPolicySig1Path := filepath.Join(childEnv.Dir(), "km_policy_sig1.pem")
- kmPolicySig2Path := filepath.Join(childEnv.Dir(), "km_policy_sig2.pem")
- kmPolicySig3Path := filepath.Join(childEnv.Dir(), "km_policy_sig3.pem")
- kmUpdateTxPath := filepath.Join(childEnv.Dir(), "km_gen_update.json")
-
- oldKMRuntime := sc.Net.Runtimes()[0]
- newKMRuntime := sc.Net.Runtimes()[2]
- // Sanity check fixture.
- if err := func() error {
- if oldKMRuntime.Kind() != registry.KindKeyManager {
- return fmt.Errorf("old keymanager runtime not of kind KindKeyManager")
- }
- if newKMRuntime.Kind() != registry.KindKeyManager {
- return fmt.Errorf("new keymanager runtime not of kind KindKeyManager")
- }
- if oldKMRuntime.ID() != newKMRuntime.ID() {
- return fmt.Errorf("keymanager runtimes ID mismatch")
- }
- return nil
- }(); err != nil {
- return fmt.Errorf("keymanager runtimes fixture sanity check: %w", err)
- }
-
- oldKMEncID := oldKMRuntime.GetEnclaveIdentity(0)
- newKMEncID := newKMRuntime.GetEnclaveIdentity(0)
-
- if oldKMEncID == nil && newKMEncID == nil {
- sc.Logger.Info("No SGX runtimes, skipping policy update")
- return nil
- }
-
- // Ensure enclave IDs differ between the old and new runtimes.
- oldEncID, _ := oldKMEncID.MarshalText()
- newEncID, _ := newKMEncID.MarshalText()
- if bytes.Equal(oldEncID, newEncID) {
- return fmt.Errorf("expected different enclave identities, got: %s", newEncID)
- }
-
- // Build updated SGX policies.
- sc.Logger.Info("building new KM SGX policy enclave policies map")
- enclavePolicies := make(map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX)
-
- enclavePolicies[*newKMEncID] = &keymanager.EnclavePolicySGX{}
- enclavePolicies[*newKMEncID].MayQuery = make(map[common.Namespace][]sgx.EnclaveIdentity)
- enclavePolicies[*oldKMEncID] = &keymanager.EnclavePolicySGX{}
- enclavePolicies[*oldKMEncID].MayQuery = make(map[common.Namespace][]sgx.EnclaveIdentity)
-
- // Allow new runtime enclave to replicate from the old runtime enclave.
- enclavePolicies[*oldKMEncID].MayReplicate = []sgx.EnclaveIdentity{*newKMEncID}
-
- // Allow compute runtime to query new runtime.
- for _, rt := range sc.Net.Runtimes() {
- if rt.Kind() != registry.KindCompute {
- continue
- }
- if eid := rt.GetEnclaveIdentity(0); eid != nil {
- enclavePolicies[*newKMEncID].MayQuery[rt.ID()] = []sgx.EnclaveIdentity{*eid}
- }
- }
-
- sc.Logger.Info("initing updated KM policy")
- if err := cli.Keymanager.InitPolicy(oldKMRuntime.ID(), 2, 0, enclavePolicies, kmPolicyPath); err != nil {
- return err
- }
- sc.Logger.Info("signing updated KM policy")
- if err := cli.Keymanager.SignPolicy("1", kmPolicyPath, kmPolicySig1Path); err != nil {
- return err
- }
- if err := cli.Keymanager.SignPolicy("2", kmPolicyPath, kmPolicySig2Path); err != nil {
- return err
- }
- if err := cli.Keymanager.SignPolicy("3", kmPolicyPath, kmPolicySig3Path); err != nil {
- return err
- }
-
- sc.Logger.Info("updating KM policy")
- if err := cli.Keymanager.GenUpdate(sc.nonce, kmPolicyPath, []string{kmPolicySig1Path, kmPolicySig2Path, kmPolicySig3Path}, kmUpdateTxPath); err != nil {
- return err
- }
- if err := cli.Consensus.SubmitTx(kmUpdateTxPath); err != nil {
- return fmt.Errorf("failed to update KM policy: %w", err)
- }
- sc.nonce++
-
- return nil
-}
-
-func (sc *KmUpgradeImpl) ensureReplicationWorked(ctx context.Context, km *oasis.Keymanager, rt *oasis.Runtime) error {
- ctrl, err := oasis.NewController(km.SocketPath())
- if err != nil {
- return err
- }
- node, err := ctrl.Registry.GetNode(
- ctx,
- &registry.IDQuery{
- ID: km.NodeID,
- },
- )
- if err != nil {
- return err
- }
- nodeRt := node.GetRuntime(rt.ID(), version.Version{Major: 0, Minor: 1, Patch: 0})
- if nodeRt == nil {
- return fmt.Errorf("node is missing keymanager runtime from descriptor")
- }
- var signedInitResponse keymanager.SignedInitResponse
- if err = cbor.Unmarshal(nodeRt.ExtraInfo, &signedInitResponse); err != nil {
- return fmt.Errorf("failed to unmarshal replica extrainfo")
- }
-
- // Grab a state dump and ensure all keymanager nodes have a matching
- // checksum.
- doc, err := ctrl.Consensus.StateToGenesis(ctx, 0)
- if err != nil {
- return fmt.Errorf("failed to obtain consensus state: %w", err)
- }
- if err = func() error {
- for _, status := range doc.KeyManager.Statuses {
- if !status.ID.Equal(&nodeRt.ID) {
- continue
- }
- if !status.IsInitialized {
- return fmt.Errorf("key manager failed to initialize")
- }
- if !bytes.Equal(status.Checksum, signedInitResponse.InitResponse.Checksum) {
- return fmt.Errorf("key manager failed to replicate, checksum mismatch")
- }
- return nil
- }
- return fmt.Errorf("consensus state missing km status")
- }(); err != nil {
- return err
- }
-
- return nil
-}
-
 func (sc *KmUpgradeImpl) Run(ctx context.Context, childEnv *env.Env) error {
 cli := cli.New(childEnv, sc.Net, sc.Logger)

- if err := sc.StartNetworkAndTestClient(ctx, childEnv); err != nil {
+ // Start the network and run the test client.
+ if err := sc.StartNetworkAndWaitForClientSync(ctx); err != nil {
 return err
 }

- sc.Logger.Info("waiting for client to exit")
- // Wait for the client to exit.
- if err := sc.WaitTestClientOnly(); err != nil {
+ if err := sc.RunTestClientAndCheckLogs(ctx, childEnv); err != nil {
 return err
 }

- // Fetch nonce.
- nonce, err := sc.GetTestEntityNonce(ctx)
- if err != nil {
+ // Upgrade the key manager runtime.
+ if err := sc.UpgradeKeyManager(ctx, childEnv, cli, sc.upgradedKeyManagerIndex, 0); err != nil {
 return err
 }
- sc.nonce = nonce
-
- // Generate and update a policy that will allow replication for the new
- // keymanager.
- if err = sc.applyUpgradePolicy(childEnv); err != nil {
- return fmt.Errorf("updating policies: %w", err)
- }
-
- // Start the new keymanager.
- sc.Logger.Info("starting new keymanager")
- newKm := sc.Net.Keymanagers()[1]
- if err = newKm.Start(); err != nil {
- return fmt.Errorf("starting new key-manager: %w", err)
- }
-
- // Fetch current epoch.
- epoch, err := sc.Net.Controller().Beacon.GetEpoch(ctx, consensus.HeightLatest)
- if err != nil {
- return fmt.Errorf("failed to get current epoch: %w", err)
- }
-
- // Fetch old deployment.
- oldRtDsc, err := sc.Net.Controller().Registry.GetRuntime(ctx, &registry.GetRuntimeQuery{
- Height: consensus.HeightLatest,
- ID: sc.Net.Runtimes()[2].ID(),
- })
- if err != nil {
- return fmt.Errorf("failed to get runtime descriptor: %w", err)
- }
-
- // Update runtime to include the new enclave identity.
- sc.Logger.Info("updating keymanager runtime descriptor")
- newRt := sc.Net.Runtimes()[2]
- rtDsc := newRt.ToRuntimeDescriptor()
- rtDsc.Deployments[0].ValidFrom = epoch + 1
- rtDsc.Deployments = append(oldRtDsc.Deployments, rtDsc.Deployments...) // Add old deployment.
- kmTxPath := filepath.Join(childEnv.Dir(), "register_km_runtime.json")
- if err = cli.Registry.GenerateRegisterRuntimeTx(childEnv.Dir(), rtDsc, sc.nonce, kmTxPath); err != nil {
- return fmt.Errorf("failed to generate register KM runtime tx: %w", err)
- }
- sc.nonce++
- if err = cli.Consensus.SubmitTx(kmTxPath); err != nil {
- return fmt.Errorf("failed to update KM runtime: %w", err)
- }
-
- // Wait for the new node to register.
- sc.Logger.Info("waiting for new keymanager node to register",
- "num_nodes", sc.Net.NumRegisterNodes(),
- )
- if err = sc.Net.Keymanagers()[1].WaitReady(ctx); err != nil {
- return fmt.Errorf("error waiting for new keymanager to be ready: %w", err)
- }
-
- // Ensure replication succeeded.
- if err = sc.ensureReplicationWorked(ctx, newKm, newRt); err != nil {
- return err
- }
-
- nodeCh, nodeSub, err := sc.Net.Controller().Registry.WatchNodes(ctx)
- if err != nil {
- return fmt.Errorf("failed to watch nodes: %w", err)
- }
- defer nodeSub.Close()
-
- // Shut down the old keymanager and make sure it deregisters.
- sc.Logger.Info("shutting down old keymanager")
- oldKm := sc.Net.Keymanagers()[0]
- if err := oldKm.RequestShutdown(ctx, true); err != nil {
- return fmt.Errorf("failed to request shutdown: %w", err)
- }
-
- // Ensure keymanager deregisters.
-OUTER:
- for {
- select {
- case ev := <-nodeCh:
- if !ev.IsRegistration && ev.Node.ID.Equal(oldKm.NodeID) {
- break OUTER
- }
- case <-time.After(10 * time.Second):
- return fmt.Errorf("failed to wait for keymanager to de-register")
- }
- }

 // Run client again.
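// RunTestClientAndCheckLogs, which replaces the startTestClientOnly/
// waitTestClient pair below, is not itself shown in this diff. A hedged sketch
// of the equivalent composition, pieced together from the helpers removed from
// runtime.go later in this diff; the actual method body may differ:
func runTestClientAndCheckLogsSketch(ctx context.Context, sc *Scenario, childEnv *env.Env) error {
	if err := sc.TestClient.Init(sc); err != nil {
		return err
	}
	if err := sc.TestClient.Start(ctx, childEnv); err != nil {
		return err
	}
	if err := sc.TestClient.Wait(); err != nil {
		return err
	}
	// Give the log watchers a moment to process the client's final output,
	// mirroring the removed checkTestClientLogs.
	time.Sleep(1 * time.Second)
	return sc.Net.CheckLogWatchers()
}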
sc.Logger.Info("starting a second client to check if key manager works") - sc.Scenario.testClient = NewKVTestClient().WithSeed("seed2").WithScenario(InsertRemoveKeyValueEncScenarioV2) - if err := sc.startTestClientOnly(ctx, childEnv); err != nil { - return err - } - return sc.waitTestClient() + sc.Scenario.TestClient = NewTestClient().WithSeed("seed2").WithScenario(InsertRemoveKeyValueEncScenarioV2) + return sc.RunTestClientAndCheckLogs(ctx, childEnv) } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_util.go b/go/oasis-test-runner/scenario/e2e/runtime/keymanager_util.go deleted file mode 100644 index 4d04db97105..00000000000 --- a/go/oasis-test-runner/scenario/e2e/runtime/keymanager_util.go +++ /dev/null @@ -1,369 +0,0 @@ -package runtime - -import ( - "bytes" - "context" - "fmt" - "os" - "path/filepath" - - "github.com/oasisprotocol/curve25519-voi/primitives/x25519" - - beacon "github.com/oasisprotocol/oasis-core/go/beacon/api" - "github.com/oasisprotocol/oasis-core/go/common/cbor" - "github.com/oasisprotocol/oasis-core/go/common/sgx" - "github.com/oasisprotocol/oasis-core/go/common/version" - consensus "github.com/oasisprotocol/oasis-core/go/consensus/api" - keymanager "github.com/oasisprotocol/oasis-core/go/keymanager/api" - "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" - "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" - "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis/cli" - registry "github.com/oasisprotocol/oasis-core/go/registry/api" -) - -func (sc *Scenario) waitKeymanagers(ctx context.Context, idxs []int) error { - sc.Logger.Info("waiting for the key managers to become ready", "ids", fmt.Sprintf("%+v", idxs)) - - kms := sc.Net.Keymanagers() - for _, idx := range idxs { - kmCtrl, err := oasis.NewController(kms[idx].SocketPath()) - if err != nil { - return err - } - if err = kmCtrl.WaitReady(ctx); err != nil { - return err - } - } - return nil -} - -func (sc *Scenario) startKeymanagers(ctx context.Context, idxs []int) error { - sc.Logger.Info("starting the key managers", "ids", fmt.Sprintf("%+v", idxs)) - - kms := sc.Net.Keymanagers() - for _, idx := range idxs { - if err := kms[idx].Start(); err != nil { - return err - } - } - return nil -} - -func (sc *Scenario) stopKeymanagers(ctx context.Context, idxs []int) error { - sc.Logger.Info("stopping the key managers", "ids", fmt.Sprintf("%+v", idxs)) - - kms := sc.Net.Keymanagers() - for _, idx := range idxs { - if err := kms[idx].Stop(); err != nil { - return err - } - } - return nil -} - -func (sc *Scenario) restartKeymanagers(ctx context.Context, idxs []int) error { - sc.Logger.Info("restarting the key managers", "ids", fmt.Sprintf("%+v", idxs)) - - kms := sc.Net.Keymanagers() - for _, idx := range idxs { - if err := kms[idx].Restart(ctx); err != nil { - return err - } - } - return nil -} - -func (sc *Scenario) startAndWaitKeymanagers(ctx context.Context, idxs []int) error { - if err := sc.startKeymanagers(ctx, idxs); err != nil { - return err - } - return sc.waitKeymanagers(ctx, idxs) -} - -func (sc *Scenario) restartAndWaitKeymanagers(ctx context.Context, idxs []int) error { - if err := sc.restartKeymanagers(ctx, idxs); err != nil { - return err - } - return sc.waitKeymanagers(ctx, idxs) -} - -func (sc *Scenario) keymanagerStatus(ctx context.Context) (*keymanager.Status, error) { - return sc.Net.ClientController().Keymanager.GetStatus(ctx, ®istry.NamespaceQuery{ - Height: consensus.HeightLatest, - ID: keymanagerID, - }) -} - -func (sc *Scenario) 
keymanagerMasterSecret(ctx context.Context) (*keymanager.SignedEncryptedMasterSecret, error) { - secret, err := sc.Net.ClientController().Keymanager.GetMasterSecret(ctx, ®istry.NamespaceQuery{ - Height: consensus.HeightLatest, - ID: keymanagerID, - }) - if err == keymanager.ErrNoSuchMasterSecret { - return nil, nil - } - return secret, err -} - -func (sc *Scenario) keymanagerInitResponse(ctx context.Context, idx int) (*keymanager.InitResponse, error) { - kms := sc.Net.Keymanagers() - if kmLen := len(kms); kmLen <= idx { - return nil, fmt.Errorf("expected more than %d keymanager, have: %v", idx, kmLen) - } - km := kms[idx] - - ctrl, err := oasis.NewController(km.SocketPath()) - if err != nil { - return nil, err - } - - // Extract ExtraInfo. - node, err := ctrl.Registry.GetNode( - ctx, - ®istry.IDQuery{ - ID: km.NodeID, - }, - ) - if err != nil { - return nil, err - } - rt := node.GetRuntime(keymanagerID, version.Version{}) - if rt == nil { - return nil, fmt.Errorf("key manager is missing keymanager runtime from descriptor") - } - var signedInitResponse keymanager.SignedInitResponse - if err = cbor.Unmarshal(rt.ExtraInfo, &signedInitResponse); err != nil { - return nil, fmt.Errorf("failed to unmarshal extrainfo") - } - - return &signedInitResponse.InitResponse, nil -} - -func (sc *kmReplicateImpl) waitKeymanagerStatuses(ctx context.Context, n int) (*keymanager.Status, error) { - sc.Logger.Info("waiting for key manager status", "n", n) - - stCh, stSub, err := sc.Net.Controller().Keymanager.WatchStatuses(ctx) - if err != nil { - return nil, err - } - defer stSub.Close() - - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case status := <-stCh: - if !status.ID.Equal(&keymanagerID) { - continue - } - n-- - if n <= 0 { - return status, nil - } - } - } -} - -func (sc *Scenario) waitMasterSecret(ctx context.Context, generation uint64) (*keymanager.Status, error) { - sc.Logger.Info("waiting for master secret", "generation", generation) - - mstCh, mstSub, err := sc.Net.Controller().Keymanager.WatchMasterSecrets(ctx) - if err != nil { - return nil, err - } - defer mstSub.Close() - - stCh, stSub, err := sc.Net.Controller().Keymanager.WatchStatuses(ctx) - if err != nil { - return nil, err - } - defer stSub.Close() - - var last *keymanager.Status - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case secret := <-mstCh: - if !secret.Secret.ID.Equal(&keymanagerID) { - continue - } - - sc.Logger.Info("master secret proposed", - "generation", secret.Secret.Generation, - "epoch", secret.Secret.Epoch, - "num_ciphertexts", len(secret.Secret.Secret.Ciphertexts), - ) - case status := <-stCh: - if !status.ID.Equal(&keymanagerID) { - continue - } - if status.NextGeneration() == 0 { - continue - } - if last != nil && status.Generation == last.Generation { - last = status - continue - } - - sc.Logger.Info("master secret rotation", - "generation", status.Generation, - "rotation_epoch", status.RotationEpoch, - ) - - if status.Generation >= generation { - return status, nil - } - last = status - } - } -} - -func (sc *Scenario) waitEphemeralSecrets(ctx context.Context, n int) (*keymanager.SignedEncryptedEphemeralSecret, error) { - sc.Logger.Info("waiting ephemeral secrets", "n", n) - - ephCh, ephSub, err := sc.Net.Controller().Keymanager.WatchEphemeralSecrets(ctx) - if err != nil { - return nil, err - } - defer ephSub.Close() - - var secret *keymanager.SignedEncryptedEphemeralSecret - for i := 0; i < n; i++ { - select { - case secret = <-ephCh: - sc.Logger.Info("ephemeral secret 
published", - "epoch", secret.Secret.Epoch, - ) - case <-ctx.Done(): - return nil, fmt.Errorf("timed out waiting for ephemeral secrets") - } - } - return secret, nil -} - -func (sc *Scenario) updateRotationInterval(ctx context.Context, nonce uint64, childEnv *env.Env, rotationInterval beacon.EpochTime) error { - sc.Logger.Info("updating master secret rotation interval in the key manager policy", - "interval", rotationInterval, - ) - - status, err := sc.keymanagerStatus(ctx) - if err != nil { - return err - } - - // Update the policy, or create a new one if it doesn't already exist. - var policy keymanager.PolicySGX - if status != nil && status.Policy != nil { - policy = status.Policy.Policy - policy.Serial++ - } else { - policy.Serial = 1 - policy.ID = keymanagerID - policy.Enclaves = make(map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX) - } - policy.MasterSecretRotationInterval = rotationInterval - - // Sign and publish the new policy. - kmPolicyPath := filepath.Join(childEnv.Dir(), "km_policy.cbor") - kmPolicySig1Path := filepath.Join(childEnv.Dir(), "km_policy_sig1.pem") - kmPolicySig2Path := filepath.Join(childEnv.Dir(), "km_policy_sig2.pem") - kmPolicySig3Path := filepath.Join(childEnv.Dir(), "km_policy_sig3.pem") - kmUpdateTxPath := filepath.Join(childEnv.Dir(), "km_gen_update.json") - - sc.Logger.Info("saving key manager policy") - raw := cbor.Marshal(policy) - if err = os.WriteFile(kmPolicyPath, raw, 0o644); err != nil { // nolint: gosec - return err - } - - sc.Logger.Info("signing key manager policy") - cli := cli.New(childEnv, sc.Net, sc.Logger) - if err := cli.Keymanager.SignPolicy("1", kmPolicyPath, kmPolicySig1Path); err != nil { - return err - } - if err := cli.Keymanager.SignPolicy("2", kmPolicyPath, kmPolicySig2Path); err != nil { - return err - } - if err := cli.Keymanager.SignPolicy("3", kmPolicyPath, kmPolicySig3Path); err != nil { - return err - } - - sc.Logger.Info("updating key manager policy") - if err := cli.Keymanager.GenUpdate(nonce, kmPolicyPath, []string{kmPolicySig1Path, kmPolicySig2Path, kmPolicySig3Path}, kmUpdateTxPath); err != nil { - return err - } - if err := cli.Consensus.SubmitTx(kmUpdateTxPath); err != nil { - return fmt.Errorf("failed to update key manager policy: %w", err) - } - - return nil -} - -func (sc *Scenario) compareLongtermPublicKeys(ctx context.Context, idxs []int) error { - chainContext, err := sc.Net.Controller().Consensus.GetChainContext(ctx) - if err != nil { - return err - } - - status, err := sc.keymanagerStatus(ctx) - if err != nil { - return err - } - - var generation uint64 - if status.Generation > 0 { - // Avoid verification problems when the consensus verifier is one block behind. - generation = status.Generation - 1 - } - - sc.Logger.Info("comparing the key managers for master secrets", - "ids", idxs, - "generation", generation, - ) - - keys := make(map[uint64]*x25519.PublicKey) - kms := sc.Net.Keymanagers() - for _, idx := range idxs { - km := kms[idx] - - // Prepare an RPC client which will be used to query key manager nodes - // for public ephemeral keys. 
- rpcClient, err := newKeyManagerRPCClient(chainContext)
- if err != nil {
- return err
- }
- peerID, err := rpcClient.addKeyManagerAddrToHost(km)
- if err != nil {
- return err
- }
-
- for gen := uint64(0); gen <= generation; gen++ {
- sc.Logger.Info("fetching public key", "generation", gen, "node", km.Name)
-
- var key *x25519.PublicKey
- key, err = rpcClient.fetchPublicKey(ctx, gen, peerID)
- switch {
- case err != nil:
- return err
- case key == nil:
- return fmt.Errorf("master secret generation %d not found", gen)
- }
-
- if expected, ok := keys[gen]; ok && !bytes.Equal(expected[:], key[:]) {
- return fmt.Errorf("derived keys don't match: expected %+X, given %+X", expected, key)
- }
- keys[gen] = key
-
- sc.Logger.Info("public key fetched", "key", fmt.Sprintf("%+X", key))
- }
- if err != nil {
- return err
- }
- }
- if expected, size := int(generation)+1, len(keys); expected != size {
- return fmt.Errorf("the number of derived keys doesn't match: expected %d, found %d", expected, size)
- }
-
- return nil
-}
diff --git a/go/oasis-test-runner/scenario/e2e/runtime/late_start.go b/go/oasis-test-runner/scenario/e2e/runtime/late_start.go
index f7ec510e767..6f7d2ece43f 100644
--- a/go/oasis-test-runner/scenario/e2e/runtime/late_start.go
+++ b/go/oasis-test-runner/scenario/e2e/runtime/late_start.go
@@ -26,7 +26,7 @@ func newLateStartImpl(name string) scenario.Scenario {
 return &lateStartImpl{
 Scenario: *NewScenario(
 name,
- NewKVTestClient().WithScenario(SimpleKeyValueScenario),
+ NewTestClient().WithScenario(SimpleKeyValueScenario),
 ),
 }
 }
@@ -78,7 +78,7 @@ func (sc *lateStartImpl) Run(ctx context.Context, childEnv *env.Env) error {
 return fmt.Errorf("failed to create controller for client: %w", err)
 }
 err = ctrl.RuntimeClient.SubmitTxNoWait(ctx, &api.SubmitTxRequest{
- RuntimeID: runtimeID,
+ RuntimeID: KeyValueRuntimeID,
 Data: cbor.Marshal(&TxnCall{
 Method: "insert",
 Args: struct {
@@ -94,7 +94,7 @@ func (sc *lateStartImpl) Run(ctx context.Context, childEnv *env.Env) error {
 return fmt.Errorf("expected error: %v, got: %v", api.ErrNotSynced, err)
 }
 _, err = ctrl.RuntimeClient.SubmitTx(ctx, &api.SubmitTxRequest{
- RuntimeID: runtimeID,
+ RuntimeID: KeyValueRuntimeID,
 Data: cbor.Marshal(&TxnCall{
 Method: "insert",
 Args: struct {
@@ -116,11 +116,8 @@ func (sc *lateStartImpl) Run(ctx context.Context, childEnv *env.Env) error {
 sc.Logger.Info("Starting the basic test client")

 // Explicitly wait for the client to sync before starting the client.
- if err = sc.waitForClientSync(ctx); err != nil {
+ if err = sc.WaitForClientSync(ctx); err != nil {
 return err
 }
- if err = sc.startTestClientOnly(ctx, childEnv); err != nil {
- return err
- }
- return sc.waitTestClient()
+ return sc.RunTestClientAndCheckLogs(ctx, childEnv)
 }
diff --git a/go/oasis-test-runner/scenario/e2e/runtime/node_shutdown.go b/go/oasis-test-runner/scenario/e2e/runtime/node_shutdown.go
index 3b5270ba23b..4e08fa9e0d6 100644
--- a/go/oasis-test-runner/scenario/e2e/runtime/node_shutdown.go
+++ b/go/oasis-test-runner/scenario/e2e/runtime/node_shutdown.go
@@ -28,7 +28,7 @@ func newNodeShutdownImpl() scenario.Scenario {
 sc := &nodeShutdownImpl{
 Scenario: *NewScenario(
 "node-shutdown",
- NewKVTestClient().WithScenario(SimpleKeyValueScenario),
+ NewTestClient().WithScenario(SimpleKeyValueScenario),
 ),
 }
 return sc
@@ -64,7 +64,7 @@ func (sc *nodeShutdownImpl) Run(ctx context.Context, childEnv *env.Env) error {
 }

 // Wait for the client to exit.
- if err = sc.WaitTestClientOnly(); err != nil {
+ if err = sc.WaitTestClient(); err != nil {
 return err
 }
@@ -88,10 +88,10 @@ func (sc *nodeShutdownImpl) Run(ctx context.Context, childEnv *env.Env) error {
 if status.Consensus.Status != consensusAPI.StatusStateReady {
 return fmt.Errorf("node consensus status should be '%s', got: '%s'", consensusAPI.StatusStateReady, status.Consensus.Status)
 }
- if status.Runtimes[runtimeID].Committee == nil {
+ if status.Runtimes[KeyValueRuntimeID].Committee == nil {
 return fmt.Errorf("node committee status missing")
 }
- if st := status.Runtimes[runtimeID].Committee.Status; st != api.StatusStateReady {
+ if st := status.Runtimes[KeyValueRuntimeID].Committee.Status; st != api.StatusStateReady {
 return fmt.Errorf("node compute worker status should be '%s', got: '%s'", api.StatusStateReady, st)
 }
 if status.Registration.Descriptor == nil {
diff --git a/go/oasis-test-runner/scenario/e2e/runtime/offset_restart.go b/go/oasis-test-runner/scenario/e2e/runtime/offset_restart.go
index 9812cc78e1e..43ebf28a8b5 100644
--- a/go/oasis-test-runner/scenario/e2e/runtime/offset_restart.go
+++ b/go/oasis-test-runner/scenario/e2e/runtime/offset_restart.go
@@ -19,7 +19,7 @@ func newOffsetRestartImpl() scenario.Scenario {
 sc := &offsetRestartImpl{
 Scenario: *NewScenario(
 "offset-restart",
- NewKVTestClient().WithScenario(InsertTransferKeyValueScenario),
+ NewTestClient().WithScenario(InsertTransferKeyValueScenario),
 ),
 }
 return sc
@@ -61,7 +61,7 @@ func (sc *offsetRestartImpl) Run(ctx context.Context, childEnv *env.Env) error {
 return err
 }

- if err = sc.WaitTestClientOnly(); err != nil {
+ if err = sc.WaitTestClient(); err != nil {
 return err
 }
@@ -89,6 +89,6 @@ func (sc *offsetRestartImpl) Run(ctx context.Context, childEnv *env.Env) error {
 // if these disconnected after the client node had already seen them, thereby
 // hanging the network (no transactions could be submitted).
sc.Logger.Info("network back up, trying to run client again") - sc.Scenario.testClient = NewKVTestClient().WithSeed("seed2").WithScenario(RemoveKeyValueScenario) + sc.Scenario.TestClient = NewTestClient().WithSeed("seed2").WithScenario(RemoveKeyValueScenario) return sc.Scenario.Run(ctx, childEnv) } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/runtime.go b/go/oasis-test-runner/scenario/e2e/runtime/runtime.go index 204983533c5..f264cd6c580 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/runtime.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/runtime.go @@ -2,78 +2,18 @@ package runtime import ( "context" - "crypto/rand" "fmt" - "path/filepath" "time" - beacon "github.com/oasisprotocol/oasis-core/go/beacon/api" "github.com/oasisprotocol/oasis-core/go/common" "github.com/oasisprotocol/oasis-core/go/common/cbor" memorySigner "github.com/oasisprotocol/oasis-core/go/common/crypto/signature/signers/memory" - "github.com/oasisprotocol/oasis-core/go/common/node" - "github.com/oasisprotocol/oasis-core/go/common/sgx" - consensus "github.com/oasisprotocol/oasis-core/go/consensus/api" "github.com/oasisprotocol/oasis-core/go/consensus/api/transaction" - "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/cmd" - "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" - "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/log" - "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" - "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario" - "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario/e2e" - registry "github.com/oasisprotocol/oasis-core/go/registry/api" roothash "github.com/oasisprotocol/oasis-core/go/roothash/api" runtimeClient "github.com/oasisprotocol/oasis-core/go/runtime/client/api" - runtimeConfig "github.com/oasisprotocol/oasis-core/go/runtime/config" - scheduler "github.com/oasisprotocol/oasis-core/go/scheduler/api" staking "github.com/oasisprotocol/oasis-core/go/staking/api" ) -const ( - cfgRuntimeBinaryDirDefault = "runtime.binary_dir.default" - cfgRuntimeBinaryDirIntelSGX = "runtime.binary_dir.intel-sgx" - cfgRuntimeSourceDir = "runtime.source_dir" - cfgRuntimeTargetDir = "runtime.target_dir" - cfgRuntimeLoader = "runtime.loader" - cfgRuntimeProvisioner = "runtime.provisioner" - cfgTEEHardware = "tee_hardware" - cfgIasMock = "ias.mock" - cfgEpochInterval = "epoch.interval" -) - -var ( - // ParamsDummyScenario is a dummy instance of runtimeImpl used to register global e2e/runtime flags. - ParamsDummyScenario = NewScenario("", nil) - - // Runtime is the basic network + client test case with runtime support. - Runtime scenario.Scenario = NewScenario( - "runtime", - NewKVTestClient().WithScenario(SimpleKeyValueScenario), - ) - - // RuntimeEncryption is the basic network + client with encryption test case. - RuntimeEncryption scenario.Scenario = NewScenario( - "runtime-encryption", - NewKVTestClient().WithScenario(InsertRemoveKeyValueEncScenario), - ) - - // DefaultRuntimeLogWatcherHandlerFactories is a list of default log watcher - // handler factories for the basic scenario. 
- DefaultRuntimeLogWatcherHandlerFactories = []log.WatcherHandlerFactory{
- oasis.LogAssertNoTimeouts(),
- oasis.LogAssertNoRoundFailures(),
- oasis.LogAssertNoExecutionDiscrepancyDetected(),
- }
-
- runtimeBinary = "simple-keyvalue"
- keyManagerBinary = "simple-keymanager"
-
- runtimeID common.Namespace
- keymanagerID common.Namespace
- _ = runtimeID.UnmarshalHex("8000000000000000000000000000000000000000000000000000000000000000")
- _ = keymanagerID.UnmarshalHex("c000000000000000ffffffffffffffffffffffffffffffffffffffffffffffff")
-)
-
 // TxnCall is a transaction call in the test runtime.
 type TxnCall struct {
 // Nonce is a nonce.
@@ -92,313 +32,6 @@ type TxnOutput struct {
 Error *string
 }

-// Scenario is a base class for tests involving oasis-node with runtime.
-type Scenario struct {
- e2e.Scenario
-
- testClient TestClient
-
- // This disables the random initial epoch for tests that are extremely
- // sensitive to the initial epoch. Ideally this shouldn't be set for
- // any of our tests, but I'm sick and tired of trying to debug poorly
- // written test cases.
- //
- // If your new test needs this, your test is bad, and you should go
- // and rewrite it so that this option isn't set.
- debugNoRandomInitialEpoch bool
-
- // The byzantine tests also explode since the node only runs for
- // a single epoch.
- //
- // If your new test needs this, your test is bad, and you should go
- // and rewrite it so that this option isn't set.
- debugWeakAlphaOk bool
-}
-
-// NewScenario creates a new base scenario for oasis-node runtime end-to-end tests.
-func NewScenario(name string, testClient TestClient) *Scenario {
- // Empty scenario name is used for registering global parameters only.
- fullName := "runtime"
- if name != "" {
- fullName += "/" + name
- }
-
- sc := &Scenario{
- Scenario: *e2e.NewScenario(fullName),
- testClient: testClient,
- }
- sc.Flags.String(cfgRuntimeBinaryDirDefault, "", "(no-TEE) path to the runtime binaries directory")
- sc.Flags.String(cfgRuntimeBinaryDirIntelSGX, "", "(Intel SGX) path to the runtime binaries directory")
- sc.Flags.String(cfgRuntimeSourceDir, "", "path to the runtime source base dir")
- sc.Flags.String(cfgRuntimeTargetDir, "", "path to the Cargo target dir (should be a parent of the runtime binary dir)")
- sc.Flags.String(cfgRuntimeLoader, "oasis-core-runtime-loader", "path to the runtime loader")
- sc.Flags.String(cfgRuntimeProvisioner, "sandboxed", "the runtime provisioner: mock, unconfined, or sandboxed")
- sc.Flags.String(cfgTEEHardware, "", "TEE hardware to use")
- sc.Flags.Bool(cfgIasMock, true, "if mock IAS service should be used")
- sc.Flags.Int64(cfgEpochInterval, 0, "epoch interval")
-
- return sc
-}
-
-func (sc *Scenario) Clone() scenario.Scenario {
- var testClient TestClient
- if sc.testClient != nil {
- testClient = sc.testClient.Clone()
- }
- return &Scenario{
- Scenario: sc.Scenario.Clone(),
- testClient: testClient,
- debugNoRandomInitialEpoch: sc.debugNoRandomInitialEpoch,
- debugWeakAlphaOk: sc.debugWeakAlphaOk,
- }
-}
-
-func (sc *Scenario) PreInit(childEnv *env.Env) error {
- return nil
-}
-
-func (sc *Scenario) Fixture() (*oasis.NetworkFixture, error) {
- f, err := sc.Scenario.Fixture()
- if err != nil {
- return nil, err
- }
-
- tee, err := sc.getTEEHardware()
- if err != nil {
- return nil, err
- }
- var mrSigner *sgx.MrSigner
- if tee == node.TEEHardwareIntelSGX {
- mrSigner = &sgx.FortanixDummyMrSigner
- }
- runtimeLoader, _ := sc.Flags.GetString(cfgRuntimeLoader)
- iasMock, _ := sc.Flags.GetBool(cfgIasMock)
- runtimeProvisionerRaw, _ := sc.Flags.GetString(cfgRuntimeProvisioner)
- var runtimeProvisioner runtimeConfig.RuntimeProvisioner
- if err = runtimeProvisioner.UnmarshalText([]byte(runtimeProvisionerRaw)); err != nil {
- return nil, fmt.Errorf("failed to parse runtime provisioner: %w", err)
- }
-
- ff := &oasis.NetworkFixture{
- TEE: oasis.TEEFixture{
- Hardware: tee,
- MrSigner: mrSigner,
- },
- Network: oasis.NetworkCfg{
- NodeBinary: f.Network.NodeBinary,
- RuntimeSGXLoaderBinary: runtimeLoader,
- DefaultLogWatcherHandlerFactories: DefaultRuntimeLogWatcherHandlerFactories,
- Consensus: f.Network.Consensus,
- IAS: oasis.IASCfg{
- Mock: iasMock,
- },
- },
- Entities: []oasis.EntityCfg{
- {IsDebugTestEntity: true},
- {},
- },
- Runtimes: []oasis.RuntimeFixture{
- // Key manager runtime.
- {
- ID: keymanagerID,
- Kind: registry.KindKeyManager,
- Entity: 0,
- Keymanager: -1,
- AdmissionPolicy: registry.RuntimeAdmissionPolicy{
- AnyNode: &registry.AnyNodeRuntimeAdmissionPolicy{},
- },
- GovernanceModel: registry.GovernanceEntity,
- Deployments: []oasis.DeploymentCfg{
- {
- Binaries: sc.resolveRuntimeBinaries(keyManagerBinary),
- },
- },
- },
- // Compute runtime.
- {
- ID: runtimeID,
- Kind: registry.KindCompute,
- Entity: 0,
- Keymanager: 0,
- Executor: registry.ExecutorParameters{
- GroupSize: 2,
- GroupBackupSize: 1,
- RoundTimeout: 20,
- MaxMessages: 128,
- },
- TxnScheduler: registry.TxnSchedulerParameters{
- MaxBatchSize: 100,
- MaxBatchSizeBytes: 1024 * 1024,
- BatchFlushTimeout: 1 * time.Second,
- ProposerTimeout: 20,
- MaxInMessages: 128,
- },
- AdmissionPolicy: registry.RuntimeAdmissionPolicy{
- AnyNode: &registry.AnyNodeRuntimeAdmissionPolicy{},
- },
- Constraints: map[scheduler.CommitteeKind]map[scheduler.Role]registry.SchedulingConstraints{
- scheduler.KindComputeExecutor: {
- scheduler.RoleWorker: {
- MinPoolSize: &registry.MinPoolSizeConstraint{
- Limit: 2,
- },
- },
- scheduler.RoleBackupWorker: {
- MinPoolSize: &registry.MinPoolSizeConstraint{
- Limit: 1,
- },
- },
- },
- },
- GovernanceModel: registry.GovernanceEntity,
- Deployments: []oasis.DeploymentCfg{
- {
- Binaries: sc.resolveRuntimeBinaries(runtimeBinary),
- },
- },
- },
- },
- Validators: []oasis.ValidatorFixture{
- {Entity: 1, Consensus: oasis.ConsensusFixture{SupplementarySanityInterval: 1}},
- {Entity: 1, Consensus: oasis.ConsensusFixture{}},
- {Entity: 1, Consensus: oasis.ConsensusFixture{}},
- },
- KeymanagerPolicies: []oasis.KeymanagerPolicyFixture{
- {Runtime: 0, Serial: 1, MasterSecretRotationInterval: 0},
- },
- Keymanagers: []oasis.KeymanagerFixture{
- {
- RuntimeProvisioner: runtimeProvisioner,
- Runtime: 0,
- Entity: 1,
- Policy: 0,
- SkipPolicy: tee != node.TEEHardwareIntelSGX,
- },
- },
- ComputeWorkers: []oasis.ComputeWorkerFixture{
- {RuntimeProvisioner: runtimeProvisioner, Entity: 1, Runtimes: []int{1}},
- {
- RuntimeProvisioner: runtimeProvisioner,
- Entity: 1,
- Runtimes: []int{1},
- RuntimeConfig: map[int]map[string]interface{}{
- 1: {
- "core": map[string]interface{}{
- "min_gas_price": 1, // Just to test support for runtime configuration.
- },
- },
- },
- },
- {RuntimeProvisioner: runtimeProvisioner, Entity: 1, Runtimes: []int{1}},
- },
- Sentries: []oasis.SentryFixture{},
- Seeds: []oasis.SeedFixture{{}},
- Clients: []oasis.ClientFixture{
- {RuntimeProvisioner: runtimeProvisioner, Runtimes: []int{1}},
- },
- }
-
- if epochInterval, _ := sc.Flags.GetInt64(cfgEpochInterval); epochInterval > 0 {
- ff.Network.Beacon.InsecureParameters = &beacon.InsecureParameters{
- Interval: epochInterval,
- }
- ff.Network.Beacon.VRFParameters = &beacon.VRFParameters{
- AlphaHighQualityThreshold: 3,
- Interval: epochInterval,
- ProofSubmissionDelay: epochInterval / 2,
- }
- }
-
- return ff, nil
-}
-
-// getTEEHardware returns the configured TEE hardware.
-func (sc *Scenario) getTEEHardware() (node.TEEHardware, error) {
- teeStr, _ := sc.Flags.GetString(cfgTEEHardware)
- var tee node.TEEHardware
- if err := tee.FromString(teeStr); err != nil {
- return node.TEEHardwareInvalid, err
- }
- return tee, nil
-}
-
-func (sc *Scenario) resolveRuntimeBinaries(baseRuntimeBinary string) map[node.TEEHardware]string {
- binaries := make(map[node.TEEHardware]string)
- for _, tee := range []node.TEEHardware{
- node.TEEHardwareInvalid,
- node.TEEHardwareIntelSGX,
- } {
- binaries[tee] = sc.resolveRuntimeBinary(baseRuntimeBinary, tee)
- }
- return binaries
-}
-
-func (sc *Scenario) resolveRuntimeBinary(runtimeBinary string, tee node.TEEHardware) string {
- var runtimeExt, path string
- switch tee {
- case node.TEEHardwareInvalid:
- runtimeExt = ""
- path, _ = sc.Flags.GetString(cfgRuntimeBinaryDirDefault)
- case node.TEEHardwareIntelSGX:
- runtimeExt = ".sgxs"
- path, _ = sc.Flags.GetString(cfgRuntimeBinaryDirIntelSGX)
- }
-
- return filepath.Join(path, runtimeBinary+runtimeExt)
-}
-
-// StartNetworkAndTestClient starts the network and the runtime test client.
-func (sc *Scenario) StartNetworkAndTestClient(ctx context.Context, childEnv *env.Env) error {
- // Start the network.
- if err := sc.StartNetworkAndWaitForClientSync(ctx); err != nil {
- return fmt.Errorf("failed to initialize network: %w", err)
- }
-
- return sc.startTestClientOnly(ctx, childEnv)
-}
-
-func (sc *Scenario) startTestClientOnly(ctx context.Context, childEnv *env.Env) error {
- if err := sc.testClient.Init(sc); err != nil {
- return fmt.Errorf("failed to initialize test client: %w", err)
- }
-
- if err := sc.testClient.Start(ctx, childEnv); err != nil {
- return fmt.Errorf("failed to start test client: %w", err)
- }
-
- return nil
-}
-
-// WaitTestClientOnly waits for the runtime test client to finish its work.
-func (sc *Scenario) WaitTestClientOnly() error {
- return sc.testClient.Wait()
-}
-
-func (sc *Scenario) checkTestClientLogs() error {
- // Wait for logs to be fully processed before checking them. When
- // the client exits very quickly the log watchers may not have
- // processed the relevant logs yet.
- //
- // TODO: Find a better way to synchronize log watchers.
- time.Sleep(1 * time.Second)
-
- return sc.Net.CheckLogWatchers()
-}
-
-func (sc *Scenario) waitTestClient() error {
- if err := sc.WaitTestClientOnly(); err != nil {
- return err
- }
- return sc.checkTestClientLogs()
-}
-
-func (sc *Scenario) Run(ctx context.Context, childEnv *env.Env) error {
- if err := sc.StartNetworkAndTestClient(ctx, childEnv); err != nil {
- return err
- }
- return sc.waitTestClient()
-}
-
 func (sc *Scenario) submitRuntimeTx(
 ctx context.Context,
 id common.Namespace,
@@ -486,7 +119,7 @@ func (sc *Scenario) submitConsensusXferTxMeta(
 xfer staking.Transfer,
 nonce uint64,
 ) (*runtimeClient.SubmitTxMetaResponse, error) {
- return sc.submitRuntimeTxMeta(ctx, runtimeID, nonce, "consensus_transfer", struct {
+ return sc.submitRuntimeTxMeta(ctx, KeyValueRuntimeID, nonce, "consensus_transfer", struct {
 Transfer staking.Transfer `json:"transfer"`
 }{
 Transfer: xfer,
@@ -555,332 +188,3 @@ func (sc *Scenario) submitRuntimeInMsg(ctx context.Context, id common.Namespace,

 return nil
 }
-
-func (sc *Scenario) waitForClientSync(ctx context.Context) error {
- clients := sc.Net.Clients()
- if len(clients) == 0 {
- return fmt.Errorf("scenario/e2e: network has no client nodes")
- }
-
- sc.Logger.Info("ensuring client node is synced")
- ctrl, err := oasis.NewController(clients[0].SocketPath())
- if err != nil {
- return fmt.Errorf("failed to create controller for client: %w", err)
- }
- if err = ctrl.WaitSync(ctx); err != nil {
- return fmt.Errorf("client-0 failed to sync: %w", err)
- }
-
- return nil
-}
-
-// StartNetworkAndWaitForClientSync starts the network and waits for the client node to sync.
-func (sc *Scenario) StartNetworkAndWaitForClientSync(ctx context.Context) error {
- if err := sc.Net.Start(); err != nil {
- return err
- }
-
- return sc.waitForClientSync(ctx)
-}
-
-func (sc *Scenario) waitNodesSynced(ctx context.Context) error {
- checkSynced := func(n *oasis.Node) error {
- c, err := oasis.NewController(n.SocketPath())
- if err != nil {
- return fmt.Errorf("failed to create node controller: %w", err)
- }
- defer c.Close()
-
- if err = c.WaitSync(ctx); err != nil {
- return fmt.Errorf("failed to wait for node to sync: %w", err)
- }
- return nil
- }
-
- sc.Logger.Info("waiting for all nodes to be synced")
-
- for _, n := range sc.Net.Validators() {
- if err := checkSynced(n.Node); err != nil {
- return err
- }
- }
- for _, n := range sc.Net.Keymanagers() {
- if err := checkSynced(n.Node); err != nil {
- return err
- }
- }
- for _, n := range sc.Net.ComputeWorkers() {
- if err := checkSynced(n.Node); err != nil {
- return err
- }
- }
- for _, n := range sc.Net.Clients() {
- if err := checkSynced(n.Node); err != nil {
- return err
- }
- }
-
- sc.Logger.Info("nodes synced")
- return nil
-}
-
-func (sc *Scenario) waitBlocks(ctx context.Context, n int) (*consensus.Block, error) {
- sc.Logger.Info("waiting for blocks", "n", n)
-
- blockCh, blockSub, err := sc.Net.Controller().Consensus.WatchBlocks(ctx)
- if err != nil {
- return nil, err
- }
- defer blockSub.Close()
-
- var blk *consensus.Block
- for i := 0; i < n; i++ {
- select {
- case blk = <-blockCh:
- sc.Logger.Info("new block",
- "height", blk.Height,
- )
- case <-ctx.Done():
- return nil, fmt.Errorf("timed out waiting for blocks")
- }
- }
-
- return blk, nil
-}
-
-func (sc *Scenario) waitEpochs(ctx context.Context, n beacon.EpochTime) error {
- sc.Logger.Info("waiting few epochs", "n", n)
-
- epoch, err := sc.Net.ClientController().Beacon.GetEpoch(ctx, consensus.HeightLatest)
- if err != nil {
- return err
- }
- if err := sc.Net.ClientController().Beacon.WaitEpoch(ctx, epoch+n); err != nil {
- return err
- }
- return nil
-}
-
-func (sc *Scenario) initialEpochTransitions(ctx context.Context, fixture *oasis.NetworkFixture) (beacon.EpochTime, error) {
- return sc.initialEpochTransitionsWith(ctx, fixture, 0)
-}
-
-func (sc *Scenario) initialEpochTransitionsWith(ctx context.Context, fixture *oasis.NetworkFixture, baseEpoch beacon.EpochTime) (beacon.EpochTime, error) {
- epoch := baseEpoch + 1
- advanceEpoch := func() error {
- sc.Logger.Info("triggering epoch transition",
- "epoch", epoch,
- )
- if err := sc.Net.Controller().SetEpoch(ctx, epoch); err != nil {
- return fmt.Errorf("failed to set epoch: %w", err)
- }
- sc.Logger.Info("epoch transition done",
- "epoch", epoch,
- )
-
- epoch++
-
- return nil
- }
-
- if len(sc.Net.Keymanagers()) > 0 {
- // First wait for validator and key manager nodes to register. Then perform an epoch
- // transition which will cause the compute and storage nodes to register.
- sc.Logger.Info("waiting for validators to initialize",
- "num_validators", len(sc.Net.Validators()),
- )
- for i, n := range sc.Net.Validators() {
- if fixture.Validators[i].NoAutoStart {
- // Skip nodes that don't auto start.
- continue
- }
- if err := n.WaitReady(ctx); err != nil {
- return epoch, fmt.Errorf("failed to wait for a validator: %w", err)
- }
- }
- sc.Logger.Info("waiting for key managers to initialize",
- "num_keymanagers", len(sc.Net.Keymanagers()),
- )
- for i, n := range sc.Net.Keymanagers() {
- if fixture.Keymanagers[i].NoAutoStart {
- // Skip nodes that don't auto start.
- continue
- }
- if err := n.WaitReady(ctx); err != nil {
- return epoch, fmt.Errorf("failed to wait for a key manager: %w", err)
- }
- }
- }
-
- if err := advanceEpoch(); err != nil { // Epoch 1
- return epoch, err
- }
-
- // Wait for compute workers to become ready.
- sc.Logger.Info("waiting for compute workers to initialize",
- "num_compute_workers", len(sc.Net.ComputeWorkers()),
- )
- for i, n := range sc.Net.ComputeWorkers() {
- if fixture.ComputeWorkers[i].NoAutoStart {
- // Skip nodes that don't auto start.
- continue
- }
- if err := n.WaitReady(ctx); err != nil {
- return epoch, fmt.Errorf("failed to wait for a compute worker: %w", err)
- }
- }
-
- // Byzantine nodes can only register. If defined, since we cannot control them directly, wait
- // for all nodes to become registered.
- if len(sc.Net.Byzantine()) > 0 {
- sc.Logger.Info("waiting for (all) nodes to register",
- "num_nodes", sc.Net.NumRegisterNodes(),
- )
- if err := sc.Net.Controller().WaitNodesRegistered(ctx, sc.Net.NumRegisterNodes()); err != nil {
- return epoch, fmt.Errorf("failed to wait for nodes: %w", err)
- }
- }
-
- // Then perform epoch transition(s) to elect the committees.
- if err := advanceEpoch(); err != nil { // Epoch 2
- return epoch, err
- }
- switch sc.Net.Config().Beacon.Backend {
- case "", beacon.BackendVRF:
- // The byzantine node gets jammed into a committee first thing, which
- // breaks everything because our test case failure detection log watcher
- // can't cope with expected failures. So once we elect, if the byzantine
- // node is active, we need to immediately transition into doing interesting
- // things.
- if !sc.debugWeakAlphaOk {
- // Committee elections won't happen the first round.
- if err := advanceEpoch(); err != nil { // Epoch 3
- return epoch, err
- }
- // And nodes are ineligible to be elected till their registration
- // epoch + 2.
- if err := advanceEpoch(); err != nil { // Epoch 4 (or 3 if byzantine test)
- return epoch, err
- }
- }
- if !sc.debugNoRandomInitialEpoch {
- // To prevent people from writing tests that depend on very precise
- // timekeeping by epoch, randomize the start epoch slightly.
- //
- // If this causes your test to fail, it is not this code that is
- // wrong, it is the test that is wrong.
- var randByte [1]byte
- _, _ = rand.Read(randByte[:])
- numSkips := (int)(randByte[0]&3) + 1
- sc.Logger.Info("advancing the epoch to prevent hardcoding time assumptions in tests",
- "num_advances", numSkips,
- )
- for i := 0; i < numSkips; i++ {
- if err := advanceEpoch(); err != nil {
- return epoch, err
- }
- }
- }
- }
-
- return epoch, nil
-}
-
-// RegisterScenarios registers all end-to-end scenarios.
-func RegisterScenarios() error {
- // Register non-scenario-specific parameters.
- cmd.RegisterScenarioParams(ParamsDummyScenario.Name(), ParamsDummyScenario.Parameters())
-
- // Register default scenarios which are executed if no test names are provided.
- for _, s := range []scenario.Scenario{
- // Runtime test.
- Runtime,
- RuntimeEncryption,
- RuntimeGovernance,
- RuntimeMessage,
- // Byzantine executor node.
- ByzantineExecutorHonest,
- ByzantineExecutorSchedulerHonest,
- ByzantineExecutorWrong,
- ByzantineExecutorSchedulerWrong,
- ByzantineExecutorSchedulerBogus,
- ByzantineExecutorStraggler,
- ByzantineExecutorStragglerBackup,
- ByzantineExecutorSchedulerStraggler,
- ByzantineExecutorFailureIndicating,
- ByzantineExecutorSchedulerFailureIndicating,
- ByzantineExecutorCorruptGetDiff,
- // Storage sync test.
- StorageSync,
- StorageSyncFromRegistered,
- StorageSyncInconsistent,
- StorageEarlyStateSync,
- // Sentry test.
- Sentry,
- // Keymanager tests.
- KeymanagerMasterSecrets,
- KeymanagerEphemeralSecrets,
- KeymanagerDumpRestore,
- KeymanagerRestart,
- KeymanagerReplicate,
- KeymanagerReplicateMany,
- KeymanagerRotationFailure,
- KeymanagerUpgrade,
- // Dump/restore test.
- DumpRestore,
- DumpRestoreRuntimeRoundAdvance,
- // Halt test.
- HaltRestore,
- HaltRestoreSuspended,
- HaltRestoreNonMock,
- // Consensus upgrade tests.
- GovernanceConsensusUpgrade,
- GovernanceConsensusFailUpgrade,
- GovernanceConsensusCancelUpgrade,
- // Multiple runtimes test.
- MultipleRuntimes,
- // Node shutdown test.
- NodeShutdown,
- OffsetRestart,
- // Gas fees tests.
- GasFeesRuntimes,
- // Runtime prune test.
- RuntimePrune,
- // Runtime dynamic registration test.
- RuntimeDynamic,
- // Transaction source test.
- TxSourceMultiShort,
- // Late start test.
- LateStart,
- // RuntimeUpgrade test.
- RuntimeUpgrade,
- // HistoryReindex test.
- HistoryReindex,
- // TrustRoot test.
- TrustRoot,
- TrustRootChangeTest,
- TrustRootChangeFailsTest,
- // Archive node API test.
- ArchiveAPI,
- } {
- if err := cmd.Register(s); err != nil {
- return err
- }
- }
-
- // Register non-default scenarios which are executed on-demand only.
- for _, s := range []scenario.Scenario{
- // Transaction source test. Non-default, because it runs for ~6 hours.
- TxSourceMulti,
- // SGX version of the txsource-multi-short test. Non-default, because
- // it is identical to the txsource-multi-short, only using fewer nodes
- // due to SGX CI instance resource constraints.
- TxSourceMultiShortSGX,
- } {
- if err := cmd.RegisterNondefault(s); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/go/oasis-test-runner/scenario/e2e/runtime/runtime_client.go b/go/oasis-test-runner/scenario/e2e/runtime/runtime_client.go
deleted file mode 100644
index 3058c3ac009..00000000000
--- a/go/oasis-test-runner/scenario/e2e/runtime/runtime_client.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package runtime
-
-import (
- "context"
-
- "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env"
-)
-
-// TestClient is the interface exposed to implement a runtime test
-// client that executes a pre-determined workload against a given runtime.
-type TestClient interface {
- Init(*Scenario) error
- Start(context.Context, *env.Env) error
- Stop() error
- Wait() error
-
- // Clone returns a clone of a RuntimeTestClient instance, in a state
- // that is ready for Init.
- Clone() TestClient
-}
diff --git a/go/oasis-test-runner/scenario/e2e/runtime/runtime_dynamic.go b/go/oasis-test-runner/scenario/e2e/runtime/runtime_dynamic.go
index 8db93f4a4eb..298c88d5200 100644
--- a/go/oasis-test-runner/scenario/e2e/runtime/runtime_dynamic.go
+++ b/go/oasis-test-runner/scenario/e2e/runtime/runtime_dynamic.go
@@ -3,17 +3,13 @@ package runtime

 import (
 "context"
 "fmt"
- "path/filepath"
 "time"

 beacon "github.com/oasisprotocol/oasis-core/go/beacon/api"
- "github.com/oasisprotocol/oasis-core/go/common"
 "github.com/oasisprotocol/oasis-core/go/common/crypto/signature"
 "github.com/oasisprotocol/oasis-core/go/common/quantity"
- "github.com/oasisprotocol/oasis-core/go/common/sgx"
 consensus "github.com/oasisprotocol/oasis-core/go/consensus/api"
 "github.com/oasisprotocol/oasis-core/go/consensus/api/transaction"
- keymanager "github.com/oasisprotocol/oasis-core/go/keymanager/api"
 "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env"
 "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis"
 "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis/cli"
@@ -97,7 +93,7 @@ func (sc *runtimeDynamicImpl) Run(ctx context.Context, childEnv *env.Env) error
 cli := cli.New(childEnv, sc.Net, sc.Logger)

 // Wait for all nodes to be synced before we proceed.
- if err := sc.waitNodesSynced(ctx); err != nil {
+ if err := sc.WaitNodesSynced(ctx); err != nil {
 return err
 }
@@ -130,65 +126,20 @@ func (sc *runtimeDynamicImpl) Run(ctx context.Context, childEnv *env.Env) error
 kmRt := sc.Net.Runtimes()[0]
 rtDsc := kmRt.ToRuntimeDescriptor()
 rtDsc.Deployments[0].ValidFrom = epoch + 1
- kmTxPath := filepath.Join(childEnv.Dir(), "register_km_runtime.json")
- if err = cli.Registry.GenerateRegisterRuntimeTx(childEnv.Dir(), rtDsc, nonce, kmTxPath); err != nil {
- return fmt.Errorf("failed to generate register KM runtime tx: %w", err)
+ if err = sc.RegisterRuntime(ctx, childEnv, cli, rtDsc, nonce); err != nil {
+ return err
 }
 nonce++
- if err = cli.Consensus.SubmitTx(kmTxPath); err != nil {
- return fmt.Errorf("failed to register KM runtime: %w", err)
- }

 // Generate and update the new keymanager runtime's policy.
- kmPolicyPath := filepath.Join(childEnv.Dir(), "km_policy.cbor") - kmPolicySig1Path := filepath.Join(childEnv.Dir(), "km_policy_sig1.pem") - kmPolicySig2Path := filepath.Join(childEnv.Dir(), "km_policy_sig2.pem") - kmPolicySig3Path := filepath.Join(childEnv.Dir(), "km_policy_sig3.pem") - kmUpdateTxPath := filepath.Join(childEnv.Dir(), "km_gen_update.json") - sc.Logger.Info("building KM SGX policy enclave policies map") - enclavePolicies := make(map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX) - kmRtEncID := kmRt.GetEnclaveIdentity(0) - var havePolicy bool - if kmRtEncID != nil { - enclavePolicies[*kmRtEncID] = &keymanager.EnclavePolicySGX{} - enclavePolicies[*kmRtEncID].MayQuery = make(map[common.Namespace][]sgx.EnclaveIdentity) - enclavePolicies[*kmRtEncID].MayReplicate = []sgx.EnclaveIdentity{} - for _, rt := range sc.Net.Runtimes() { - if rt.Kind() != registry.KindCompute { - continue - } - if eid := rt.GetEnclaveIdentity(0); eid != nil { - enclavePolicies[*kmRtEncID].MayQuery[rt.ID()] = []sgx.EnclaveIdentity{*eid} - // This is set only in SGX mode. - havePolicy = true - } - } - } - sc.Logger.Info("initing KM policy") - if err = cli.Keymanager.InitPolicy(kmRt.ID(), 1, 0, enclavePolicies, kmPolicyPath); err != nil { - return err - } - sc.Logger.Info("signing KM policy") - if err = cli.Keymanager.SignPolicy("1", kmPolicyPath, kmPolicySig1Path); err != nil { - return err - } - if err = cli.Keymanager.SignPolicy("2", kmPolicyPath, kmPolicySig2Path); err != nil { - return err - } - if err = cli.Keymanager.SignPolicy("3", kmPolicyPath, kmPolicySig3Path); err != nil { + policies, err := sc.BuildEnclavePolicies(childEnv) + if err != nil { return err } - if havePolicy { - // In SGX mode, we can update the policy as intended. - sc.Logger.Info("updating KM policy") - if err = cli.Keymanager.GenUpdate(nonce, kmPolicyPath, []string{kmPolicySig1Path, kmPolicySig2Path, kmPolicySig3Path}, kmUpdateTxPath); err != nil { - return err - } - nonce++ - if err = cli.Consensus.SubmitTx(kmUpdateTxPath); err != nil { - return fmt.Errorf("failed to update KM policy: %w", err) - } - } else { + switch policies { + case nil: + sc.Logger.Info("no SGX runtimes, skipping policy update") + // In non-SGX mode, the policy update fails with a policy checksum // mismatch (the non-SGX KM returns an empty policy), so we need to // do an epoch transition instead (to complete the KM runtime @@ -196,6 +147,12 @@ func (sc *runtimeDynamicImpl) Run(ctx context.Context, childEnv *env.Env) error if err = sc.epochTransition(ctx); err != nil { return err } + default: + // In SGX mode, we can update the policy as intended. + if err = sc.ApplyKeyManagerPolicy(ctx, childEnv, cli, 0, policies, nonce); err != nil { + return err + } + nonce++ } // Wait for key manager nodes to register, then make another epoch transition. 
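The scenario helpers used above (RegisterRuntime, BuildEnclavePolicies, ApplyKeyManagerPolicy) are shared Scenario methods whose bodies fall outside this diff. Below is a minimal sketch of the first two, reconstructed from the inline code deleted in this hunk; the signatures are inferred from the call sites, so the parameter lists, the nil-policies return, and the file layout are assumptions rather than the actual implementation.

package runtime

import (
	"context"
	"fmt"
	"path/filepath"

	"github.com/oasisprotocol/oasis-core/go/common"
	"github.com/oasisprotocol/oasis-core/go/common/sgx"
	keymanager "github.com/oasisprotocol/oasis-core/go/keymanager/api"
	"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env"
	"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis/cli"
	registry "github.com/oasisprotocol/oasis-core/go/registry/api"
)

// RegisterRuntime generates and submits a runtime registration transaction,
// replacing the GenerateRegisterRuntimeTx/SubmitTx pairs previously repeated
// in every scenario. (Sketch only: the real helper may also make use of ctx.)
func (sc *Scenario) RegisterRuntime(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, dsc registry.Runtime, nonce uint64) error {
	txPath := filepath.Join(childEnv.Dir(), fmt.Sprintf("register_runtime_%s.json", dsc.ID))
	if err := cli.Registry.GenerateRegisterRuntimeTx(childEnv.Dir(), dsc, nonce, txPath); err != nil {
		return fmt.Errorf("failed to generate register runtime tx: %w", err)
	}
	if err := cli.Consensus.SubmitTx(txPath); err != nil {
		return fmt.Errorf("failed to register runtime: %w", err)
	}
	return nil
}

// BuildEnclavePolicies builds the SGX policy map that lets each SGX compute
// runtime query the key manager. It returns nil when no enclave identities
// are available (non-SGX builds), which the callers above treat as "skip the
// policy update".
func (sc *Scenario) BuildEnclavePolicies(childEnv *env.Env) (map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX, error) {
	kmRt := sc.Net.Runtimes()[0]
	kmRtEncID := kmRt.GetEnclaveIdentity(0)
	if kmRtEncID == nil {
		return nil, nil
	}
	policies := make(map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX)
	policies[*kmRtEncID] = &keymanager.EnclavePolicySGX{
		MayQuery:     make(map[common.Namespace][]sgx.EnclaveIdentity),
		MayReplicate: []sgx.EnclaveIdentity{},
	}
	var haveSGXCompute bool
	for _, rt := range sc.Net.Runtimes() {
		if rt.Kind() != registry.KindCompute {
			continue
		}
		if eid := rt.GetEnclaveIdentity(0); eid != nil {
			policies[*kmRtEncID].MayQuery[rt.ID()] = []sgx.EnclaveIdentity{*eid}
			haveSGXCompute = true
		}
	}
	if !haveSGXCompute {
		// Mirrors the havePolicy flag in the removed inline code.
		return nil, nil
	}
	return policies, nil
}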
@@ -221,14 +178,10 @@ func (sc *runtimeDynamicImpl) Run(ctx context.Context, childEnv *env.Env) error compRt := sc.Net.Runtimes()[1] compRtDesc := compRt.ToRuntimeDescriptor() compRtDesc.Deployments[0].ValidFrom = epoch + 1 - txPath := filepath.Join(childEnv.Dir(), "register_compute_runtime.json") - if err = cli.Registry.GenerateRegisterRuntimeTx(childEnv.Dir(), compRtDesc, nonce, txPath); err != nil { - return fmt.Errorf("failed to generate register compute runtime tx: %w", err) + if err = sc.RegisterRuntime(ctx, childEnv, cli, compRtDesc, nonce); err != nil { + return err } nonce++ - if err = cli.Consensus.SubmitTx(txPath); err != nil { - return fmt.Errorf("failed to register compute runtime: %w", err) - } // Wait for compute workers to become ready. sc.Logger.Info("waiting for compute workers to initialize", @@ -259,7 +212,7 @@ func (sc *runtimeDynamicImpl) Run(ctx context.Context, childEnv *env.Env) error sc.Logger.Info("submitting transaction to runtime", "seq", i, ) - if _, err = sc.submitKeyValueRuntimeInsertTx(ctx, runtimeID, rtNonce, "hello", fmt.Sprintf("world %d", i), false, 0); err != nil { + if _, err = sc.submitKeyValueRuntimeInsertTx(ctx, KeyValueRuntimeID, rtNonce, "hello", fmt.Sprintf("world %d", i), false, 0); err != nil { return err } rtNonce++ @@ -367,7 +320,7 @@ func (sc *runtimeDynamicImpl) Run(ctx context.Context, childEnv *env.Env) error // Submit a runtime transaction to check whether the runtimes got resumed. sc.Logger.Info("submitting transaction to runtime") - if _, err = sc.submitKeyValueRuntimeInsertTx(ctx, runtimeID, rtNonce, "hello", "final world", false, 0); err != nil { + if _, err = sc.submitKeyValueRuntimeInsertTx(ctx, KeyValueRuntimeID, rtNonce, "hello", "final world", false, 0); err != nil { return err } rtNonce++ @@ -538,17 +491,14 @@ func (sc *runtimeDynamicImpl) Run(ctx context.Context, childEnv *env.Env) error return fmt.Errorf("failed to escrow stake: %w", err) } // Update the runtime governance model. - if err = cli.Registry.GenerateRegisterRuntimeTx(childEnv.Dir(), compRtDesc, nonce, txPath); err != nil { - return fmt.Errorf("failed to generate register compute runtime tx: %w", err) + if err = sc.RegisterRuntime(ctx, childEnv, cli, compRtDesc, nonce); err != nil { + return err } nonce++ // nolint: ineffassign - if err = cli.Consensus.SubmitTx(txPath); err != nil { - return fmt.Errorf("failed to register compute runtime: %w", err) - } // Submit a runtime transaction to check whether the runtimes got resumed. 
sc.Logger.Info("submitting transaction to runtime") - if _, err = sc.submitKeyValueRuntimeInsertTx(ctx, runtimeID, rtNonce, "hello", "final world for sure", false, 0); err != nil { + if _, err = sc.submitKeyValueRuntimeInsertTx(ctx, KeyValueRuntimeID, rtNonce, "hello", "final world for sure", false, 0); err != nil { return err } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/runtime_message.go b/go/oasis-test-runner/scenario/e2e/runtime/runtime_message.go index 3966c513011..6fd6bf8c0b3 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/runtime_message.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/runtime_message.go @@ -65,7 +65,7 @@ func (sc *runtimeMessageImpl) Run(ctx context.Context, childEnv *env.Env) error c := sc.Net.ClientController().RuntimeClient - blkCh, sub, err := c.WatchBlocks(ctx, runtimeID) + blkCh, sub, err := c.WatchBlocks(ctx, KeyValueRuntimeID) if err != nil { return err } @@ -78,7 +78,7 @@ func (sc *runtimeMessageImpl) Run(ctx context.Context, childEnv *env.Env) error // contain message results of the consensus transfer. sc.Logger.Debug("submitting consensus_transfer runtime transaction") var txMetaResponse *api.SubmitTxMetaResponse - if txMetaResponse, err = sc.submitConsensusXferTxMeta(ctx, runtimeID, staking.Transfer{}, 0); err != nil { + if txMetaResponse, err = sc.submitConsensusXferTxMeta(ctx, KeyValueRuntimeID, staking.Transfer{}, 0); err != nil { return err } if _, err = unpackRawTxResp(txMetaResponse.Output); err != nil { @@ -110,7 +110,7 @@ func (sc *runtimeMessageImpl) Run(ctx context.Context, childEnv *env.Env) error return fmt.Errorf("expected normal round, got: %d", ht) } txs, err := c.GetTransactions(ctx, &api.GetTransactionsRequest{ - RuntimeID: runtimeID, + RuntimeID: KeyValueRuntimeID, Round: round, }) if err != nil { diff --git a/go/oasis-test-runner/scenario/e2e/runtime/runtime_prune.go b/go/oasis-test-runner/scenario/e2e/runtime/runtime_prune.go index d2bdb360261..0625c5b5ade 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/runtime_prune.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/runtime_prune.go @@ -81,7 +81,7 @@ func (sc *runtimePruneImpl) Run(ctx context.Context, childEnv *env.Env) error { "seq", i, ) - if _, err = sc.submitKeyValueRuntimeInsertTx(ctx, runtimeID, uint64(i), "hello", fmt.Sprintf("world %d", i), false, 0); err != nil { + if _, err = sc.submitKeyValueRuntimeInsertTx(ctx, KeyValueRuntimeID, uint64(i), "hello", fmt.Sprintf("world %d", i), false, 0); err != nil { return err } } @@ -92,7 +92,7 @@ func (sc *runtimePruneImpl) Run(ctx context.Context, childEnv *env.Env) error { // Once the transactions are complete, check if blocks got pruned. 
sc.Logger.Info("fetching latest block") latestBlk, err := c.GetBlock(ctx, &api.GetBlockRequest{ - RuntimeID: runtimeID, + RuntimeID: KeyValueRuntimeID, Round: api.RoundLatest, }) if err != nil { @@ -104,7 +104,7 @@ func (sc *runtimePruneImpl) Run(ctx context.Context, childEnv *env.Env) error { ) for i := uint64(0); i <= latestBlk.Header.Round; i++ { _, err = c.GetBlock(ctx, &api.GetBlockRequest{ - RuntimeID: runtimeID, + RuntimeID: KeyValueRuntimeID, Round: i, }) if i <= latestBlk.Header.Round-pruneNumKept { diff --git a/go/oasis-test-runner/scenario/e2e/runtime/runtime_upgrade.go b/go/oasis-test-runner/scenario/e2e/runtime/runtime_upgrade.go index f9f3e60a142..487ac617e15 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/runtime_upgrade.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/runtime_upgrade.go @@ -1,23 +1,13 @@ package runtime import ( - "bytes" "context" - "fmt" - "path/filepath" "time" - "github.com/oasisprotocol/oasis-core/go/common" - "github.com/oasisprotocol/oasis-core/go/common/sgx" - "github.com/oasisprotocol/oasis-core/go/common/version" - consensus "github.com/oasisprotocol/oasis-core/go/consensus/api" - keymanager "github.com/oasisprotocol/oasis-core/go/keymanager/api" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis/cli" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario" - registry "github.com/oasisprotocol/oasis-core/go/registry/api" - commonWorker "github.com/oasisprotocol/oasis-core/go/worker/common/api" ) // RuntimeUpgrade is the runtime upgrade scenario. @@ -28,8 +18,6 @@ const versionActivationTimeout = 15 * time.Second type runtimeUpgradeImpl struct { Scenario - nonce uint64 - upgradedRuntimeIndex int } @@ -37,7 +25,7 @@ func newRuntimeUpgradeImpl() scenario.Scenario { return &runtimeUpgradeImpl{ Scenario: *NewScenario( "runtime-upgrade", - NewKVTestClient().WithScenario(InsertRemoveKeyValueEncScenario), + NewTestClient().WithScenario(InsertRemoveKeyValueEncScenario), ), } } @@ -48,40 +36,9 @@ func (sc *runtimeUpgradeImpl) Fixture() (*oasis.NetworkFixture, error) { return nil, err } - // Get number of compute runtimes. - computeIndex := -1 - for i := range f.Runtimes { - if f.Runtimes[i].Kind == registry.KindCompute { - computeIndex = i - break - } - } - if computeIndex == -1 { - return nil, fmt.Errorf("expected at least one compute runtime in the fixture, none found") - } - - // Load the upgraded runtime binary. - newRuntimeBinaries := sc.resolveRuntimeBinaries("simple-keyvalue-upgrade") - - // Setup the upgraded runtime (first is keymanager, others should be generic compute). - runtimeFix := f.Runtimes[computeIndex] - runtimeFix.Deployments = append([]oasis.DeploymentCfg{}, runtimeFix.Deployments...) - runtimeFix.Deployments = append(runtimeFix.Deployments, oasis.DeploymentCfg{ - Version: version.Version{Major: 0, Minor: 1, Patch: 0}, - Binaries: newRuntimeBinaries, - }) - - // The upgraded runtime will be registered later. - runtimeFix.ExcludeFromGenesis = true - sc.upgradedRuntimeIndex = len(f.Runtimes) - f.Runtimes = append(f.Runtimes, runtimeFix) - - // Compute nodes should include the upgraded runtime version. - for i := range f.ComputeWorkers { - f.ComputeWorkers[i].Runtimes = []int{sc.upgradedRuntimeIndex} + if sc.upgradedRuntimeIndex, err = sc.UpgradeComputeRuntimeFixture(f); err != nil { + return nil, err } - // The client node should include the upgraded runtime version. 
- f.Clients[0].Runtimes = []int{sc.upgradedRuntimeIndex} return f, nil } @@ -92,208 +49,24 @@ func (sc *runtimeUpgradeImpl) Clone() scenario.Scenario { } } -func (sc *runtimeUpgradeImpl) applyUpgradePolicy(childEnv *env.Env) error { - cli := cli.New(childEnv, sc.Net, sc.Logger) - - kmPolicyPath := filepath.Join(childEnv.Dir(), "km_policy.cbor") - kmPolicySig1Path := filepath.Join(childEnv.Dir(), "km_policy_sig1.pem") - kmPolicySig2Path := filepath.Join(childEnv.Dir(), "km_policy_sig2.pem") - kmPolicySig3Path := filepath.Join(childEnv.Dir(), "km_policy_sig3.pem") - kmUpdateTxPath := filepath.Join(childEnv.Dir(), "km_gen_update.json") - - kmRuntime := sc.Net.Runtimes()[0] - oldRuntime := sc.Net.Runtimes()[1] - newRuntime := sc.Net.Runtimes()[2] - // Sanity check fixture. - if err := func() error { - if kmRuntime.Kind() != registry.KindKeyManager { - return fmt.Errorf("keymanager runtime not of kind KindKeyManager") - } - if oldRuntime.Kind() != registry.KindCompute { - return fmt.Errorf("old runtime not of kind KindCompute") - } - if newRuntime.Kind() != registry.KindCompute { - return fmt.Errorf("new runtime not of kind KindCompute") - } - if oldRuntime.ID() != newRuntime.ID() { - return fmt.Errorf("runtime ID mismatch") - } - return nil - }(); err != nil { - return fmt.Errorf("runtimes fixture sanity check: %w", err) - } - - kmRuntimeEncID := kmRuntime.GetEnclaveIdentity(0) - oldRuntimeEncID := oldRuntime.GetEnclaveIdentity(0) - newRuntimeEncID := newRuntime.GetEnclaveIdentity(1) - - if oldRuntimeEncID == nil && newRuntimeEncID == nil { - sc.Logger.Info("No SGX runtimes, skipping policy update") - return nil - } - - // Ensure enclave IDs differ between the old and new runtimes. - oldEncID, _ := oldRuntimeEncID.MarshalText() - newEncID, _ := newRuntimeEncID.MarshalText() - if bytes.Equal(oldEncID, newEncID) { - return fmt.Errorf("expected different enclave identities, got: %s", newEncID) - } - - // Build updated SGX policies. - sc.Logger.Info("building new KM SGX policy enclave policies map") - enclavePolicies := make(map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX) - - enclavePolicies[*kmRuntimeEncID] = &keymanager.EnclavePolicySGX{} - enclavePolicies[*kmRuntimeEncID].MayQuery = map[common.Namespace][]sgx.EnclaveIdentity{ - // Allow both old and new compute runtimes to query private data. 
- newRuntime.ID(): { - *oldRuntimeEncID, - *newRuntimeEncID, - }, - } - - sc.Logger.Info("initing updated KM policy") - if err := cli.Keymanager.InitPolicy(kmRuntime.ID(), 2, 0, enclavePolicies, kmPolicyPath); err != nil { - return err - } - sc.Logger.Info("signing updated KM policy") - if err := cli.Keymanager.SignPolicy("1", kmPolicyPath, kmPolicySig1Path); err != nil { - return err - } - if err := cli.Keymanager.SignPolicy("2", kmPolicyPath, kmPolicySig2Path); err != nil { - return err - } - if err := cli.Keymanager.SignPolicy("3", kmPolicyPath, kmPolicySig3Path); err != nil { - return err - } - - sc.Logger.Info("updating KM policy") - if err := cli.Keymanager.GenUpdate(sc.nonce, kmPolicyPath, []string{kmPolicySig1Path, kmPolicySig2Path, kmPolicySig3Path}, kmUpdateTxPath); err != nil { - return err - } - if err := cli.Consensus.SubmitTx(kmUpdateTxPath); err != nil { - return fmt.Errorf("failed to update KM policy: %w", err) - } - sc.nonce++ - - return nil -} - -func (sc *runtimeUpgradeImpl) ensureActiveVersion(ctx context.Context, v version.Version) error { - ctx, cancel := context.WithTimeout(ctx, versionActivationTimeout) - defer cancel() - - rt := sc.Net.Runtimes()[sc.upgradedRuntimeIndex] - - sc.Logger.Info("ensuring that all compute workers have the correct active version", - "version", v, - ) - - for _, node := range sc.Net.ComputeWorkers() { - nodeCtrl, err := oasis.NewController(node.SocketPath()) - if err != nil { - return fmt.Errorf("%s: failed to create controller: %w", node.Name, err) - } - - // Wait for the version to become active and ensure no suspension observed. - for { - status, err := nodeCtrl.GetStatus(ctx) - if err != nil { - return fmt.Errorf("%s: failed to query status: %w", node.Name, err) - } - - provisioner := status.Runtimes[rt.ID()].Provisioner - if provisioner != "sandbox" && provisioner != "sgx" { - return fmt.Errorf("%s: unexpected runtime provisioner for runtime '%s': %s", node.Name, rt.ID(), provisioner) - } - - cs := status.Runtimes[rt.ID()].Committee - if cs == nil { - return fmt.Errorf("%s: missing status for runtime '%s'", node.Name, rt.ID()) - } - - if cs.ActiveVersion == nil { - return fmt.Errorf("%s: no version is active", node.Name) - } - // Retry if not yet activated. - if cs.ActiveVersion.ToU64() < v.ToU64() { - time.Sleep(1 * time.Second) - continue - } - if *cs.ActiveVersion != v { - return fmt.Errorf("%s: unexpected active version (expected: %s got: %s)", node.Name, v, cs.ActiveVersion) - } - if cs.Status != commonWorker.StatusStateReady { - return fmt.Errorf("%s: runtime is not ready (got: %s)", node.Name, cs.Status) - } - break - } - } - return nil -} - func (sc *runtimeUpgradeImpl) Run(ctx context.Context, childEnv *env.Env) error { cli := cli.New(childEnv, sc.Net, sc.Logger) - if err := sc.StartNetworkAndTestClient(ctx, childEnv); err != nil { + // Start the network and run the test client. + if err := sc.StartNetworkAndWaitForClientSync(ctx); err != nil { return err } - sc.Logger.Info("waiting for client to exit") - // Wait for the client to exit. - if err := sc.WaitTestClientOnly(); err != nil { + if err := sc.RunTestClientAndCheckLogs(ctx, childEnv); err != nil { return err } - // Make sure the old version is active on all compute nodes. - if err := sc.ensureActiveVersion(ctx, version.MustFromString("0.0.0")); err != nil { - return err - } - - // Generate and update a policy that will allow the new runtime to run. 
- if err := sc.applyUpgradePolicy(childEnv); err != nil { - return fmt.Errorf("updating policies: %w", err) - } - - // Fetch current epoch. - epoch, err := sc.Net.Controller().Beacon.GetEpoch(ctx, consensus.HeightLatest) - if err != nil { - return fmt.Errorf("failed to get current epoch: %w", err) - } - upgradeEpoch := epoch + 3 - - // Update runtime to include the new enclave identity. - sc.Logger.Info("updating runtime descriptor") - newRt := sc.Net.Runtimes()[sc.upgradedRuntimeIndex] - newRtDsc := newRt.ToRuntimeDescriptor() - newRtDsc.Deployments[1].ValidFrom = upgradeEpoch - - newTxPath := filepath.Join(childEnv.Dir(), "register_update_compute_runtime.json") - if err := cli.Registry.GenerateRegisterRuntimeTx(childEnv.Dir(), newRtDsc, sc.nonce, newTxPath); err != nil { - return fmt.Errorf("failed to generate register compute runtime tx: %w", err) - } - sc.nonce++ - if err := cli.Consensus.SubmitTx(newTxPath); err != nil { - return fmt.Errorf("failed to update compute runtime: %w", err) - } - - // Wait for activation epoch. - sc.Logger.Info("waiting for runtime upgrade epoch", - "epoch", upgradeEpoch, - ) - if err := sc.Net.Controller().Beacon.WaitEpoch(ctx, upgradeEpoch); err != nil { - return fmt.Errorf("failed to wait for epoch: %w", err) - } - - // Make sure the new version is active. - if err := sc.ensureActiveVersion(ctx, version.MustFromString("0.1.0")); err != nil { + // Upgrade the compute runtime. + if err := sc.UpgradeComputeRuntime(ctx, childEnv, cli, sc.upgradedRuntimeIndex, 0); err != nil { return err } // Run client again. sc.Logger.Info("starting a second client to check if runtime works") - sc.Scenario.testClient = NewKVTestClient().WithSeed("seed2").WithScenario(InsertRemoveKeyValueEncScenarioV2) - if err := sc.startTestClientOnly(ctx, childEnv); err != nil { - return err - } - return sc.waitTestClient() + sc.Scenario.TestClient = NewTestClient().WithSeed("seed2").WithScenario(InsertRemoveKeyValueEncScenarioV2) + return sc.RunTestClientAndCheckLogs(ctx, childEnv) } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/scenario.go b/go/oasis-test-runner/scenario/e2e/runtime/scenario.go new file mode 100644 index 00000000000..b558ee99698 --- /dev/null +++ b/go/oasis-test-runner/scenario/e2e/runtime/scenario.go @@ -0,0 +1,383 @@ +package runtime + +import ( + "context" + "fmt" + "time" + + beacon "github.com/oasisprotocol/oasis-core/go/beacon/api" + "github.com/oasisprotocol/oasis-core/go/common/node" + "github.com/oasisprotocol/oasis-core/go/common/sgx" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/cmd" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/log" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario" + "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario/e2e" + registry "github.com/oasisprotocol/oasis-core/go/registry/api" + runtimeConfig "github.com/oasisprotocol/oasis-core/go/runtime/config" + scheduler "github.com/oasisprotocol/oasis-core/go/scheduler/api" +) + +const ( + cfgRuntimeBinaryDirDefault = "runtime.binary_dir.default" + cfgRuntimeBinaryDirIntelSGX = "runtime.binary_dir.intel-sgx" + cfgRuntimeSourceDir = "runtime.source_dir" + cfgRuntimeTargetDir = "runtime.target_dir" + cfgRuntimeLoader = "runtime.loader" + cfgRuntimeProvisioner = "runtime.provisioner" + cfgTEEHardware = "tee_hardware" + cfgIasMock = "ias.mock" + cfgEpochInterval = "epoch.interval" +) + +var ( + 
// ParamsDummyScenario is a dummy instance of the runtime Scenario used to register global e2e/runtime flags. + ParamsDummyScenario = NewScenario("", nil) + + // Runtime is the basic network + client test case with runtime support. + Runtime scenario.Scenario = NewScenario( + "runtime", + NewTestClient().WithScenario(SimpleKeyValueScenario), + ) + + // RuntimeEncryption is the basic network + client with encryption test case. + RuntimeEncryption scenario.Scenario = NewScenario( + "runtime-encryption", + NewTestClient().WithScenario(InsertRemoveKeyValueEncScenario), + ) + + // DefaultRuntimeLogWatcherHandlerFactories is a list of default log watcher + // handler factories for the basic scenario. + DefaultRuntimeLogWatcherHandlerFactories = []log.WatcherHandlerFactory{ + oasis.LogAssertNoTimeouts(), + oasis.LogAssertNoRoundFailures(), + oasis.LogAssertNoExecutionDiscrepancyDetected(), + } +) + +// Scenario is a base class for tests involving oasis-node with runtime. +type Scenario struct { + e2e.Scenario + + TestClient *TestClient + + // This disables the random initial epoch for tests that are extremely + // sensitive to the initial epoch. Ideally this shouldn't be set for + // any of our tests, but I'm sick and tired of trying to debug poorly + // written test cases. + // + // If your new test needs this, your test is bad, and you should go + // and rewrite it so that this option isn't set. + debugNoRandomInitialEpoch bool + + // The byzantine tests also explode since the node only runs for + // a single epoch. + // + // If your new test needs this, your test is bad, and you should go + // and rewrite it so that this option isn't set. + debugWeakAlphaOk bool +} + +// NewScenario creates a new base scenario for oasis-node runtime end-to-end tests. +func NewScenario(name string, testClient *TestClient) *Scenario { + // Empty scenario name is used for registering global parameters only.
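+ // For illustration: NewScenario("sentry", ...) produces the name + // "runtime/sentry", which the embedded e2e.Scenario expands to + // "e2e/runtime/sentry", while NewScenario("", nil) yields just "runtime", + // as used by ParamsDummyScenario above.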
+ fullName := "runtime" + if name != "" { + fullName += "/" + name + } + + sc := &Scenario{ + Scenario: *e2e.NewScenario(fullName), + TestClient: testClient, + } + sc.Flags.String(cfgRuntimeBinaryDirDefault, "", "(no-TEE) path to the runtime binaries directory") + sc.Flags.String(cfgRuntimeBinaryDirIntelSGX, "", "(Intel SGX) path to the runtime binaries directory") + sc.Flags.String(cfgRuntimeSourceDir, "", "path to the runtime source base dir") + sc.Flags.String(cfgRuntimeTargetDir, "", "path to the Cargo target dir (should be a parent of the runtime binary dir)") + sc.Flags.String(cfgRuntimeLoader, "oasis-core-runtime-loader", "path to the runtime loader") + sc.Flags.String(cfgRuntimeProvisioner, "sandboxed", "the runtime provisioner: mock, unconfined, or sandboxed") + sc.Flags.String(cfgTEEHardware, "", "TEE hardware to use") + sc.Flags.Bool(cfgIasMock, true, "if mock IAS service should be used") + sc.Flags.Int64(cfgEpochInterval, 0, "epoch interval") + + return sc +} + +func (sc *Scenario) Clone() scenario.Scenario { + var testClient *TestClient + if sc.TestClient != nil { + testClient = sc.TestClient.Clone() + } + return &Scenario{ + Scenario: sc.Scenario.Clone(), + TestClient: testClient, + debugNoRandomInitialEpoch: sc.debugNoRandomInitialEpoch, + debugWeakAlphaOk: sc.debugWeakAlphaOk, + } +} + +func (sc *Scenario) PreInit(childEnv *env.Env) error { + return nil +} + +func (sc *Scenario) Fixture() (*oasis.NetworkFixture, error) { + f, err := sc.Scenario.Fixture() + if err != nil { + return nil, err + } + + tee, err := sc.TEEHardware() + if err != nil { + return nil, err + } + var mrSigner *sgx.MrSigner + if tee == node.TEEHardwareIntelSGX { + mrSigner = &sgx.FortanixDummyMrSigner + } + runtimeLoader, _ := sc.Flags.GetString(cfgRuntimeLoader) + iasMock, _ := sc.Flags.GetBool(cfgIasMock) + runtimeProvisionerRaw, _ := sc.Flags.GetString(cfgRuntimeProvisioner) + var runtimeProvisioner runtimeConfig.RuntimeProvisioner + if err = runtimeProvisioner.UnmarshalText([]byte(runtimeProvisionerRaw)); err != nil { + return nil, fmt.Errorf("failed to parse runtime provisioner: %w", err) + } + + ff := &oasis.NetworkFixture{ + TEE: oasis.TEEFixture{ + Hardware: tee, + MrSigner: mrSigner, + }, + Network: oasis.NetworkCfg{ + NodeBinary: f.Network.NodeBinary, + RuntimeSGXLoaderBinary: runtimeLoader, + DefaultLogWatcherHandlerFactories: DefaultRuntimeLogWatcherHandlerFactories, + Consensus: f.Network.Consensus, + IAS: oasis.IASCfg{ + Mock: iasMock, + }, + }, + Entities: []oasis.EntityCfg{ + {IsDebugTestEntity: true}, + {}, + }, + Runtimes: []oasis.RuntimeFixture{ + // Key manager runtime. + { + ID: KeyManagerRuntimeID, + Kind: registry.KindKeyManager, + Entity: 0, + Keymanager: -1, + AdmissionPolicy: registry.RuntimeAdmissionPolicy{ + AnyNode: ®istry.AnyNodeRuntimeAdmissionPolicy{}, + }, + GovernanceModel: registry.GovernanceEntity, + Deployments: []oasis.DeploymentCfg{ + { + Binaries: sc.ResolveRuntimeBinaries(KeyManagerRuntimeBinary), + }, + }, + }, + // Compute runtime. 
+ { + ID: KeyValueRuntimeID, + Kind: registry.KindCompute, + Entity: 0, + Keymanager: 0, + Executor: registry.ExecutorParameters{ + GroupSize: 2, + GroupBackupSize: 1, + RoundTimeout: 20, + MaxMessages: 128, + }, + TxnScheduler: registry.TxnSchedulerParameters{ + MaxBatchSize: 100, + MaxBatchSizeBytes: 1024 * 1024, + BatchFlushTimeout: 1 * time.Second, + ProposerTimeout: 20, + MaxInMessages: 128, + }, + AdmissionPolicy: registry.RuntimeAdmissionPolicy{ + AnyNode: &registry.AnyNodeRuntimeAdmissionPolicy{}, + }, + Constraints: map[scheduler.CommitteeKind]map[scheduler.Role]registry.SchedulingConstraints{ + scheduler.KindComputeExecutor: { + scheduler.RoleWorker: { + MinPoolSize: &registry.MinPoolSizeConstraint{ + Limit: 2, + }, + }, + scheduler.RoleBackupWorker: { + MinPoolSize: &registry.MinPoolSizeConstraint{ + Limit: 1, + }, + }, + }, + }, + GovernanceModel: registry.GovernanceEntity, + Deployments: []oasis.DeploymentCfg{ + { + Binaries: sc.ResolveRuntimeBinaries(KeyValueRuntimeBinary), + }, + }, + }, + }, + Validators: []oasis.ValidatorFixture{ + {Entity: 1, Consensus: oasis.ConsensusFixture{SupplementarySanityInterval: 1}}, + {Entity: 1, Consensus: oasis.ConsensusFixture{}}, + {Entity: 1, Consensus: oasis.ConsensusFixture{}}, + }, + KeymanagerPolicies: []oasis.KeymanagerPolicyFixture{ + {Runtime: 0, Serial: 1, MasterSecretRotationInterval: 0}, + }, + Keymanagers: []oasis.KeymanagerFixture{ + { + RuntimeProvisioner: runtimeProvisioner, + Runtime: 0, + Entity: 1, + Policy: 0, + SkipPolicy: tee != node.TEEHardwareIntelSGX, + }, + }, + ComputeWorkers: []oasis.ComputeWorkerFixture{ + {RuntimeProvisioner: runtimeProvisioner, Entity: 1, Runtimes: []int{1}}, + { + RuntimeProvisioner: runtimeProvisioner, + Entity: 1, + Runtimes: []int{1}, + RuntimeConfig: map[int]map[string]interface{}{ + 1: { + "core": map[string]interface{}{ + "min_gas_price": 1, // Just to test support for runtime configuration. + }, + }, + }, + }, + {RuntimeProvisioner: runtimeProvisioner, Entity: 1, Runtimes: []int{1}}, + }, + Sentries: []oasis.SentryFixture{}, + Seeds: []oasis.SeedFixture{{}}, + Clients: []oasis.ClientFixture{ + {RuntimeProvisioner: runtimeProvisioner, Runtimes: []int{1}}, + }, + } + + if epochInterval, _ := sc.Flags.GetInt64(cfgEpochInterval); epochInterval > 0 { + ff.Network.Beacon.InsecureParameters = &beacon.InsecureParameters{ + Interval: epochInterval, + } + ff.Network.Beacon.VRFParameters = &beacon.VRFParameters{ + AlphaHighQualityThreshold: 3, + Interval: epochInterval, + ProofSubmissionDelay: epochInterval / 2, + } + } + + return ff, nil +} + +func (sc *Scenario) Run(ctx context.Context, childEnv *env.Env) error { + if err := sc.StartNetworkAndTestClient(ctx, childEnv); err != nil { + return err + } + return sc.WaitTestClientAndCheckLogs() +} + +// RegisterScenarios registers all end-to-end scenarios. +func RegisterScenarios() error { + // Register non-scenario-specific parameters. + cmd.RegisterScenarioParams(ParamsDummyScenario.Name(), ParamsDummyScenario.Parameters()) + + // Register default scenarios, which are executed if no test names are provided. + for _, s := range []scenario.Scenario{ + // Runtime test. + Runtime, + RuntimeEncryption, + RuntimeGovernance, + RuntimeMessage, + // Byzantine executor node.
+ ByzantineExecutorHonest, + ByzantineExecutorSchedulerHonest, + ByzantineExecutorWrong, + ByzantineExecutorSchedulerWrong, + ByzantineExecutorSchedulerBogus, + ByzantineExecutorStraggler, + ByzantineExecutorStragglerBackup, + ByzantineExecutorSchedulerStraggler, + ByzantineExecutorFailureIndicating, + ByzantineExecutorSchedulerFailureIndicating, + ByzantineExecutorCorruptGetDiff, + // Storage sync test. + StorageSync, + StorageSyncFromRegistered, + StorageSyncInconsistent, + StorageEarlyStateSync, + // Sentry test. + Sentry, + // Keymanager tests. + KeymanagerMasterSecrets, + KeymanagerEphemeralSecrets, + KeymanagerDumpRestore, + KeymanagerRestart, + KeymanagerReplicate, + KeymanagerReplicateMany, + KeymanagerRotationFailure, + KeymanagerUpgrade, + // Dump/restore test. + DumpRestore, + DumpRestoreRuntimeRoundAdvance, + // Halt test. + HaltRestore, + HaltRestoreSuspended, + HaltRestoreNonMock, + // Consensus upgrade tests. + GovernanceConsensusUpgrade, + GovernanceConsensusFailUpgrade, + GovernanceConsensusCancelUpgrade, + // Multiple runtimes test. + MultipleRuntimes, + // Node shutdown test. + NodeShutdown, + OffsetRestart, + // Gas fees tests. + GasFeesRuntimes, + // Runtime prune test. + RuntimePrune, + // Runtime dynamic registration test. + RuntimeDynamic, + // Transaction source test. + TxSourceMultiShort, + // Late start test. + LateStart, + // RuntimeUpgrade test. + RuntimeUpgrade, + // HistoryReindex test. + HistoryReindex, + // TrustRoot test. + TrustRoot, + TrustRootChangeTest, + TrustRootChangeFailsTest, + // Archive node API test. + ArchiveAPI, + } { + if err := cmd.Register(s); err != nil { + return err + } + } + + // Register non-default scenarios which are executed on-demand only. + for _, s := range []scenario.Scenario{ + // Transaction source test. Non-default, because it runs for ~6 hours. + TxSourceMulti, + // SGX version of the txsource-multi-short test. Non-default, because + // it is identical to the txsource-multi-short, only using fewer nodes + // due to SGX CI instance resource constraints.
+ TxSourceMultiShortSGX, + } { + if err := cmd.RegisterNondefault(s); err != nil { + return err + } + } + + return nil +} diff --git a/go/oasis-test-runner/scenario/e2e/runtime/sentry.go b/go/oasis-test-runner/scenario/e2e/runtime/sentry.go index 737cce01497..37ce734308c 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/sentry.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/sentry.go @@ -43,7 +43,7 @@ func newSentryImpl() scenario.Scenario { return &sentryImpl{ Scenario: *NewScenario( "sentry", - NewKVTestClient().WithScenario(SimpleKeyValueScenario), + NewTestClient().WithScenario(SimpleKeyValueScenario), ), } } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/storage_early_state_sync.go b/go/oasis-test-runner/scenario/e2e/runtime/storage_early_state_sync.go index e68d34c94b8..dd221d6142e 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/storage_early_state_sync.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/storage_early_state_sync.go @@ -3,7 +3,6 @@ package runtime import ( "context" "fmt" - "path/filepath" "time" beacon "github.com/oasisprotocol/oasis-core/go/beacon/api" @@ -134,12 +133,8 @@ func (sc *storageEarlyStateSyncImpl) Run(ctx context.Context, childEnv *env.Env) compRt := sc.Net.Runtimes()[0] compRtDesc := compRt.ToRuntimeDescriptor() compRtDesc.Deployments[0].ValidFrom = epoch + 1 - txPath := filepath.Join(childEnv.Dir(), "register_compute_runtime.json") - if err = cli.Registry.GenerateRegisterRuntimeTx(childEnv.Dir(), compRtDesc, 0, txPath); err != nil { - return fmt.Errorf("failed to generate register compute runtime tx: %w", err) - } - if err = cli.Consensus.SubmitTx(txPath); err != nil { - return fmt.Errorf("failed to register compute runtime: %w", err) + if err = sc.RegisterRuntime(ctx, childEnv, cli, compRtDesc, 0); err != nil { + return err } // Wait some epoch transitions. diff --git a/go/oasis-test-runner/scenario/e2e/runtime/storage_sync.go b/go/oasis-test-runner/scenario/e2e/runtime/storage_sync.go index cd0dee83c93..a7274663fa6 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/storage_sync.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/storage_sync.go @@ -26,7 +26,7 @@ func newStorageSyncImpl() scenario.Scenario { return &storageSyncImpl{ Scenario: *NewScenario( "storage-sync", - NewKVTestClient().WithScenario(SimpleKeyValueScenario), + NewTestClient().WithScenario(SimpleKeyValueScenario), ), } } @@ -93,7 +93,7 @@ func (sc *storageSyncImpl) Run(ctx context.Context, childEnv *env.Env) error { / } // Wait for the client to exit. 
- if err = sc.WaitTestClientOnly(); err != nil { + if err = sc.WaitTestClient(); err != nil { return err } @@ -106,7 +106,7 @@ func (sc *storageSyncImpl) Run(ctx context.Context, childEnv *env.Env) error { / sc.Logger.Info("submitting transaction to runtime", "seq", i, ) - if _, err = sc.submitKeyValueRuntimeInsertTx(ctx, runtimeID, drbg.Uint64(), "checkpoint", fmt.Sprintf("my cp %d", i), false, 0); err != nil { + if _, err = sc.submitKeyValueRuntimeInsertTx(ctx, KeyValueRuntimeID, drbg.Uint64(), "checkpoint", fmt.Sprintf("my cp %d", i), false, 0); err != nil { return err } } @@ -117,13 +117,13 @@ func (sc *storageSyncImpl) Run(ctx context.Context, childEnv *env.Env) error { / return fmt.Errorf("failed to connect with the first compute node: %w", err) } - cps, err := ctrl.Storage.GetCheckpoints(ctx, &checkpoint.GetCheckpointsRequest{Version: 1, Namespace: runtimeID}) + cps, err := ctrl.Storage.GetCheckpoints(ctx, &checkpoint.GetCheckpointsRequest{Version: 1, Namespace: KeyValueRuntimeID}) if err != nil { return fmt.Errorf("failed to get checkpoints: %w", err) } blk, err := ctrl.RuntimeClient.GetBlock(ctx, &runtimeClient.GetBlockRequest{ - RuntimeID: runtimeID, + RuntimeID: KeyValueRuntimeID, Round: runtimeClient.RoundLatest, }) if err != nil { @@ -147,7 +147,7 @@ func (sc *storageSyncImpl) Run(ctx context.Context, childEnv *env.Env) error { / var validCps int for checkpoint := rt.Storage.CheckpointInterval; checkpoint <= lastCheckpoint; checkpoint += rt.Storage.CheckpointInterval { blk, err = ctrl.RuntimeClient.GetBlock(ctx, &runtimeClient.GetBlockRequest{ - RuntimeID: runtimeID, + RuntimeID: KeyValueRuntimeID, Round: checkpoint, }) if err != nil { @@ -183,7 +183,7 @@ func (sc *storageSyncImpl) Run(ctx context.Context, childEnv *env.Env) error { / sc.Logger.Info("submitting large transaction to runtime", "seq", i, ) - if _, err = sc.submitKeyValueRuntimeInsertTx(ctx, runtimeID, drbg.Uint64(), fmt.Sprintf("%d key %d", i, i), fmt.Sprintf("my cp %d: ", i)+largeVal, false, 0); err != nil { + if _, err = sc.submitKeyValueRuntimeInsertTx(ctx, KeyValueRuntimeID, drbg.Uint64(), fmt.Sprintf("%d key %d", i, i), fmt.Sprintf("my cp %d: ", i)+largeVal, false, 0); err != nil { return err } } @@ -230,10 +230,10 @@ func (sc *storageSyncImpl) Run(ctx context.Context, childEnv *env.Env) error { / if err != nil { return fmt.Errorf("error getting status for second late compute worker: %w", err) } - lr := status.Runtimes[runtimeID].LastRetainedRound + lr := status.Runtimes[KeyValueRuntimeID].LastRetainedRound _, err = ctrl.RuntimeClient.GetTransactions(ctx, &runtimeClient.GetTransactionsRequest{ - RuntimeID: runtimeID, + RuntimeID: KeyValueRuntimeID, Round: lr, }) if err != nil { diff --git a/go/oasis-test-runner/scenario/e2e/runtime/storage_sync_from_registered.go b/go/oasis-test-runner/scenario/e2e/runtime/storage_sync_from_registered.go index e95f77280ef..6c9f69380aa 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/storage_sync_from_registered.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/storage_sync_from_registered.go @@ -26,7 +26,7 @@ func newStorageSyncFromRegisteredImpl() scenario.Scenario { return &storageSyncFromRegisteredImpl{ Scenario: *NewScenario( "storage-sync-registered", - NewKVTestClient().WithScenario(InsertRemoveKeyValueEncScenario), + NewTestClient().WithScenario(InsertRemoveKeyValueEncScenario), ), } } @@ -93,7 +93,7 @@ func (sc *storageSyncFromRegisteredImpl) Run(ctx context.Context, childEnv *env. nextEpoch++ // Wait for the client to exit. 
- if err = sc.WaitTestClientOnly(); err != nil { + if err = sc.WaitTestClient(); err != nil { return err } @@ -182,9 +182,6 @@ func (sc *storageSyncFromRegisteredImpl) Run(ctx context.Context, childEnv *env. // Run the client again. sc.Logger.Info("starting a second client to check if runtime works with compute worker 1") - sc.Scenario.testClient = NewKVTestClient().WithSeed("seed2").WithScenario(InsertRemoveKeyValueEncScenarioV2) - if err = sc.startTestClientOnly(ctx, childEnv); err != nil { - return err - } - return sc.waitTestClient() + sc.Scenario.TestClient = NewTestClient().WithSeed("seed2").WithScenario(InsertRemoveKeyValueEncScenarioV2) + return sc.RunTestClientAndCheckLogs(ctx, childEnv) } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/storage_sync_inconsistent.go b/go/oasis-test-runner/scenario/e2e/runtime/storage_sync_inconsistent.go index 5e73229109a..ac83c36257f 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/storage_sync_inconsistent.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/storage_sync_inconsistent.go @@ -29,7 +29,7 @@ func newStorageSyncInconsistentImpl() scenario.Scenario { sc := &storageSyncInconsistentImpl{ Scenario: *NewScenario( "storage-sync-inconsistent", - NewKVTestClient().WithScenario(SimpleKeyValueScenarioRepeated), + NewTestClient().WithScenario(SimpleKeyValueScenarioRepeated), ), } sc.Scenario.debugNoRandomInitialEpoch = true // I give up. @@ -182,8 +182,7 @@ func (sc *storageSyncInconsistentImpl) Run(ctx context.Context, childEnv *env.En // Wait for the client to exit. Odd error handling here; if killing succeeded, then everything // must have been fine up to this point and we can ignore the exit error from the kill. sc.Logger.Info("scenario done, killing client") - testClient := sc.testClient.(*KVTestClient) - if err = testClient.Stop(); err != nil { + if err = sc.TestClient.Stop(); err != nil { if errors.Is(err, context.Canceled) { return nil } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/runtime_client_kv.go b/go/oasis-test-runner/scenario/e2e/runtime/test_client.go similarity index 83% rename from go/oasis-test-runner/scenario/e2e/runtime/runtime_client_kv.go rename to go/oasis-test-runner/scenario/e2e/runtime/test_client.go index 86483a0cb38..513002f980f 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/runtime_client_kv.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/test_client.go @@ -15,24 +15,29 @@ import ( staking "github.com/oasisprotocol/oasis-core/go/staking/api" ) -// KVTestClient is a client that exercises the simple key-value test runtime. -type KVTestClient struct { +// TestClient is a client that exercises a pre-determined workload against +// the simple key-value runtime. +type TestClient struct { sc *Scenario - seed string scenario TestClientScenario + seed string + rng rand.Source64 + ctx context.Context cancelFn context.CancelFunc errCh chan error } -func (cli *KVTestClient) Init(scenario *Scenario) error { +// Init initializes the test client. +func (cli *TestClient) Init(scenario *Scenario) error { cli.sc = scenario return nil } -func (cli *KVTestClient) Start(ctx context.Context, childEnv *env.Env) error { +// Start starts the test client in the background. +func (cli *TestClient) Start(ctx context.Context, childEnv *env.Env) error { cli.ctx = ctx subCtx, cancelFn := context.WithCancel(ctx) @@ -46,7 +51,8 @@ func (cli *KVTestClient) Start(ctx context.Context, childEnv *env.Env) error { return nil } -func (cli *KVTestClient) Wait() error { +// Wait waits for the client to finish its work.
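+// It returns as soon as the network fails, the context is canceled, or the +// workload completes, whichever happens first.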
+func (cli *TestClient) Wait() error { var err error // Wait for the network to fail, the context to be canceled, or the @@ -63,7 +69,8 @@ func (cli *KVTestClient) Wait() error { return err } -func (cli *KVTestClient) Stop() error { +// Stop stops the client. +func (cli *TestClient) Stop() error { // Kill the workload. cli.cancelFn() @@ -76,48 +83,55 @@ func (cli *KVTestClient) Stop() error { } } -func (cli *KVTestClient) Clone() TestClient { - return &KVTestClient{ +// Clone returns a clone of a test client instance, in a state that is ready for Init. +func (cli *TestClient) Clone() *TestClient { + return &TestClient{ seed: cli.seed, scenario: cli.scenario, } } -func (cli *KVTestClient) WithSeed(seed string) *KVTestClient { +// WithSeed sets the seed. +func (cli *TestClient) WithSeed(seed string) *TestClient { cli.seed = seed + cli.rng = nil return cli } -func (cli *KVTestClient) WithScenario(scenario TestClientScenario) *KVTestClient { +// WithScenario sets the scenario. +func (cli *TestClient) WithScenario(scenario TestClientScenario) *TestClient { cli.scenario = scenario return cli } -func (cli *KVTestClient) workload(ctx context.Context) error { - // Initialize the nonce DRBG. - rng, err := drbgFromSeed( - []byte("oasis-core/oasis-test-runner/e2e/runtime/test-client"), - []byte(cli.seed), - ) - if err != nil { - return err +func (cli *TestClient) workload(ctx context.Context) error { + if cli.rng == nil { + // Initialize the nonce DRBG. + rng, err := drbgFromSeed( + []byte("oasis-core/oasis-test-runner/e2e/runtime/test-client"), + []byte(cli.seed), + ) + if err != nil { + return err + } + cli.rng = rng } cli.sc.Logger.Info("waiting for key managers to generate the first master secret") - if _, err = cli.sc.waitMasterSecret(ctx, 0); err != nil { + if _, err := cli.sc.WaitMasterSecret(ctx, 0); err != nil { return fmt.Errorf("first master secret not generated: %w", err) } // The CometBFT verifier is one block behind, so wait for an additional // two blocks to ensure that the first secret has been loaded. 
- if _, err = cli.sc.waitBlocks(ctx, 2); err != nil { + if _, err := cli.sc.WaitBlocks(ctx, 2); err != nil { return fmt.Errorf("failed to wait two blocks: %w", err) } cli.sc.Logger.Info("starting k/v runtime test client") if err := cli.scenario(func(req interface{}) error { - return cli.submit(ctx, req, rng) + return cli.submit(ctx, req, cli.rng) }); err != nil { return err } @@ -127,12 +141,12 @@ func (cli *KVTestClient) workload(ctx context.Context) error { return nil } -func (cli *KVTestClient) submit(ctx context.Context, req interface{}, rng rand.Source64) error { +func (cli *TestClient) submit(ctx context.Context, req interface{}, rng rand.Source64) error { switch req := req.(type) { case KeyValueQuery: rsp, err := cli.sc.submitKeyValueRuntimeGetQuery( ctx, - runtimeID, + KeyValueRuntimeID, req.Key, req.Round, ) @@ -146,7 +160,7 @@ func (cli *KVTestClient) submit(ctx context.Context, req interface{}, rng rand.S case InsertKeyValueTx: rsp, err := cli.sc.submitKeyValueRuntimeInsertTx( ctx, - runtimeID, + KeyValueRuntimeID, rng.Uint64(), req.Key, req.Value, @@ -163,7 +177,7 @@ func (cli *KVTestClient) submit(ctx context.Context, req interface{}, rng rand.S case GetKeyValueTx: rsp, err := cli.sc.submitKeyValueRuntimeGetTx( ctx, - runtimeID, + KeyValueRuntimeID, rng.Uint64(), req.Key, req.Encrypted, @@ -179,7 +193,7 @@ func (cli *KVTestClient) submit(ctx context.Context, req interface{}, rng rand.S case RemoveKeyValueTx: rsp, err := cli.sc.submitKeyValueRuntimeRemoveTx( ctx, - runtimeID, + KeyValueRuntimeID, rng.Uint64(), req.Key, req.Encrypted, @@ -195,7 +209,7 @@ func (cli *KVTestClient) submit(ctx context.Context, req interface{}, rng rand.S case InsertMsg: err := cli.sc.submitKeyValueRuntimeInsertMsg( ctx, - runtimeID, + KeyValueRuntimeID, rng.Uint64(), req.Key, req.Value, @@ -207,19 +221,19 @@ func (cli *KVTestClient) submit(ctx context.Context, req interface{}, rng rand.S } case GetRuntimeIDTx: - _, err := cli.sc.submitKeyValueRuntimeGetRuntimeIDTx(ctx, runtimeID, rng.Uint64()) + _, err := cli.sc.submitKeyValueRuntimeGetRuntimeIDTx(ctx, KeyValueRuntimeID, rng.Uint64()) if err != nil { return err } case ConsensusTransferTx: - err := cli.sc.submitConsensusTransferTx(ctx, runtimeID, rng.Uint64(), staking.Transfer{}) + err := cli.sc.submitConsensusTransferTx(ctx, KeyValueRuntimeID, rng.Uint64(), staking.Transfer{}) if err != nil { return err } case ConsensusAccountsTx: - err := cli.sc.submitConsensusAccountsTx(ctx, runtimeID, rng.Uint64()) + err := cli.sc.submitConsensusAccountsTx(ctx, KeyValueRuntimeID, rng.Uint64()) if err != nil { return err } @@ -231,8 +245,8 @@ func (cli *KVTestClient) submit(ctx context.Context, req interface{}, rng rand.S return nil } -func NewKVTestClient() *KVTestClient { - return &KVTestClient{ +func NewTestClient() *TestClient { + return &TestClient{ seed: "seed", scenario: func(submit func(req interface{}) error) error { return nil }, } diff --git a/go/oasis-test-runner/scenario/e2e/runtime/runtime_client_kv_scenario.go b/go/oasis-test-runner/scenario/e2e/runtime/test_client_scenario.go similarity index 97% rename from go/oasis-test-runner/scenario/e2e/runtime/runtime_client_kv_scenario.go rename to go/oasis-test-runner/scenario/e2e/runtime/test_client_scenario.go index 34752291bed..4b3c8ca2920 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/runtime_client_kv_scenario.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/test_client_scenario.go @@ -65,10 +65,10 @@ func newSimpleKeyValueScenario(repeat bool, encrypted bool) TestClientScenario { for iter := 
0; ; iter++ { // Test simple [set,get] calls. key := "hello_key" - value := fmt.Sprintf("hello_value_from_%s:%d", runtimeID, iter) + value := fmt.Sprintf("hello_value_from_%s:%d", KeyValueRuntimeID, iter) response := "" if iter > 0 { - response = fmt.Sprintf("hello_value_from_%s:%d", runtimeID, iter-1) + response = fmt.Sprintf("hello_value_from_%s:%d", KeyValueRuntimeID, iter-1) } if err := submit(InsertKeyValueTx{key, value, response, encrypted, 0}); err != nil { diff --git a/go/oasis-test-runner/scenario/e2e/runtime/trust_root.go b/go/oasis-test-runner/scenario/e2e/runtime/trust_root.go index 4ed230d5f48..63c57cab915 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/trust_root.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/trust_root.go @@ -3,42 +3,28 @@ package runtime import ( "context" "fmt" - "path/filepath" - "strconv" "github.com/hashicorp/go-multierror" - beacon "github.com/oasisprotocol/oasis-core/go/beacon/api" - "github.com/oasisprotocol/oasis-core/go/common" - "github.com/oasisprotocol/oasis-core/go/common/sgx" consensus "github.com/oasisprotocol/oasis-core/go/consensus/api" - keymanager "github.com/oasisprotocol/oasis-core/go/keymanager/api" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis/cli" - "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/rust" "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario" - registry "github.com/oasisprotocol/oasis-core/go/registry/api" roothash "github.com/oasisprotocol/oasis-core/go/roothash/api" ) // TrustRoot is the consensus trust root verification scenario. var TrustRoot scenario.Scenario = NewTrustRootImpl( "simple", - NewKVTestClient().WithScenario(SimpleKeyValueEncScenario), + NewTestClient().WithScenario(SimpleKeyValueEncScenario), ) -type trustRoot struct { - height string - hash string - chainContext string -} - type TrustRootImpl struct { Scenario } -func NewTrustRootImpl(name string, testClient TestClient) *TrustRootImpl { +func NewTrustRootImpl(name string, testClient *TestClient) *TrustRootImpl { fullName := "trust-root/" + name sc := &TrustRootImpl{ Scenario: *NewScenario(fullName, testClient), @@ -80,171 +66,6 @@ func (sc *TrustRootImpl) Fixture() (*oasis.NetworkFixture, error) { return f, nil } -func (sc *TrustRootImpl) buildRuntimes(ctx context.Context, childEnv *env.Env, runtimes map[common.Namespace]string, trustRoot *trustRoot) error { - // Determine the required directories for building the runtime with an embedded trust root. - buildDir, _ := sc.Flags.GetString(cfgRuntimeSourceDir) - targetDir, _ := sc.Flags.GetString(cfgRuntimeTargetDir) - if buildDir == "" || targetDir == "" { - return fmt.Errorf("runtime build dir and/or target dir not configured") - } - - // Determine TEE hardware. - teeHardware, err := sc.getTEEHardware() - if err != nil { - return err - } - - // Prepare the builder. - builder := rust.NewBuilder(childEnv, buildDir, targetDir, teeHardware) - - // Build runtimes one by one. 
- var errs *multierror.Error - for runtimeID, runtimeBinary := range runtimes { - switch trustRoot { - case nil: - sc.Logger.Info("building runtime without embedded trust root", - "runtime_id", runtimeID, - "runtime_binary", runtimeBinary, - ) - default: - sc.Logger.Info("building runtime with embedded trust root", - "runtime_id", runtimeID, - "runtime_binary", runtimeBinary, - "trust_root_height", trustRoot.hash, - "trust_root_hash", trustRoot.hash, - "trust_root_chainContext", trustRoot.chainContext, - ) - - // Prepare environment. - builder.SetEnv("OASIS_TESTS_CONSENSUS_TRUST_HEIGHT", trustRoot.height) - builder.SetEnv("OASIS_TESTS_CONSENSUS_TRUST_HASH", trustRoot.hash) - builder.SetEnv("OASIS_TESTS_CONSENSUS_TRUST_CHAIN_CONTEXT", trustRoot.chainContext) - builder.SetEnv("OASIS_TESTS_CONSENSUS_TRUST_RUNTIME_ID", runtimeID.String()) - } - - // Build a new runtime with the given trust root embedded. - if err = builder.Build(runtimeBinary); err != nil { - errs = multierror.Append(errs, err) - } - } - if err = errs.ErrorOrNil(); err != nil { - return fmt.Errorf("failed to build runtimes: %w", err) - } - - return nil -} - -func (sc *TrustRootImpl) buildAllRuntimes(ctx context.Context, childEnv *env.Env, trustRoot *trustRoot) error { - runtimes := map[common.Namespace]string{ - runtimeID: runtimeBinary, - keymanagerID: keyManagerBinary, - } - - return sc.buildRuntimes(ctx, childEnv, runtimes, trustRoot) -} - -func (sc *TrustRootImpl) registerRuntime(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, rt *oasis.Runtime, validFrom beacon.EpochTime, nonce uint64) error { - dsc := rt.ToRuntimeDescriptor() - dsc.Deployments[0].ValidFrom = validFrom - - txPath := filepath.Join(childEnv.Dir(), fmt.Sprintf("register_runtime_%s.json", rt.ID())) - if err := cli.Registry.GenerateRegisterRuntimeTx(childEnv.Dir(), dsc, nonce, txPath); err != nil { - return fmt.Errorf("failed to generate register runtime tx: %w", err) - } - - if err := cli.Consensus.SubmitTx(txPath); err != nil { - return fmt.Errorf("failed to register runtime: %w", err) - } - - return nil -} - -func (sc *TrustRootImpl) updateKeyManagerPolicy(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, nonce uint64) error { - // Generate and update the new keymanager runtime's policy. - kmPolicyPath := filepath.Join(childEnv.Dir(), "km_policy.cbor") - kmPolicySig1Path := filepath.Join(childEnv.Dir(), "km_policy_sig1.pem") - kmPolicySig2Path := filepath.Join(childEnv.Dir(), "km_policy_sig2.pem") - kmPolicySig3Path := filepath.Join(childEnv.Dir(), "km_policy_sig3.pem") - kmUpdateTxPath := filepath.Join(childEnv.Dir(), "km_gen_update.json") - sc.Logger.Info("building KM SGX policy enclave policies map") - enclavePolicies := make(map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX) - kmRt := sc.Net.Runtimes()[0] - kmRtEncID := kmRt.GetEnclaveIdentity(0) - var havePolicy bool - if kmRtEncID != nil { - enclavePolicies[*kmRtEncID] = &keymanager.EnclavePolicySGX{} - enclavePolicies[*kmRtEncID].MayQuery = make(map[common.Namespace][]sgx.EnclaveIdentity) - enclavePolicies[*kmRtEncID].MayReplicate = []sgx.EnclaveIdentity{} - for _, rt := range sc.Net.Runtimes() { - if rt.Kind() != registry.KindCompute { - continue - } - if eid := rt.GetEnclaveIdentity(0); eid != nil { - enclavePolicies[*kmRtEncID].MayQuery[rt.ID()] = []sgx.EnclaveIdentity{*eid} - // This is set only in SGX mode. 
- havePolicy = true - } - } - } - sc.Logger.Info("initing KM policy") - if err := cli.Keymanager.InitPolicy(kmRt.ID(), 1, 0, enclavePolicies, kmPolicyPath); err != nil { - return err - } - sc.Logger.Info("signing KM policy") - if err := cli.Keymanager.SignPolicy("1", kmPolicyPath, kmPolicySig1Path); err != nil { - return err - } - if err := cli.Keymanager.SignPolicy("2", kmPolicyPath, kmPolicySig2Path); err != nil { - return err - } - if err := cli.Keymanager.SignPolicy("3", kmPolicyPath, kmPolicySig3Path); err != nil { - return err - } - if havePolicy { - // In SGX mode, we can update the policy as intended. - sc.Logger.Info("updating KM policy") - if err := cli.Keymanager.GenUpdate(nonce, kmPolicyPath, []string{kmPolicySig1Path, kmPolicySig2Path, kmPolicySig3Path}, kmUpdateTxPath); err != nil { - return err - } - if err := cli.Consensus.SubmitTx(kmUpdateTxPath); err != nil { - return fmt.Errorf("failed to update KM policy: %w", err) - } - } - - return nil -} - -func (sc *TrustRootImpl) chainContext(ctx context.Context) (string, error) { - sc.Logger.Info("fetching consensus chain context") - - cc, err := sc.Net.Controller().Consensus.GetChainContext(ctx) - if err != nil { - return "", err - } - return cc, nil -} - -func (sc *TrustRootImpl) trustRoot(ctx context.Context) (*trustRoot, error) { - sc.Logger.Info("preparing trust root") - - // Let the network run for few blocks to select a suitable trust root. - block, err := sc.waitBlocks(ctx, 5) - if err != nil { - return nil, err - } - - chainContext, err := sc.chainContext(ctx) - if err != nil { - return nil, err - } - - return &trustRoot{ - height: strconv.FormatInt(block.Height, 10), - hash: block.Hash.Hex(), - chainContext: chainContext, - }, nil -} - // PreRun starts the network, prepares a trust root, builds simple key/value and key manager // runtimes, prepares runtime bundles, and runs the test client. func (sc *TrustRootImpl) PreRun(ctx context.Context, childEnv *env.Env) (err error) { @@ -262,13 +83,13 @@ func (sc *TrustRootImpl) PreRun(ctx context.Context, childEnv *env.Env) (err err } // Pick one block and use it as an embedded trust root. - trustRoot, err := sc.trustRoot(ctx) + trustRoot, err := sc.TrustRoot(ctx) if err != nil { return err } // Build simple key/value and key manager runtimes. - if err = sc.buildAllRuntimes(ctx, childEnv, trustRoot); err != nil { + if err = sc.BuildAllRuntimes(ctx, childEnv, trustRoot); err != nil { return err } @@ -288,16 +109,28 @@ func (sc *TrustRootImpl) PreRun(ctx context.Context, childEnv *env.Env) (err err // Register the runtimes. for _, rt := range sc.Net.Runtimes() { - if err = sc.registerRuntime(ctx, childEnv, cli, rt, epoch+2, nonce); err != nil { + rtDsc := rt.ToRuntimeDescriptor() + rtDsc.Deployments[0].ValidFrom = epoch + 2 + if err = sc.RegisterRuntime(ctx, childEnv, cli, rtDsc, nonce); err != nil { return err } nonce++ } // Update the key manager policy. - if err = sc.updateKeyManagerPolicy(ctx, childEnv, cli, nonce); err != nil { + policies, err := sc.BuildEnclavePolicies(childEnv) + if err != nil { return err } + switch policies { + case nil: + sc.Logger.Info("no SGX runtimes, skipping policy update") + default: + if err = sc.ApplyKeyManagerPolicy(ctx, childEnv, cli, 0, policies, nonce); err != nil { + return fmt.Errorf("updating policies: %w", err) + } + nonce++ // nolint: ineffassign + } // Start all the required workers. 
if err = sc.startClientComputeAndKeyManagerNodes(ctx, childEnv); err != nil { @@ -305,20 +138,13 @@ func (sc *TrustRootImpl) PreRun(ctx context.Context, childEnv *env.Env) (err err } // Run the test client workload to ensure that blocks get processed correctly. - if err = sc.startTestClientOnly(ctx, childEnv); err != nil { - return err - } - if err = sc.waitTestClient(); err != nil { - return err - } - - return nil + return sc.RunTestClientAndCheckLogs(ctx, childEnv) } // PostRun re-builds simple key/value and key manager runtimes. func (sc *TrustRootImpl) PostRun(ctx context.Context, childEnv *env.Env) error { // In the end, always rebuild all runtimes as we are changing binaries in one of the steps. - return sc.buildAllRuntimes(ctx, childEnv, nil) + return sc.BuildAllRuntimes(ctx, childEnv, nil) } func (sc *TrustRootImpl) Run(ctx context.Context, childEnv *env.Env) (err error) { @@ -333,7 +159,7 @@ func (sc *TrustRootImpl) Run(ctx context.Context, childEnv *env.Env) (err error) sc.Logger.Info("testing query latest block") _, err = sc.submitKeyValueRuntimeGetQuery( ctx, - runtimeID, + KeyValueRuntimeID, "hello_key", roothash.RoundLatest, ) @@ -341,7 +167,7 @@ func (sc *TrustRootImpl) Run(ctx context.Context, childEnv *env.Env) (err error) return err } - latestBlk, err := sc.Net.ClientController().Roothash.GetLatestBlock(ctx, &roothash.RuntimeRequest{RuntimeID: runtimeID, Height: consensus.HeightLatest}) + latestBlk, err := sc.Net.ClientController().Roothash.GetLatestBlock(ctx, &roothash.RuntimeRequest{RuntimeID: KeyValueRuntimeID, Height: consensus.HeightLatest}) if err != nil { return err } @@ -349,7 +175,7 @@ func (sc *TrustRootImpl) Run(ctx context.Context, childEnv *env.Env) (err error) sc.Logger.Info("testing query for past round", "round", round) _, err = sc.submitKeyValueRuntimeGetQuery( ctx, - runtimeID, + KeyValueRuntimeID, "hello_key", round, ) @@ -372,12 +198,8 @@ func (sc *TrustRootImpl) Run(ctx context.Context, childEnv *env.Env) (err error) } sc.Logger.Info("starting a second test client to check if queries for the last round work") - sc.Scenario.testClient = NewKVTestClient().WithSeed("seed2").WithScenario(NewTestClientScenario(queries)) - if err := sc.startTestClientOnly(ctx, childEnv); err != nil { - return err - } - - return sc.waitTestClient() + sc.Scenario.TestClient = NewTestClient().WithSeed("seed2").WithScenario(NewTestClientScenario(queries)) + return sc.RunTestClientAndCheckLogs(ctx, childEnv) } func (sc *TrustRootImpl) startClientComputeAndKeyManagerNodes(ctx context.Context, childEnv *env.Env) error { diff --git a/go/oasis-test-runner/scenario/e2e/runtime/trust_root_change.go b/go/oasis-test-runner/scenario/e2e/runtime/trust_root_change.go index 17463d117fe..4078b12bdcf 100644 --- a/go/oasis-test-runner/scenario/e2e/runtime/trust_root_change.go +++ b/go/oasis-test-runner/scenario/e2e/runtime/trust_root_change.go @@ -38,7 +38,7 @@ var ( // changes, e.g. on dump-restore network upgrades. TrustRootChangeTest scenario.Scenario = newTrustRootChangeImpl( "change", - NewKVTestClient().WithScenario(InsertKeyValueEncScenario), + NewTestClient().WithScenario(InsertKeyValueEncScenario), true, ) @@ -47,7 +47,7 @@ var ( // consensus chain context changes. 
diff --git a/go/oasis-test-runner/scenario/e2e/runtime/txsource.go b/go/oasis-test-runner/scenario/e2e/runtime/txsource.go
index e730611533f..ce06aec9d9b 100644
--- a/go/oasis-test-runner/scenario/e2e/runtime/txsource.go
+++ b/go/oasis-test-runner/scenario/e2e/runtime/txsource.go
@@ -793,7 +793,7 @@ func (sc *txSourceImpl) startWorkload(childEnv *env.Env, errCh chan error, name
 		"--" + flags.CfgDebugTestEntity,
 		"--" + commonGrpc.CfgLogDebug,
 		"--" + flags.CfgGenesisFile, sc.Net.GenesisPath(),
-		"--" + workload.CfgRuntimeID, runtimeID.String(),
+		"--" + workload.CfgRuntimeID, KeyValueRuntimeID.String(),
 		"--" + txsource.CfgWorkload, name,
 		"--" + txsource.CfgTimeLimit, sc.timeLimit.String(),
 		"--" + txsource.CfgSeed, sc.seed,
@@ -875,7 +875,7 @@ func (sc *txSourceImpl) Run(ctx context.Context, childEnv *env.Env) error {
 	}
 
 	// Wait for all nodes to be synced before we proceed.
-	if err := sc.waitNodesSynced(ctx); err != nil {
+	if err := sc.WaitNodesSynced(ctx); err != nil {
 		return err
 	}
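Note: txsource launches each workload as a separate subprocess, so the runtime under test is selected entirely through command-line flags; the switch from the package-private `runtimeID` to the exported `KeyValueRuntimeID` only changes the value rendered into the argument vector. A rough sketch of that parameterization (flag names here are illustrative, not the real oasis-node flags):

package sketch

// workloadArgs renders the flags a txsource workload subprocess receives.
// The runtime ID is passed as a plain string, which is why renaming the
// Go-side identifier is a one-line change in the hunk above.
func workloadArgs(runtimeID, workload, seed, timeLimit string) []string {
	return []string{
		"--runtime.id", runtimeID, // which runtime the workload targets
		"--workload", workload, // which transaction mix to generate
		"--time_limit", timeLimit, // how long the workload should run
		"--seed", seed, // deterministic workload randomness
	}
}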
diff --git a/go/oasis-test-runner/scenario/e2e/scenario.go b/go/oasis-test-runner/scenario/e2e/scenario.go
new file mode 100644
index 00000000000..bdec0cb1622
--- /dev/null
+++ b/go/oasis-test-runner/scenario/e2e/scenario.go
@@ -0,0 +1,162 @@
+// Package e2e implements the Oasis e2e test scenarios.
+package e2e
+
+import (
+	flag "github.com/spf13/pflag"
+
+	"github.com/oasisprotocol/oasis-core/go/common/logging"
+	"github.com/oasisprotocol/oasis-core/go/consensus/api/transaction"
+	consensusGenesis "github.com/oasisprotocol/oasis-core/go/consensus/genesis"
+	"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/cmd"
+	"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env"
+	"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis"
+	"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario"
+)
+
+const (
+	// cfgNodeBinary is the path to oasis-node executable.
+	cfgNodeBinary = "node.binary"
+)
+
+// ParamsDummyScenario is a dummy instance of E2E scenario used to register global E2E flags.
+var ParamsDummyScenario = NewScenario("")
+
+// Scenario is a base scenario for oasis-node end-to-end tests.
+type Scenario struct {
+	Net    *oasis.Network
+	Flags  *env.ParameterFlagSet
+	Logger *logging.Logger
+
+	name string
+}
+
+// NewScenario creates a new base scenario for oasis-node end-to-end tests.
+func NewScenario(name string) *Scenario {
+	// Empty scenario name is used for registering global parameters only.
+	fullName := "e2e"
+	if name != "" {
+		fullName += "/" + name
+	}
+
+	sc := &Scenario{
+		name:   fullName,
+		Logger: logging.GetLogger("scenario/" + fullName),
+		Flags:  env.NewParameterFlagSet(fullName, flag.ContinueOnError),
+	}
+	sc.Flags.String(cfgNodeBinary, "oasis-node", "path to the node binary")
+
+	return sc
+}
+
+// Clone implements scenario.Scenario.
+func (sc *Scenario) Clone() Scenario {
+	return Scenario{
+		Net:    sc.Net,
+		Flags:  sc.Flags.Clone(),
+		Logger: sc.Logger,
+		name:   sc.name,
+	}
+}
+
+// Name implements scenario.Scenario.
+func (sc *Scenario) Name() string {
+	return sc.name
+}
+
+// Parameters implements scenario.Scenario.
+func (sc *Scenario) Parameters() *env.ParameterFlagSet {
+	return sc.Flags
+}
+
+// PreInit implements scenario.Scenario.
+func (sc *Scenario) PreInit(childEnv *env.Env) error {
+	return nil
+}
+
+// Fixture implements scenario.Scenario.
+func (sc *Scenario) Fixture() (*oasis.NetworkFixture, error) {
+	nodeBinary, _ := sc.Flags.GetString(cfgNodeBinary)
+
+	return &oasis.NetworkFixture{
+		Network: oasis.NetworkCfg{
+			NodeBinary: nodeBinary,
+			Consensus: consensusGenesis.Genesis{
+				Parameters: consensusGenesis.Parameters{
+					GasCosts: transaction.Costs{
+						consensusGenesis.GasOpTxByte: 1,
+					},
+				},
+			},
+		},
+		Entities: []oasis.EntityCfg{
+			{IsDebugTestEntity: true},
+			{},
+		},
+		Validators: []oasis.ValidatorFixture{
+			{Entity: 1, Consensus: oasis.ConsensusFixture{SupplementarySanityInterval: 1}},
+			{Entity: 1},
+			{Entity: 1},
+		},
+		Seeds: []oasis.SeedFixture{{}},
+	}, nil
+}
+
+// Init implements scenario.Scenario.
+func (sc *Scenario) Init(childEnv *env.Env, net *oasis.Network) error {
+	sc.Net = net
+	return nil
+}
+
+// RegisterScenarios registers all end-to-end scenarios.
+func RegisterScenarios() error {
+	// Register non-scenario-specific parameters.
+	cmd.RegisterScenarioParams(ParamsDummyScenario.Name(), ParamsDummyScenario.Parameters())
+
+	// Register default scenarios which are executed, if no test names provided.
+	for _, s := range []scenario.Scenario{
+		// Registry CLI test.
+		RegistryCLI,
+		// Stake CLI test.
+		StakeCLI,
+		// Gas fees tests.
+		GasFeesStaking,
+		GasFeesStakingDumpRestore,
+		// Identity CLI test.
+		IdentityCLI,
+		// Genesis file test.
+		GenesisFile,
+		// Node upgrade tests.
+		NodeUpgradeDummy,
+		NodeUpgradeMaxAllowances,
+		NodeUpgradeV62,
+		NodeUpgradeEmpty,
+		NodeUpgradeCancel,
+		// Debonding entries from genesis test.
+		Debond,
+		// Early query test.
+		EarlyQuery,
+		EarlyQueryInitHeight,
+		// Consensus state sync.
+		ConsensusStateSync,
+		// Multiple seeds test.
+		MultipleSeeds,
+		// Seed API test.
+		SeedAPI,
+		// ValidatorEquivocation test.
+		ValidatorEquivocation,
+		// Byzantine VRF beacon tests.
+		ByzantineVRFBeaconHonest,
+		ByzantineVRFBeaconEarly,
+		ByzantineVRFBeaconMissing,
+		// Minimum transact balance test.
+		MinTransactBalance,
+		// Consensus governance update parameters tests.
+		ChangeParametersMinCommissionRate,
+	} {
+		if err := cmd.Register(s); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
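Note: the new base `Scenario` is designed to be embedded by concrete scenarios, which then wrap `Clone` so each run gets its own copy of the shared plumbing while the logger and network handle are shared. A minimal sketch of that embedding pattern with hypothetical types (the real derived scenarios live in the e2e/runtime package):

package sketch

// base plays the role of e2e.Scenario: shared name/flags/logging plumbing.
type base struct {
	name string
}

func (b *base) Name() string { return b.name }

// clone copies the base by value so separate runs do not share mutable state,
// matching the Clone convention in scenario.go above.
func (b *base) clone() base { return base{name: b.name} }

// kvScenario shows how a concrete scenario composes with the base.
type kvScenario struct {
	base
	happy bool
}

// Clone rebuilds the derived scenario around a fresh copy of the base.
func (s *kvScenario) Clone() *kvScenario {
	return &kvScenario{base: s.clone(), happy: s.happy}
}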
diff --git a/go/registry/api/api.go b/go/registry/api/api.go
index de3fb1158ed..cca7ec8c4da 100644
--- a/go/registry/api/api.go
+++ b/go/registry/api/api.go
@@ -973,7 +973,7 @@ func verifyNodeRuntimeChanges(
 			continue
 		}
 
-		logger.Error("RegisterNode: trying to update runtimes, current version is misssing in new set",
+		logger.Error("RegisterNode: trying to update runtimes, current version is missing in new set",
 			"runtime_id", id,
 			"version", version,
 		)
diff --git a/keymanager/src/api/requests.rs b/keymanager/src/api/requests.rs
index 51314d39c35..3538705d174 100644
--- a/keymanager/src/api/requests.rs
+++ b/keymanager/src/api/requests.rs
@@ -164,6 +164,7 @@ pub struct LongTermKeyRequest {
     /// Key pair ID.
     pub key_pair_id: KeyPairId,
     /// Generation.
+    #[cbor(optional)]
     pub generation: u64,
 }
diff --git a/runtime/src/consensus/tendermint/verifier/mod.rs b/runtime/src/consensus/tendermint/verifier/mod.rs
index d3cfcd365f7..1a40370027c 100644
--- a/runtime/src/consensus/tendermint/verifier/mod.rs
+++ b/runtime/src/consensus/tendermint/verifier/mod.rs
@@ -639,7 +639,9 @@ impl Verifier {
         // Build a light client using the embedded trust root or trust root
         // stored in the local store.
         info!(self.logger, "Loading trusted state");
-        let trusted_state: TrustedState = self.trusted_state_store.load(&self.trust_root)?;
+        let trusted_state: TrustedState = self
+            .trusted_state_store
+            .load(self.runtime_version, &self.trust_root)?;
 
         // Verify if we can trust light blocks from a new chain if the consensus
         // chain context changes.
@@ -696,7 +698,8 @@
         // processing any requests.
         let verified_block = self.verify_to_target(HEIGHT_LATEST, &mut cache, &mut instance)?;
 
-        self.trusted_state_store.save(&instance.state.light_store);
+        self.trusted_state_store
+            .save(self.runtime_version, &instance.state.light_store);
 
         let mut last_saved_verified_block_height =
             verified_block.signed_header.header.height.value();
@@ -770,7 +773,8 @@ impl Verifier {
         if let Some(last_verified_block) = cache.last_verified_block.as_ref() {
             let last_height = last_verified_block.signed_header.header.height.into();
             if last_height - last_saved_verified_block_height > TRUSTED_STATE_SAVE_INTERVAL {
-                self.trusted_state_store.save(&instance.state.light_store);
+                self.trusted_state_store
+                    .save(self.runtime_version, &instance.state.light_store);
                 last_saved_verified_block_height = last_height;
             }
         }
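Note: after the initial sync the verifier re-seals its trusted state only once enough new heights have been verified, so sealing cost stays amortized over many blocks. The throttling in the last hunk reduces to the following height arithmetic, transcribed as a Go sketch (the interval value here is assumed; the real constant is the Rust `TRUSTED_STATE_SAVE_INTERVAL`):

package sketch

// trustedStateSaveInterval is illustrative only.
const trustedStateSaveInterval = 32

// maybeSave reproduces the watermark logic above: seal the trusted state only
// when more than the interval's worth of new heights has been verified, and
// return the updated save watermark.
func maybeSave(lastSaved, lastVerified uint64, save func()) uint64 {
	if lastVerified-lastSaved > trustedStateSaveInterval {
		save()
		return lastVerified
	}
	return lastSaved
}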
diff --git a/runtime/src/consensus/tendermint/verifier/store/state.rs b/runtime/src/consensus/tendermint/verifier/store/state.rs
index ae93f4158a0..ef2fccbe520 100644
--- a/runtime/src/consensus/tendermint/verifier/store/state.rs
+++ b/runtime/src/consensus/tendermint/verifier/store/state.rs
@@ -10,6 +10,7 @@ use crate::{
     common::{
         namespace::Namespace,
         sgx::{seal, EnclaveIdentity},
+        version::Version,
     },
     consensus::verifier::{Error, TrustRoot},
     protocol::ProtocolUntrustedLocalStorage,
@@ -93,7 +94,7 @@ impl TrustedStateStore {
     ///
     /// Panics in case the light store does not have any blocks or if insertion to the underlying
     /// runtime's untrusted local store fails.
-    pub fn save(&self, store: &Box<dyn LightStore>) {
+    pub fn save(&self, runtime_version: Version, store: &Box<dyn LightStore>) {
         let lowest_block = store.lowest(Status::Trusted).unwrap();
         let highest_block = store.highest(Status::Trusted).unwrap();
@@ -116,18 +117,22 @@ impl TrustedStateStore {
         // Store the trusted state.
         self.untrusted_local_store
-            .insert(Self::derive_storage_key(), sealed)
+            .insert(Self::derive_storage_key(runtime_version), sealed)
             .unwrap();
     }
 
     /// Attempts to load previously sealed trusted state.
     ///
     /// If no sealed trusted state is available, it returns state based on the passed trust root.
-    pub fn load(&self, trust_root: &TrustRoot) -> Result<TrustedState, Error> {
+    pub fn load(
+        &self,
+        runtime_version: Version,
+        trust_root: &TrustRoot,
+    ) -> Result<TrustedState, Error> {
         // Attempt to load the previously sealed trusted state.
         let untrusted_value = self
             .untrusted_local_store
-            .get(Self::derive_storage_key())
+            .get(Self::derive_storage_key(runtime_version))
             .map_err(|_| Error::TrustedStateLoadingFailed)?;
         if untrusted_value.is_empty() {
             return Ok(TrustedState {
@@ -149,13 +154,14 @@ impl TrustedStateStore {
         Ok(trusted_state)
     }
 
-    fn derive_storage_key() -> Vec<u8> {
+    fn derive_storage_key(runtime_version: Version) -> Vec<u8> {
         // Namespace storage key by MRENCLAVE as we can only unseal our own sealed data and we need
         // to support upgrades. We assume that an upgrade will include an up-to-date trusted state
        // anyway.
         format!(
-            "{}.{:x}",
+            "{}.{}.{:x}",
             TRUSTED_STATE_STORAGE_KEY_PREFIX,
+            u64::from(runtime_version),
             EnclaveIdentity::current()
                 .map(|eid| eid.mr_enclave)
                 .unwrap_or_default()
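Note: extending the storage key with the runtime version means each runtime version unseals only the snapshot it wrote itself, so a verifier that is rolled back, or upgraded without carrying state over, falls back to its embedded trust root rather than picking up another version's sealed state. The key shape is simply prefix.version.mrenclave-hex; in Go terms (the prefix constant's value is an assumption, since the patch does not show it):

package sketch

import "fmt"

// trustedStateStorageKeyPrefix is an assumed value; the actual Rust constant
// TRUSTED_STATE_STORAGE_KEY_PREFIX is defined outside this patch.
const trustedStateStorageKeyPrefix = "tendermint.verifier.trusted_state"

// deriveStorageKey mirrors the Rust format!("{}.{}.{:x}", ...) above: the key
// is namespaced by runtime version and by MRENCLAVE (lowercase hex).
func deriveStorageKey(runtimeVersion uint64, mrEnclave [32]byte) string {
	return fmt.Sprintf("%s.%d.%x", trustedStateStorageKeyPrefix, runtimeVersion, mrEnclave)
}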