From b6764b454044778125dddc8183c64122f17d7f6d Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Fri, 1 Nov 2024 10:41:40 +0100 Subject: [PATCH] fix: use require instead of t.Fatal(err) in tests/integration package Signed-off-by: Matthieu MOREL --- tests/integration/clientv3/cluster_test.go | 6 +- .../clientv3/concurrency/mutex_test.go | 44 +- .../clientv3/concurrency/session_test.go | 36 +- .../clientv3/connectivity/black_hole_test.go | 28 +- .../clientv3/connectivity/dial_test.go | 32 +- .../connectivity/network_partition_test.go | 25 +- .../connectivity/server_shutdown_test.go | 18 +- .../clientv3/examples/example_kv_test.go | 3 +- .../clientv3/examples/example_lease_test.go | 2 +- .../clientv3/examples/main_test.go | 3 +- .../recipes/v3_double_barrier_test.go | 5 +- .../experimental/recipes/v3_lock_test.go | 18 +- .../experimental/recipes/v3_queue_test.go | 1 + tests/integration/clientv3/kv_test.go | 64 ++- .../integration/clientv3/lease/lease_test.go | 68 +-- .../clientv3/lease/leasing_test.go | 386 +++++++----------- .../integration/clientv3/maintenance_test.go | 40 +- .../integration/clientv3/mirror_auth_test.go | 13 +- tests/integration/clientv3/mirror_test.go | 10 +- tests/integration/clientv3/namespace_test.go | 20 +- .../clientv3/naming/endpoints_test.go | 3 +- .../integration/clientv3/ordering_kv_test.go | 49 +-- .../clientv3/ordering_util_test.go | 49 +-- tests/integration/clientv3/txn_test.go | 31 +- tests/integration/clientv3/user_test.go | 38 +- tests/integration/clientv3/util.go | 1 - tests/integration/clientv3/watch_test.go | 117 ++---- tests/integration/cluster_test.go | 33 +- tests/integration/embed/embed_test.go | 14 +- tests/integration/grpc_test.go | 13 +- tests/integration/hashkv_test.go | 6 +- tests/integration/member_test.go | 4 +- tests/integration/metrics_test.go | 68 +-- .../proxy/grpcproxy/cluster_test.go | 8 +- tests/integration/revision_test.go | 16 +- .../integration/snapshot/v3_snapshot_test.go | 3 +- tests/integration/testing_test.go | 1 - tests/integration/tracing_test.go | 16 +- tests/integration/utl_wal_version_test.go | 13 +- tests/integration/v2store/main_test.go | 2 - tests/integration/v3_alarm_test.go | 77 ++-- tests/integration/v3_auth_test.go | 47 +-- tests/integration/v3_election_test.go | 38 +- tests/integration/v3_failover_test.go | 7 +- tests/integration/v3_grpc_inflight_test.go | 11 +- tests/integration/v3_grpc_test.go | 197 ++++----- tests/integration/v3_kv_test.go | 36 +- tests/integration/v3_leadership_test.go | 10 +- tests/integration/v3_lease_test.go | 190 +++------ tests/integration/v3_stm_test.go | 17 +- tests/integration/v3_tls_test.go | 4 +- tests/integration/v3_watch_restore_test.go | 9 +- tests/integration/v3_watch_test.go | 132 +++--- tests/integration/v3election_grpc_test.go | 16 +- tests/integration/v3lock_grpc_test.go | 10 +- 55 files changed, 782 insertions(+), 1326 deletions(-) diff --git a/tests/integration/clientv3/cluster_test.go b/tests/integration/clientv3/cluster_test.go index 9ed510108b8..c1e3b15ef1c 100644 --- a/tests/integration/clientv3/cluster_test.go +++ b/tests/integration/clientv3/cluster_test.go @@ -23,6 +23,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/client/pkg/v3/types" integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) @@ -266,9 +268,7 @@ func TestMemberPromote(t *testing.T) { // (the response has information on peer urls of the existing members in cluster) learnerMember := clus.MustNewMember(t, memberAddResp) - if err = learnerMember.Launch(); err 
!= nil { - t.Fatal(err) - } + require.NoError(t, learnerMember.Launch()) // retry until promote succeed or timeout timeout := time.After(5 * time.Second) diff --git a/tests/integration/clientv3/concurrency/mutex_test.go b/tests/integration/clientv3/concurrency/mutex_test.go index bf5b187686f..f0d8454d6f7 100644 --- a/tests/integration/clientv3/concurrency/mutex_test.go +++ b/tests/integration/clientv3/concurrency/mutex_test.go @@ -19,6 +19,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" integration2 "go.etcd.io/etcd/tests/v3/framework/integration" @@ -26,29 +28,21 @@ import ( func TestMutexLockSessionExpired(t *testing.T) { cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() // create two separate sessions for lock competition s1, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer s1.Close() m1 := concurrency.NewMutex(s1, "/my-lock/") s2, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) m2 := concurrency.NewMutex(s2, "/my-lock/") // acquire lock for s1 - if err = m1.Lock(context.TODO()); err != nil { - t.Fatal(err) - } + require.NoError(t, m1.Lock(context.TODO())) m2Locked := make(chan struct{}) var err2 error @@ -61,28 +55,19 @@ func TestMutexLockSessionExpired(t *testing.T) { }() // revoke the session of m2 before unlock m1 - err = s2.Close() - if err != nil { - t.Fatal(err) - } - if err := m1.Unlock(context.TODO()); err != nil { - t.Fatal(err) - } + require.NoError(t, s2.Close()) + require.NoError(t, m1.Unlock(context.TODO())) <-m2Locked } func TestMutexUnlock(t *testing.T) { cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() s1, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer s1.Close() m1 := concurrency.NewMutex(s1, "/my-lock/") @@ -94,13 +79,8 @@ func TestMutexUnlock(t *testing.T) { t.Fatal(err) } - if err = m1.Lock(context.TODO()); err != nil { - t.Fatal(err) - } - - if err = m1.Unlock(context.TODO()); err != nil { - t.Fatal(err) - } + require.NoError(t, m1.Lock(context.TODO())) + require.NoError(t, m1.Unlock(context.TODO())) err = m1.Unlock(context.TODO()) if err == nil { diff --git a/tests/integration/clientv3/concurrency/session_test.go b/tests/integration/clientv3/concurrency/session_test.go index 37fc1899b0f..e7bcc563889 100644 --- a/tests/integration/clientv3/concurrency/session_test.go +++ b/tests/integration/clientv3/concurrency/session_test.go @@ -20,6 +20,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" @@ -28,18 +29,12 @@ import ( func TestSessionOptions(t *testing.T) { cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() lease, err := cli.Grant(context.Background(), 100) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) s, err := concurrency.NewSession(cli, concurrency.WithLease(lease.ID)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer s.Close() assert.Equal(t, s.Lease(), lease.ID) @@ -50,18 +45,15 @@ func 
TestSessionOptions(t *testing.T) { t.Fatal("session did not get orphaned as expected") } } + func TestSessionTTLOptions(t *testing.T) { cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() - var setTTL = 90 + setTTL := 90 s, err := concurrency.NewSession(cli, concurrency.WithTTL(setTTL)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer s.Close() leaseID := s.Lease() @@ -83,18 +75,12 @@ func TestSessionTTLOptions(t *testing.T) { func TestSessionCtx(t *testing.T) { cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() lease, err := cli.Grant(context.Background(), 100) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) s, err := concurrency.NewSession(cli, concurrency.WithLease(lease.ID)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer s.Close() assert.Equal(t, s.Lease(), lease.ID) diff --git a/tests/integration/clientv3/connectivity/black_hole_test.go b/tests/integration/clientv3/connectivity/black_hole_test.go index e1fc1c57b97..00b7849ea56 100644 --- a/tests/integration/clientv3/connectivity/black_hole_test.go +++ b/tests/integration/clientv3/connectivity/black_hole_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" @@ -62,9 +63,7 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) { timeout := pingInterval + integration2.RequestWaitTimeout cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() wch := cli.Watch(context.Background(), "foo", clientv3.WithCreatedNotify()) @@ -80,9 +79,8 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) { clus.Members[0].Bridge().Blackhole() - if _, err = clus.Client(1).Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Put(context.TODO(), "foo", "bar") + require.NoError(t, err) select { case <-wch: case <-time.After(timeout): @@ -97,12 +95,10 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) { clus.Members[1].Bridge().Blackhole() // make sure client[0] can connect to eps[0] after remove the blackhole. 
- if _, err = clus.Client(0).Get(context.TODO(), "foo"); err != nil { - t.Fatal(err) - } - if _, err = clus.Client(0).Put(context.TODO(), "foo", "bar1"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Get(context.TODO(), "foo") + require.NoError(t, err) + _, err = clus.Client(0).Put(context.TODO(), "foo", "bar1") + require.NoError(t, err) select { case <-wch: @@ -183,9 +179,7 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien DialOptions: []grpc.DialOption{grpc.WithBlock()}, } cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() // wait for eps[0] to be pinned @@ -214,7 +208,5 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien t.Errorf("#%d: failed with error %v", i, err) } } - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } diff --git a/tests/integration/clientv3/connectivity/dial_test.go b/tests/integration/clientv3/connectivity/dial_test.go index 769ce17f4df..54556d0f8dd 100644 --- a/tests/integration/clientv3/connectivity/dial_test.go +++ b/tests/integration/clientv3/connectivity/dial_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -54,9 +55,7 @@ func TestDialTLSExpired(t *testing.T) { defer clus.Terminate(t) tls, err := testTLSInfoExpired.ClientConfig() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // expect remote errors "tls: bad certificate" _, err = integration2.NewClient(t, clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCURL}, @@ -120,9 +119,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) { DialOptions: []grpc.DialOption{grpc.WithBlock()}, } cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() if setBefore { @@ -137,9 +134,8 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) { } time.Sleep(time.Second * 2) ctx, cancel := context.WithTimeout(context.Background(), integration2.RequestWaitTimeout) - if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil { - t.Fatal(err) - } + _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()) + require.NoError(t, err) cancel() } @@ -160,9 +156,8 @@ func TestSwitchSetEndpoints(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - if _, err := cli.Get(ctx, "foo"); err != nil { - t.Fatal(err) - } + _, err := cli.Get(ctx, "foo") + require.NoError(t, err) } func TestRejectOldCluster(t *testing.T) { @@ -178,9 +173,7 @@ func TestRejectOldCluster(t *testing.T) { RejectOldCluster: true, } cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cli.Close() } @@ -192,9 +185,7 @@ func TestDialForeignEndpoint(t *testing.T) { defer clus.Terminate(t) conn, err := clus.Client(0).Dial(clus.Client(1).Endpoints()[0]) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer conn.Close() // grpc can return a lazy connection that's not connected yet; confirm @@ -202,9 +193,8 @@ func TestDialForeignEndpoint(t *testing.T) { kvc := clientv3.NewKVFromKVClient(pb.NewKVClient(conn), clus.Client(0)) ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) defer cancel() - if _, gerr := kvc.Get(ctx, "abc"); gerr != nil { - t.Fatal(err) - } + _, gerr := kvc.Get(ctx, "abc") + require.NoError(t, gerr) } // TestSetEndpointAndPut checks that a Put 
following a SetEndpoints diff --git a/tests/integration/clientv3/connectivity/network_partition_test.go b/tests/integration/clientv3/connectivity/network_partition_test.go index 6c99c32d04c..557cdb0b31e 100644 --- a/tests/integration/clientv3/connectivity/network_partition_test.go +++ b/tests/integration/clientv3/connectivity/network_partition_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -124,9 +125,7 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c DialOptions: []grpc.DialOption{grpc.WithBlock()}, } cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() // wait for eps[0] to be pinned clientv3test.MustWaitPinReady(t, cli) @@ -180,9 +179,7 @@ func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T DialTimeout: 2 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() // add all eps to list, so that when the original pined one fails @@ -201,9 +198,7 @@ func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T break } } - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } func TestBalancerUnderNetworkPartitionWatchLeader(t *testing.T) { @@ -233,9 +228,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) { // pin eps[target] watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) t.Logf("watchCli created to: %v", target) defer watchCli.Close() @@ -291,9 +284,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) { DialOptions: []grpc.DialOption{grpc.WithBlock()}, } cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() // wait for eps[0] to be pinned @@ -303,9 +294,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) { cli.SetEndpoints(eps...) 
time.Sleep(time.Second * 2) conn, err := cli.Dial(clus.Members[(leaderIndex+1)%3].GRPCURL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer conn.Close() clus.Members[leaderIndex].InjectPartition(t, clus.Members[(leaderIndex+1)%3], clus.Members[(leaderIndex+2)%3]) diff --git a/tests/integration/clientv3/connectivity/server_shutdown_test.go b/tests/integration/clientv3/connectivity/server_shutdown_test.go index 3afc8eb4c01..9e44ea9f906 100644 --- a/tests/integration/clientv3/connectivity/server_shutdown_test.go +++ b/tests/integration/clientv3/connectivity/server_shutdown_test.go @@ -22,6 +22,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" clientv3 "go.etcd.io/etcd/client/v3" integration2 "go.etcd.io/etcd/tests/v3/framework/integration" @@ -45,9 +47,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) { // pin eps[lead] watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[lead]}}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer watchCli.Close() // wait for eps[lead] to be pinned @@ -91,9 +91,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) { // writes to eps[lead+1] putCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer putCli.Close() for { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) @@ -154,9 +152,7 @@ func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Clie // pin eps[0] cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() // wait for eps[0] to be pinned @@ -177,9 +173,7 @@ func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Clie cctx, ccancel := context.WithTimeout(context.Background(), time.Second) err = op(cli, cctx) ccancel() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } func TestBalancerUnderServerShutdownGetLinearizable(t *testing.T) { diff --git a/tests/integration/clientv3/examples/example_kv_test.go b/tests/integration/clientv3/examples/example_kv_test.go index 6bfb2428055..e4fa4bf5f44 100644 --- a/tests/integration/clientv3/examples/example_kv_test.go +++ b/tests/integration/clientv3/examples/example_kv_test.go @@ -322,7 +322,8 @@ func ExampleKV_do() { ops := []clientv3.Op{ clientv3.OpPut("put-key", "123"), clientv3.OpGet("put-key"), - clientv3.OpPut("put-key", "456")} + clientv3.OpPut("put-key", "456"), + } for _, op := range ops { if _, err := cli.Do(context.TODO(), op); err != nil { diff --git a/tests/integration/clientv3/examples/example_lease_test.go b/tests/integration/clientv3/examples/example_lease_test.go index beca3692bfa..b0e6c5ef366 100644 --- a/tests/integration/clientv3/examples/example_lease_test.go +++ b/tests/integration/clientv3/examples/example_lease_test.go @@ -48,7 +48,7 @@ func ExampleLease_grant() { log.Fatal(err) } }) - //Output: + // Output: } func mockLease_revoke() { diff --git a/tests/integration/clientv3/examples/main_test.go b/tests/integration/clientv3/examples/main_test.go index 50251d840a6..338a1ed6468 100644 --- a/tests/integration/clientv3/examples/main_test.go +++ b/tests/integration/clientv3/examples/main_test.go @@ -34,7 +34,8 @@ var lazyCluster = integration.NewLazyClusterWithConfig( integration2.ClusterConfig{ Size: 3, WatchProgressNotifyInterval: 200 * time.Millisecond, - 
DisableStrictReconfigCheck: true}) + DisableStrictReconfigCheck: true, + }) func exampleEndpoints() []string { return lazyCluster.EndpointsGRPC() } diff --git a/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go b/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go index 680476b48da..d8f610e66c5 100644 --- a/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go +++ b/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" @@ -216,9 +217,7 @@ func TestDoubleBarrierFailover(t *testing.T) { } } - if err = s0.Close(); err != nil { - t.Fatal(err) - } + require.NoError(t, s0.Close()) // join on rest of waiters for i := 0; i < waiters-1; i++ { select { diff --git a/tests/integration/clientv3/experimental/recipes/v3_lock_test.go b/tests/integration/clientv3/experimental/recipes/v3_lock_test.go index 1fcbc46e144..4a802f72cb4 100644 --- a/tests/integration/clientv3/experimental/recipes/v3_lock_test.go +++ b/tests/integration/clientv3/experimental/recipes/v3_lock_test.go @@ -23,6 +23,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/api/v3/mvccpb" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" @@ -182,14 +184,10 @@ func TestMutexSessionRelock(t *testing.T) { } m := concurrency.NewMutex(session, "test-mutex") - if err := m.Lock(context.TODO()); err != nil { - t.Fatal(err) - } + require.NoError(t, m.Lock(context.TODO())) m2 := concurrency.NewMutex(session, "test-mutex") - if err := m2.Lock(context.TODO()); err != nil { - t.Fatal(err) - } + require.NoError(t, m2.Lock(context.TODO())) } // TestMutexWaitsOnCurrentHolder ensures a mutex is only acquired once all @@ -211,9 +209,7 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) { } defer firstOwnerSession.Close() firstOwnerMutex := concurrency.NewMutex(firstOwnerSession, "test-mutex") - if err = firstOwnerMutex.Lock(cctx); err != nil { - t.Fatal(err) - } + require.NoError(t, firstOwnerMutex.Lock(cctx)) victimSession, err := concurrency.NewSession(cli) if err != nil { @@ -286,9 +282,7 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) { default: } - if err := firstOwnerMutex.Unlock(cctx); err != nil { - t.Fatal(err) - } + require.NoError(t, firstOwnerMutex.Unlock(cctx)) select { case <-newOwnerDonec: diff --git a/tests/integration/clientv3/experimental/recipes/v3_queue_test.go b/tests/integration/clientv3/experimental/recipes/v3_queue_test.go index 7ace22eb8f6..73ed5552fe2 100644 --- a/tests/integration/clientv3/experimental/recipes/v3_queue_test.go +++ b/tests/integration/clientv3/experimental/recipes/v3_queue_test.go @@ -234,6 +234,7 @@ func (q *flatPriorityQueue) Enqueue(val string) error { // randomized to stress dequeuing logic; order isn't important return q.PriorityQueue.Enqueue(val, uint16(rand.Intn(2))) } + func (q *flatPriorityQueue) Dequeue() (string, error) { return q.PriorityQueue.Dequeue() } diff --git a/tests/integration/clientv3/kv_test.go b/tests/integration/clientv3/kv_test.go index 66a9bce59d5..bb01fded844 100644 --- a/tests/integration/clientv3/kv_test.go +++ b/tests/integration/clientv3/kv_test.go @@ -26,6 +26,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -61,9 +62,7 @@ func 
TestKVPutError(t *testing.T) { } _, err = kv.Put(ctx, "foo1", strings.Repeat("a", int(maxReqBytes-50))) - if err != nil { // below quota - t.Fatal(err) - } + require.NoError(t, err) // below quota time.Sleep(1 * time.Second) // give enough time for commit @@ -123,13 +122,11 @@ func TestKVPutWithIgnoreValue(t *testing.T) { t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err) } - if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err = kv.Put(context.TODO(), "foo", "bar") + require.NoError(t, err) - if _, err := kv.Put(context.TODO(), "foo", "", clientv3.WithIgnoreValue()); err != nil { - t.Fatal(err) - } + _, err = kv.Put(context.TODO(), "foo", "", clientv3.WithIgnoreValue()) + require.NoError(t, err) rr, rerr := kv.Get(context.TODO(), "foo") if rerr != nil { t.Fatal(rerr) @@ -158,17 +155,14 @@ func TestKVPutWithIgnoreLease(t *testing.T) { t.Errorf("failed to create lease %v", err) } - if _, err := kv.Put(context.TODO(), "zoo", "bar", clientv3.WithIgnoreLease()); !errors.Is(err, rpctypes.ErrKeyNotFound) { - t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err) - } + _, err = kv.Put(context.TODO(), "zoo", "bar", clientv3.WithIgnoreLease()) + require.ErrorIs(t, err, rpctypes.ErrKeyNotFound) - if _, err := kv.Put(context.TODO(), "zoo", "bar", clientv3.WithLease(resp.ID)); err != nil { - t.Fatal(err) - } + _, err = kv.Put(context.TODO(), "zoo", "bar", clientv3.WithLease(resp.ID)) + require.NoError(t, err) - if _, err := kv.Put(context.TODO(), "zoo", "bar1", clientv3.WithIgnoreLease()); err != nil { - t.Fatal(err) - } + _, err = kv.Put(context.TODO(), "zoo", "bar1", clientv3.WithIgnoreLease()) + require.NoError(t, err) rr, rerr := kv.Get(context.TODO(), "zoo") if rerr != nil { @@ -209,13 +203,9 @@ func TestKVPutWithRequireLeader(t *testing.T) { `type="unary"`, fmt.Sprintf(`client_api_version="%v"`, version.APIVersion), ) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cv, err := strconv.ParseInt(cnt, 10, 32) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if cv < 1 { // >1 when retried t.Fatalf("expected at least 1, got %q", cnt) } @@ -298,9 +288,7 @@ func TestKVGetErrConnClosed(t *testing.T) { cli := clus.Client(0) donec := make(chan struct{}) - if err := cli.Close(); err != nil { - t.Fatal(err) - } + require.NoError(t, cli.Close()) clus.TakeClient(0) go func() { @@ -326,9 +314,7 @@ func TestKVNewAfterClose(t *testing.T) { cli := clus.Client(0) clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } + require.NoError(t, cli.Close()) donec := make(chan struct{}) go func() { @@ -493,9 +479,8 @@ func TestKVGetRetry(t *testing.T) { kv := clus.Client(fIdx) ctx := context.TODO() - if _, err := kv.Put(ctx, "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err := kv.Put(ctx, "foo", "bar") + require.NoError(t, err) clus.Members[fIdx].Stop(t) @@ -650,9 +635,8 @@ func TestKVPutAtMostOnce(t *testing.T) { clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) - if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil { - t.Fatal(err) - } + _, err := clus.Client(0).Put(context.TODO(), "k", "1") + require.NoError(t, err) for i := 0; i < 10; i++ { clus.Members[0].Bridge().DropConnections() @@ -664,17 +648,15 @@ func TestKVPutAtMostOnce(t *testing.T) { time.Sleep(5 * time.Millisecond) } }() - _, err := clus.Client(0).Put(context.TODO(), "k", "v") + _, perr := clus.Client(0).Put(context.TODO(), "k", "v") <-donec - if err != nil { 
+ if perr != nil { break } } resp, err := clus.Client(0).Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if resp.Kvs[0].Version > 11 { t.Fatalf("expected version <= 10, got %+v", resp.Kvs[0]) } diff --git a/tests/integration/clientv3/lease/lease_test.go b/tests/integration/clientv3/lease/lease_test.go index 66b4f92ae4d..4d7d9e23981 100644 --- a/tests/integration/clientv3/lease/lease_test.go +++ b/tests/integration/clientv3/lease/lease_test.go @@ -266,9 +266,8 @@ func TestLeaseKeepAliveNotFound(t *testing.T) { lchs = append(lchs, leaseCh{resp.ID, kach}) } - if _, err := cli.Revoke(context.TODO(), lchs[1].lid); err != nil { - t.Fatal(err) - } + _, err := cli.Revoke(context.TODO(), lchs[1].lid) + require.NoError(t, err) <-lchs[0].ch if _, ok := <-lchs[0].ch; !ok { @@ -287,9 +286,7 @@ func TestLeaseGrantErrConnClosed(t *testing.T) { cli := clus.Client(0) clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } + require.NoError(t, cli.Close()) donec := make(chan struct{}) go func() { @@ -359,9 +356,7 @@ func TestLeaseGrantNewAfterClose(t *testing.T) { cli := clus.Client(0) clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } + require.NoError(t, cli.Close()) donec := make(chan struct{}) go func() { @@ -386,15 +381,12 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) { cli := clus.Client(0) resp, err := cli.Grant(context.TODO(), 5) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseID := resp.ID clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } + err = cli.Close() + require.NoError(t, err) errMsgCh := make(chan string, 1) go func() { @@ -427,9 +419,7 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) { // setup lease and do a keepalive resp, err := cli.Grant(context.Background(), 10) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rc, kerr := cli.KeepAlive(context.Background(), resp.ID) if kerr != nil { t.Fatal(kerr) @@ -444,9 +434,8 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) { time.Sleep(time.Second) clus.WaitLeader(t) - if _, err := clus.Client(1).Revoke(context.TODO(), resp.ID); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Revoke(context.TODO(), resp.ID) + require.NoError(t, err) clus.Members[0].Restart(t) @@ -473,9 +462,7 @@ func TestLeaseKeepAliveInitTimeout(t *testing.T) { // setup lease and do a keepalive resp, err := cli.Grant(context.Background(), 5) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // keep client disconnected clus.Members[0].Stop(t) rc, kerr := cli.KeepAlive(context.Background(), resp.ID) @@ -506,9 +493,7 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) { // setup lease and do a keepalive resp, err := cli.Grant(context.Background(), 5) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rc, kerr := cli.KeepAlive(context.Background(), resp.ID) if kerr != nil { t.Fatal(kerr) @@ -553,9 +538,8 @@ func TestLeaseTimeToLive(t *testing.T) { } // linearized read to ensure Puts propagated to server backing lapi - if _, err := c.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } + _, err = c.Get(context.TODO(), "abc") + require.NoError(t, err) lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys()) if lerr != nil { @@ -641,9 +625,7 @@ func TestLeaseLeases(t *testing.T) { } resp, err := cli.Leases(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Leases) != 5 { 
t.Fatalf("len(resp.Leases) expected 5, got %d", len(resp.Leases)) } @@ -664,16 +646,12 @@ func TestLeaseRenewLostQuorum(t *testing.T) { cli := clus.Client(0) r, err := cli.Grant(context.TODO(), 4) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) kctx, kcancel := context.WithCancel(context.Background()) defer kcancel() ka, err := cli.KeepAlive(kctx, r.ID) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // consume first keepalive so next message sends when cluster is down <-ka lastKa := time.Now() @@ -715,9 +693,7 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) { clus.TakeClient(0) resp, err := cli.Grant(ctx, 5) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cli.Close() _, err = cli.KeepAlive(ctx, resp.ID) @@ -791,13 +767,9 @@ func TestLeaseWithRequireLeader(t *testing.T) { c := clus.Client(0) lid1, err1 := c.Grant(context.TODO(), 60) - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) lid2, err2 := c.Grant(context.TODO(), 60) - if err2 != nil { - t.Fatal(err2) - } + require.NoError(t, err2) // kaReqLeader close if the leader is lost kaReqLeader, kerr1 := c.KeepAlive(clientv3.WithRequireLeader(context.TODO()), lid1.ID) if kerr1 != nil { diff --git a/tests/integration/clientv3/lease/leasing_test.go b/tests/integration/clientv3/lease/leasing_test.go index 22c98f9ed05..787f4631589 100644 --- a/tests/integration/clientv3/lease/leasing_test.go +++ b/tests/integration/clientv3/lease/leasing_test.go @@ -52,19 +52,15 @@ func TestLeasingPutGet(t *testing.T) { defer closeLKV3() resp, err := lKV1.Get(context.TODO(), "abc") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Errorf("expected nil, got %q", resp.Kvs[0].Key) } - if _, err = lKV1.Put(context.TODO(), "abc", "def"); err != nil { - t.Fatal(err) - } - if resp, err = lKV2.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } + _, err = lKV1.Put(context.TODO(), "abc", "def") + require.NoError(t, err) + resp, err = lKV2.Get(context.TODO(), "abc") + require.NoError(t, err) if string(resp.Kvs[0].Key) != "abc" { t.Errorf("expected key=%q, got key=%q", "abc", resp.Kvs[0].Key) } @@ -72,16 +68,13 @@ func TestLeasingPutGet(t *testing.T) { t.Errorf("expected value=%q, got value=%q", "bar", resp.Kvs[0].Value) } - if _, err = lKV3.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } - if _, err = lKV2.Put(context.TODO(), "abc", "ghi"); err != nil { - t.Fatal(err) - } + _, err = lKV3.Get(context.TODO(), "abc") + require.NoError(t, err) + _, err = lKV2.Put(context.TODO(), "abc", "ghi") + require.NoError(t, err) - if resp, err = lKV3.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } + resp, err = lKV3.Get(context.TODO(), "abc") + require.NoError(t, err) if string(resp.Kvs[0].Key) != "abc" { t.Errorf("expected key=%q, got key=%q", "abc", resp.Kvs[0].Key) } @@ -108,22 +101,18 @@ func TestLeasingInterval(t *testing.T) { } resp, err := lkv.Get(context.TODO(), "abc/", clientv3.WithPrefix()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 3 { t.Fatalf("expected keys %+v, got response keys %+v", keys, resp.Kvs) } // load into cache - if _, err = lkv.Get(context.TODO(), "abc/a"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "abc/a") + require.NoError(t, err) // get when prefix is also a cached key - if resp, err = lkv.Get(context.TODO(), "abc/a", clientv3.WithPrefix()); err != nil { - t.Fatal(err) - } + resp, err = lkv.Get(context.TODO(), "abc/a", clientv3.WithPrefix()) + require.NoError(t, 
err) if len(resp.Kvs) != 2 { t.Fatalf("expected keys %+v, got response keys %+v", keys, resp.Kvs) } @@ -139,17 +128,13 @@ func TestLeasingPutInvalidateNew(t *testing.T) { require.NoError(t, err) defer closeLKV() - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Put(context.TODO(), "k", "v"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) + _, err = lkv.Put(context.TODO(), "k", "v") + require.NoError(t, err) lkvResp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cResp, cerr := clus.Client(0).Get(context.TODO(), "k") if cerr != nil { t.Fatal(cerr) @@ -165,25 +150,20 @@ func TestLeasingPutInvalidateExisting(t *testing.T) { clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) - if _, err := clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err := clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") require.NoError(t, err) defer closeLKV() - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Put(context.TODO(), "k", "v"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) + _, err = lkv.Put(context.TODO(), "k", "v") + require.NoError(t, err) lkvResp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cResp, cerr := clus.Client(0).Get(context.TODO(), "k") if cerr != nil { t.Fatal(cerr) @@ -232,20 +212,16 @@ func TestLeasingGetSerializable(t *testing.T) { require.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "cached", "abc"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Get(context.TODO(), "cached"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "cached", "abc") + require.NoError(t, err) + _, err = lkv.Get(context.TODO(), "cached") + require.NoError(t, err) clus.Members[1].Stop(t) // don't necessarily try to acquire leasing key ownership for new key resp, err := lkv.Get(context.TODO(), "uncached", clientv3.WithSerializable()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Fatalf(`expected no keys, got response %+v`, resp) } @@ -254,9 +230,7 @@ func TestLeasingGetSerializable(t *testing.T) { // leasing key ownership should have "cached" locally served cachedResp, err := lkv.Get(context.TODO(), "cached", clientv3.WithSerializable()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(cachedResp.Kvs) != 1 || string(cachedResp.Kvs[0].Value) != "abc" { t.Fatalf(`expected "cached"->"abc", got response %+v`, cachedResp) } @@ -272,17 +246,13 @@ func TestLeasingPrevKey(t *testing.T) { require.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) // acquire leasing key - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) resp, err := lkv.Put(context.TODO(), "k", "def", clientv3.WithPrevKV()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if resp.PrevKv == nil || string(resp.PrevKv.Value) != "abc" { t.Fatalf(`expected PrevKV.Value="abc", got response %+v`, resp) } @@ -299,12 +269,9 @@ 
func TestLeasingRevGet(t *testing.T) { defer closeLKV() putResp, err := clus.Client(0).Put(context.TODO(), "k", "abc") - if err != nil { - t.Fatal(err) - } - if _, err = clus.Client(0).Put(context.TODO(), "k", "def"); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + _, err = clus.Client(0).Put(context.TODO(), "k", "def") + require.NoError(t, err) // check historic revision getResp, gerr := lkv.Get(context.TODO(), "k", clientv3.WithRev(putResp.Header.Revision)) @@ -334,13 +301,11 @@ func TestLeasingGetWithOpts(t *testing.T) { require.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) // in cache - if _, err = lkv.Get(context.TODO(), "k", clientv3.WithKeysOnly()); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k", clientv3.WithKeysOnly()) + require.NoError(t, err) clus.Members[0].Stop(t) @@ -353,8 +318,8 @@ func TestLeasingGetWithOpts(t *testing.T) { clientv3.WithSerializable(), } for _, opt := range opts { - _, err := lkv.Get(context.TODO(), "k", opt) - require.NoError(t, err) + _, gerr := lkv.Get(context.TODO(), "k", opt) + require.NoError(t, gerr) } var getOpts []clientv3.OpOption @@ -362,9 +327,8 @@ func TestLeasingGetWithOpts(t *testing.T) { getOpts = append(getOpts, opts[rand.Intn(len(opts))]) } getOpts = getOpts[:rand.Intn(len(opts))] - if _, err := lkv.Get(context.TODO(), "k", getOpts...); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k", getOpts...) + require.NoError(t, err) } // TestLeasingConcurrentPut ensures that a get after concurrent puts returns @@ -379,9 +343,8 @@ func TestLeasingConcurrentPut(t *testing.T) { defer closeLKV() // force key into leasing key cache - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) // concurrently put through leasing client numPuts := 16 @@ -405,9 +368,7 @@ func TestLeasingConcurrentPut(t *testing.T) { // confirm Get gives most recently put revisions getResp, gerr := lkv.Get(context.TODO(), "k") - if gerr != nil { - t.Fatal(err) - } + require.NoError(t, gerr) if mr := getResp.Kvs[0].ModRevision; mr != maxRev { t.Errorf("expected ModRevision %d, got %d", maxRev, mr) } @@ -425,21 +386,17 @@ func TestLeasingDisconnectedGet(t *testing.T) { require.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "cached", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "cached", "abc") + require.NoError(t, err) // get key so it's cached - if _, err = lkv.Get(context.TODO(), "cached"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "cached") + require.NoError(t, err) clus.Members[0].Stop(t) // leasing key ownership should have "cached" locally served cachedResp, err := lkv.Get(context.TODO(), "cached") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(cachedResp.Kvs) != 1 || string(cachedResp.Kvs[0].Value) != "abc" { t.Fatalf(`expected "cached"->"abc", got response %+v`, cachedResp) } @@ -454,29 +411,23 @@ func TestLeasingDeleteOwner(t *testing.T) { require.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) // get+own / delete / get - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - 
t.Fatal(err) - } - if _, err = lkv.Delete(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) + _, err = lkv.Delete(context.TODO(), "k") + require.NoError(t, err) resp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Fatalf(`expected "k" to be deleted, got response %+v`, resp) } // try to double delete - if _, err = lkv.Delete(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv.Delete(context.TODO(), "k") + require.NoError(t, err) } func TestLeasingDeleteNonOwner(t *testing.T) { @@ -492,23 +443,18 @@ func TestLeasingDeleteNonOwner(t *testing.T) { require.NoError(t, err) defer closeLKV2() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) // acquire ownership - if _, err = lkv1.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv1.Get(context.TODO(), "k") + require.NoError(t, err) // delete via non-owner - if _, err = lkv2.Delete(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv2.Delete(context.TODO(), "k") + require.NoError(t, err) // key should be removed from lkv1 resp, err := lkv1.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Fatalf(`expected "k" to be deleted, got response %+v`, resp) } @@ -523,22 +469,17 @@ func TestLeasingOverwriteResponse(t *testing.T) { require.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) resp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) resp.Kvs[0].Key[0] = 'z' resp.Kvs[0].Value[0] = 'z' resp, err = lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if string(resp.Kvs[0].Key) != "k" { t.Errorf(`expected key "k", got %q`, string(resp.Kvs[0].Key)) @@ -557,17 +498,14 @@ func TestLeasingOwnerPutResponse(t *testing.T) { require.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) _, gerr := lkv.Get(context.TODO(), "k") if gerr != nil { t.Fatal(gerr) } presp, err := lkv.Put(context.TODO(), "k", "def") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if presp == nil { t.Fatal("expected put response, got nil") } @@ -598,12 +536,11 @@ func TestLeasingTxnOwnerGetRange(t *testing.T) { keyCount := rand.Intn(10) + 1 for i := 0; i < keyCount; i++ { k := fmt.Sprintf("k-%d", i) - _, err := clus.Client(0).Put(context.TODO(), k, k+k) - require.NoError(t, err) - } - if _, err := lkv.Get(context.TODO(), "k-"); err != nil { - t.Fatal(err) + _, perr := clus.Client(0).Put(context.TODO(), k, k+k) + require.NoError(t, perr) } + _, err = lkv.Get(context.TODO(), "k-") + require.NoError(t, err) tresp, terr := lkv.Txn(context.TODO()).Then(clientv3.OpGet("k-", clientv3.WithPrefix())).Commit() if terr != nil { @@ -642,9 +579,7 @@ func TestLeasingTxnOwnerGet(t *testing.T) { for i := range presps { k := fmt.Sprintf("k-%d", i) presp, err := client.Put(context.TODO(), k, k+k) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) presps[i] = presp _, err = 
lkv.Get(context.TODO(), k) @@ -718,9 +653,7 @@ func TestLeasingTxnOwnerDeleteRange(t *testing.T) { // cache in lkv resp, err := lkv.Get(context.TODO(), "k-", clientv3.WithPrefix()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != keyCount { t.Fatalf("expected %d keys, got %d", keyCount, len(resp.Kvs)) } @@ -730,9 +663,7 @@ func TestLeasingTxnOwnerDeleteRange(t *testing.T) { } resp, err = lkv.Get(context.TODO(), "k-", clientv3.WithPrefix()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Fatalf("expected no keys, got %d", len(resp.Kvs)) } @@ -747,9 +678,8 @@ func TestLeasingTxnOwnerDelete(t *testing.T) { require.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) // cache in lkv if _, gerr := lkv.Get(context.TODO(), "k"); gerr != nil { @@ -761,9 +691,7 @@ func TestLeasingTxnOwnerDelete(t *testing.T) { } resp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Fatalf("expected no keys, got %d", len(resp.Kvs)) } @@ -778,12 +706,10 @@ func TestLeasingTxnOwnerIf(t *testing.T) { require.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) // served through cache clus.Members[0].Stop(t) @@ -877,15 +803,13 @@ func TestLeasingTxnCancel(t *testing.T) { defer closeLKV2() // acquire lease but disconnect so no revoke in time - if _, err = lkv1.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv1.Get(context.TODO(), "k") + require.NoError(t, err) clus.Members[0].Stop(t) // wait for leader election, if any - if _, err = clus.Client(1).Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Get(context.TODO(), "abc") + require.NoError(t, err) ctx, cancel := context.WithCancel(context.TODO()) go func() { @@ -910,19 +834,15 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) { require.NoError(t, err) defer closeLKV2() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - if _, err = clus.Client(0).Put(context.TODO(), "k2", "123"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) + _, err = clus.Client(0).Put(context.TODO(), "k2", "123") + require.NoError(t, err) // cache in lkv - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Get(context.TODO(), "k2"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) + _, err = lkv.Get(context.TODO(), "k2") + require.NoError(t, err) // invalidate via lkv2 txn opArray := make([]clientv3.Op, 0) opArray = append(opArray, clientv3.OpPut("k2", "456")) @@ -939,9 +859,7 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) { } // check cache was invalidated gresp, gerr := lkv.Get(context.TODO(), "k") - if gerr != nil { - t.Fatal(err) - } + require.NoError(t, gerr) if len(gresp.Kvs) != 1 || string(gresp.Kvs[0].Value) != "def" { t.Errorf(`expected value "def", got %+v`, gresp) } @@ -993,9 +911,7 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) 
{ for i := 0; i < keyCount; i++ { k, v := fmt.Sprintf("k-%d", i), fmt.Sprintf("%d", i) dat[i], err1 = clus.Client(0).Put(context.TODO(), k, v) - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) } // nondeterministically populate leasing caches @@ -1090,9 +1006,8 @@ func TestLeasingOwnerPutError(t *testing.T) { require.NoError(t, err) defer closeLKV() - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) clus.Members[0].Stop(t) ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) @@ -1111,9 +1026,8 @@ func TestLeasingOwnerDeleteError(t *testing.T) { require.NoError(t, err) defer closeLKV() - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) clus.Members[0].Stop(t) ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) @@ -1162,9 +1076,8 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) { require.NoError(t, err) } - if _, err = lkv.Get(context.TODO(), "key/1"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "key/1") + require.NoError(t, err) opResp, delErr := lkv.Do(context.TODO(), del) if delErr != nil { @@ -1175,9 +1088,7 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) { // confirm keys are invalidated from cache and deleted on etcd for i := 0; i < 8; i++ { resp, err := lkv.Get(context.TODO(), fmt.Sprintf("key/%d", i)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Fatalf("expected no keys on key/%d, got %+v", i, resp) } @@ -1216,9 +1127,8 @@ func TestLeasingDeleteRangeBounds(t *testing.T) { require.NoError(t, err) } - if _, err = delkv.Delete(context.TODO(), "k", clientv3.WithPrefix()); err != nil { - t.Fatal(err) - } + _, err = delkv.Delete(context.TODO(), "k", clientv3.WithPrefix()) + require.NoError(t, err) // leases still on server? 
for _, k := range []string{"j", "m"} { @@ -1234,12 +1144,10 @@ func TestLeasingDeleteRangeBounds(t *testing.T) { // j and m should still have leases registered since not under k* clus.Members[0].Stop(t) - if _, err = getkv.Get(context.TODO(), "j"); err != nil { - t.Fatal(err) - } - if _, err = getkv.Get(context.TODO(), "m"); err != nil { - t.Fatal(err) - } + _, err = getkv.Get(context.TODO(), "j") + require.NoError(t, err) + _, err = getkv.Get(context.TODO(), "m") + require.NoError(t, err) } func TestLeasingDeleteRangeContendTxn(t *testing.T) { @@ -1300,13 +1208,9 @@ func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) { for i := 0; i < maxKey; i++ { key := fmt.Sprintf("key/%d", i) resp, err := putkv.Get(context.TODO(), key) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) servResp, err := clus.Client(0).Get(context.TODO(), key) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if !reflect.DeepEqual(resp.Kvs, servResp.Kvs) { t.Errorf("#%d: expected %+v, got %+v", i, servResp.Kvs, resp.Kvs) } @@ -1350,18 +1254,14 @@ func TestLeasingPutGetDeleteConcurrent(t *testing.T) { wg.Wait() resp, err := lkvs[0].Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) > 0 { t.Fatalf("expected no kvs, got %+v", resp.Kvs) } resp, err = clus.Client(0).Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) > 0 { t.Fatalf("expected no kvs, got %+v", resp.Kvs) } @@ -1382,9 +1282,8 @@ func TestLeasingReconnectOwnerRevoke(t *testing.T) { require.NoError(t, err2) defer closeLKV2() - if _, err := lkv1.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err := lkv1.Get(context.TODO(), "k") + require.NoError(t, err) // force leader away from member 0 clus.Members[0].Stop(t) @@ -1443,39 +1342,30 @@ func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) { require.NoError(t, err2) defer closeLKV2() - if _, err := lkv1.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err := lkv1.Get(context.TODO(), "k") + require.NoError(t, err) clus.Members[0].Stop(t) clus.WaitLeader(t) // put some more revisions for compaction - _, err := clus.Client(1).Put(context.TODO(), "a", "123") - if err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Put(context.TODO(), "a", "123") + require.NoError(t, err) presp, err := clus.Client(1).Put(context.TODO(), "a", "123") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // compact while lkv1 is disconnected rev := presp.Header.Revision - if _, err = clus.Client(1).Compact(context.TODO(), rev); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Compact(context.TODO(), rev) + require.NoError(t, err) clus.Members[0].Restart(t) cctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) defer cancel() - if _, err = lkv2.Put(cctx, "k", "v"); err != nil { - t.Fatal(err) - } + _, err = lkv2.Put(cctx, "k", "v") + require.NoError(t, err) resp, err := lkv1.Get(cctx, "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if string(resp.Kvs[0].Value) != "v" { t.Fatalf(`expected "v" value, got %+v`, resp) } diff --git a/tests/integration/clientv3/maintenance_test.go b/tests/integration/clientv3/maintenance_test.go index ab9be52c071..7283006d26c 100644 --- a/tests/integration/clientv3/maintenance_test.go +++ b/tests/integration/clientv3/maintenance_test.go @@ -61,9 +61,7 @@ func TestMaintenanceHashKV(t *testing.T) { _, err := cli.Get(context.TODO(), "foo") require.NoError(t, err) hresp, err := 
cli.HashKV(context.Background(), clus.Members[i].GRPCURL, 0) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if hv == 0 { hv = hresp.Hash continue @@ -83,9 +81,7 @@ func TestCompactionHash(t *testing.T) { defer clus.Terminate(t) cc, err := clus.ClusterClient(t) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) testutil.TestCompactionHash(context.Background(), t, hashTestCase{cc, clus.Members[0].GRPCURL}, 1000) } @@ -140,9 +136,7 @@ func TestMaintenanceMoveLeader(t *testing.T) { cli = clus.Client(oldLeadIdx) _, err = cli.MoveLeader(context.Background(), target) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leadIdx := clus.WaitLeader(t) lead := uint64(clus.Members[leadIdx].ID()) @@ -172,9 +166,7 @@ func TestMaintenanceSnapshotCancel(t *testing.T) { populateDataIntoCluster(t, clus, 3, 1024*1024) rc1, err := clus.RandClient().Snapshot(ctx) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer rc1.Close() // read 16 bytes to ensure that server opens snapshot @@ -232,9 +224,7 @@ func testMaintenanceSnapshotTimeout(t *testing.T, snapshot func(context.Context, populateDataIntoCluster(t, clus, 3, 1024*1024) rc2, err := snapshot(ctx, clus.RandClient()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer rc2.Close() time.Sleep(2 * time.Second) @@ -290,9 +280,7 @@ func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Co // reading snapshot with canceled context should error out ctx, cancel := context.WithCancel(context.Background()) rc1, err := snapshot(ctx, clus.RandClient()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer rc1.Close() donec := make(chan struct{}) @@ -311,9 +299,7 @@ func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Co ctx, cancel = context.WithTimeout(context.Background(), time.Second) defer cancel() rc2, err := snapshot(ctx, clus.RandClient()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer rc2.Close() // 300ms left and expect timeout while snapshot reading is in progress @@ -339,9 +325,7 @@ func TestMaintenanceSnapshotWithVersionVersion(t *testing.T) { // reading snapshot with canceled context should error out resp, err := clus.RandClient().SnapshotWithVersion(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer resp.Snapshot.Close() if resp.Version != "3.6.0" { t.Errorf("unexpected version, expected %q, got %q", version.Version, resp.Version) @@ -411,18 +395,14 @@ func TestMaintenanceStatus(t *testing.T) { t.Logf("Creating client...") cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() t.Logf("Creating client [DONE]") prevID, leaderFound := uint64(0), false for i := 0; i < 3; i++ { resp, err := cli.Status(context.TODO(), eps[i]) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) t.Logf("Response from %v: %v", i, resp) if resp.DbSizeQuota != storage.DefaultQuotaBytes { t.Errorf("unexpected backend default quota returned: %d, expected %d", resp.DbSizeQuota, storage.DefaultQuotaBytes) diff --git a/tests/integration/clientv3/mirror_auth_test.go b/tests/integration/clientv3/mirror_auth_test.go index 8dedd4e94d2..1738fea5a64 100644 --- a/tests/integration/clientv3/mirror_auth_test.go +++ b/tests/integration/clientv3/mirror_auth_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" 
"google.golang.org/grpc" "go.etcd.io/etcd/api/v3/mvccpb" @@ -45,9 +46,7 @@ func TestMirrorSync_Authenticated(t *testing.T) { // Seed /syncpath with some initial data _, err := initialClient.KV.Put(context.TODO(), "/syncpath/foo", "bar") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Require authentication authSetupRoot(t, initialClient.Auth) @@ -61,9 +60,7 @@ func TestMirrorSync_Authenticated(t *testing.T) { Password: "syncfoo", } syncClient, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer syncClient.Close() // Now run the sync process, create changes, and get the initial sync state @@ -86,9 +83,7 @@ func TestMirrorSync_Authenticated(t *testing.T) { // Update state _, err = syncClient.KV.Put(context.TODO(), "/syncpath/foo", "baz") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Wait for the updated state to sync select { diff --git a/tests/integration/clientv3/mirror_test.go b/tests/integration/clientv3/mirror_test.go index f21551bbdf0..e3bc9a73009 100644 --- a/tests/integration/clientv3/mirror_test.go +++ b/tests/integration/clientv3/mirror_test.go @@ -22,6 +22,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/client/v3/mirror" integration2 "go.etcd.io/etcd/tests/v3/framework/integration" @@ -35,9 +37,7 @@ func TestMirrorSync(t *testing.T) { c := clus.Client(0) _, err := c.KV.Put(context.TODO(), "foo", "bar") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) syncer := mirror.NewSyncer(c, "", 0) gch, ech := syncer.SyncBase(context.TODO()) @@ -56,9 +56,7 @@ func TestMirrorSync(t *testing.T) { wch := syncer.SyncUpdates(context.TODO()) _, err = c.KV.Put(context.TODO(), "foo", "bar") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) select { case r := <-wch: diff --git a/tests/integration/clientv3/namespace_test.go b/tests/integration/clientv3/namespace_test.go index 2aad010f987..4ad7f60a28f 100644 --- a/tests/integration/clientv3/namespace_test.go +++ b/tests/integration/clientv3/namespace_test.go @@ -19,6 +19,8 @@ import ( "reflect" "testing" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/api/v3/mvccpb" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/namespace" @@ -34,21 +36,16 @@ func TestNamespacePutGet(t *testing.T) { c := clus.Client(0) nsKV := namespace.NewKV(c.KV, "foo/") - if _, err := nsKV.Put(context.TODO(), "abc", "bar"); err != nil { - t.Fatal(err) - } + _, err := nsKV.Put(context.TODO(), "abc", "bar") + require.NoError(t, err) resp, err := nsKV.Get(context.TODO(), "abc") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if string(resp.Kvs[0].Key) != "abc" { t.Errorf("expected key=%q, got key=%q", "abc", resp.Kvs[0].Key) } resp, err = c.Get(context.TODO(), "foo/abc") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if string(resp.Kvs[0].Value) != "bar" { t.Errorf("expected value=%q, got value=%q", "bar", resp.Kvs[0].Value) } @@ -64,9 +61,8 @@ func TestNamespaceWatch(t *testing.T) { nsKV := namespace.NewKV(c.KV, "foo/") nsWatcher := namespace.NewWatcher(c.Watcher, "foo/") - if _, err := nsKV.Put(context.TODO(), "abc", "bar"); err != nil { - t.Fatal(err) - } + _, err := nsKV.Put(context.TODO(), "abc", "bar") + require.NoError(t, err) nsWch := nsWatcher.Watch(context.TODO(), "abc", clientv3.WithRev(1)) wkv := &mvccpb.KeyValue{Key: []byte("abc"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1} diff --git 
a/tests/integration/clientv3/naming/endpoints_test.go b/tests/integration/clientv3/naming/endpoints_test.go index 8677e9732cd..3c93c6c7d19 100644 --- a/tests/integration/clientv3/naming/endpoints_test.go +++ b/tests/integration/clientv3/naming/endpoints_test.go @@ -102,7 +102,8 @@ func TestEndpointManagerAtomicity(t *testing.T) { err = em.Update(context.TODO(), []*endpoints.UpdateWithOpts{ endpoints.NewAddUpdateOpts("foo/host", endpoints.Endpoint{Addr: "127.0.0.1:2000"}), - endpoints.NewAddUpdateOpts("foo/host2", endpoints.Endpoint{Addr: "127.0.0.1:2001"})}) + endpoints.NewAddUpdateOpts("foo/host2", endpoints.Endpoint{Addr: "127.0.0.1:2001"}), + }) require.NoError(t, err) ctx, watchCancel := context.WithCancel(context.Background()) diff --git a/tests/integration/clientv3/ordering_kv_test.go b/tests/integration/clientv3/ordering_kv_test.go index 75fcf9cce29..2ba6aaa8801 100644 --- a/tests/integration/clientv3/ordering_kv_test.go +++ b/tests/integration/clientv3/ordering_kv_test.go @@ -29,7 +29,7 @@ import ( ) func TestDetectKvOrderViolation(t *testing.T) { - var errOrderViolation = errors.New("DetectedOrderViolation") + errOrderViolation := errors.New("DetectedOrderViolation") integration2.BeforeTest(t) clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) @@ -43,27 +43,21 @@ func TestDetectKvOrderViolation(t *testing.T) { }, } cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer func() { assert.NoError(t, cli.Close()) }() ctx := context.TODO() - if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(ctx, "foo", "bar") + require.NoError(t, err) // ensure that the second member has the current revision for the key foo - if _, err = clus.Client(1).Get(ctx, "foo"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Get(ctx, "foo") + require.NoError(t, err) // stop third member in order to force the member to have an outdated revision clus.Members[2].Stop(t) time.Sleep(1 * time.Second) // give enough time for operation _, err = cli.Put(ctx, "foo", "buzz") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // perform get request against the first member, in order to // set up kvOrdering to expect "foo" revisions greater than that of @@ -73,9 +67,7 @@ func TestDetectKvOrderViolation(t *testing.T) { return errOrderViolation }) v, err := orderingKv.Get(ctx, "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) t.Logf("Read from the first member: v:%v err:%v", v, err) assert.Equal(t, []byte("buzz"), v.Kvs[0].Value) @@ -96,7 +88,7 @@ func TestDetectKvOrderViolation(t *testing.T) { } func TestDetectTxnOrderViolation(t *testing.T) { - var errOrderViolation = errors.New("DetectedOrderViolation") + errOrderViolation := errors.New("DetectedOrderViolation") integration2.BeforeTest(t) clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) @@ -110,26 +102,21 @@ func TestDetectTxnOrderViolation(t *testing.T) { }, } cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer func() { assert.NoError(t, cli.Close()) }() ctx := context.TODO() - if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(ctx, "foo", "bar") + require.NoError(t, err) // ensure that the second member has the current revision for the key foo - if _, err = clus.Client(1).Get(ctx, "foo"); err != nil { - 
t.Fatal(err) - } + _, err = clus.Client(1).Get(ctx, "foo") + require.NoError(t, err) // stop third member in order to force the member to have an outdated revision clus.Members[2].Stop(t) time.Sleep(1 * time.Second) // give enough time for operation - if _, err = clus.Client(1).Put(ctx, "foo", "buzz"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Put(ctx, "foo", "buzz") + require.NoError(t, err) // perform get request against the first member, in order to // set up kvOrdering to expect "foo" revisions greater than that of @@ -144,9 +131,7 @@ func TestDetectTxnOrderViolation(t *testing.T) { ).Then( clientv3.OpGet("foo"), ).Commit() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // ensure that only the third member is queried during requests clus.Members[0].Stop(t) diff --git a/tests/integration/clientv3/ordering_util_test.go b/tests/integration/clientv3/ordering_util_test.go index 6313957bf3f..4bd3d32bae3 100644 --- a/tests/integration/clientv3/ordering_util_test.go +++ b/tests/integration/clientv3/ordering_util_test.go @@ -20,6 +20,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/ordering" integration2 "go.etcd.io/etcd/tests/v3/framework/integration" @@ -39,20 +41,16 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) { } cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCURL}} cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() ctx := context.TODO() - if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(ctx, "foo", "bar") + require.NoError(t, err) // ensure that the second member has current revision for key "foo" - if _, err = clus.Client(1).Get(ctx, "foo"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Get(ctx, "foo") + require.NoError(t, err) // create partition between third members and the first two members // in order to guarantee that the third member's revision of "foo" @@ -61,9 +59,8 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) { time.Sleep(1 * time.Second) // give enough time for the operation // update to "foo" will not be replicated to the third member due to the partition - if _, err = clus.Client(1).Put(ctx, "foo", "buzz"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Put(ctx, "foo", "buzz") + require.NoError(t, err) cli.SetEndpoints(eps...) 
time.Sleep(1 * time.Second) // give enough time for the operation @@ -71,9 +68,7 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) { // set prevRev to the second member's revision of "foo" such that // the revision is higher than the third member's revision of "foo" _, err = orderingKv.Get(ctx, "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) t.Logf("Reconfigure client to speak only to the 'partitioned' member") cli.SetEndpoints(clus.Members[2].GRPCURL) @@ -106,9 +101,7 @@ func TestUnresolvableOrderViolation(t *testing.T) { }, } cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() eps := cli.Endpoints() ctx := context.TODO() @@ -116,9 +109,7 @@ func TestUnresolvableOrderViolation(t *testing.T) { cli.SetEndpoints(clus.Members[0].GRPCURL) time.Sleep(1 * time.Second) _, err = cli.Put(ctx, "foo", "bar") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // stop fourth member in order to force the member to have an outdated revision clus.Members[3].Stop(t) @@ -127,9 +118,7 @@ func TestUnresolvableOrderViolation(t *testing.T) { clus.Members[4].Stop(t) time.Sleep(1 * time.Second) // give enough time for operation _, err = cli.Put(ctx, "foo", "buzz") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cli.SetEndpoints(eps...) time.Sleep(1 * time.Second) // give enough time for operation @@ -137,21 +126,15 @@ func TestUnresolvableOrderViolation(t *testing.T) { // set prevRev to the first member's revision of "foo" such that // the revision is higher than the fourth and fifth members' revision of "foo" _, err = OrderingKv.Get(ctx, "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) clus.Members[0].Stop(t) clus.Members[1].Stop(t) clus.Members[2].Stop(t) err = clus.Members[3].Restart(t) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = clus.Members[4].Restart(t) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) clus.Members[3].WaitStarted(t) cli.SetEndpoints(clus.Members[3].GRPCURL) time.Sleep(1 * time.Second) // give enough time for operation diff --git a/tests/integration/clientv3/txn_test.go b/tests/integration/clientv3/txn_test.go index 221247d2f7a..1e0e247f23e 100644 --- a/tests/integration/clientv3/txn_test.go +++ b/tests/integration/clientv3/txn_test.go @@ -21,6 +21,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/embed" @@ -150,14 +152,10 @@ func TestTxnSuccess(t *testing.T) { ctx := context.TODO() _, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar")).Commit() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) resp, err := kv.Get(ctx, "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" { t.Fatalf("unexpected Get response %v", resp) } @@ -171,12 +169,9 @@ func TestTxnCompareRange(t *testing.T) { kv := clus.Client(0) fooResp, err := kv.Put(context.TODO(), "foo/", "bar") - if err != nil { - t.Fatal(err) - } - if _, err = kv.Put(context.TODO(), "foo/a", "baz"); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + _, err = kv.Put(context.TODO(), "foo/a", "baz") + require.NoError(t, err) tresp, terr := kv.Txn(context.TODO()).If( clientv3.Compare( clientv3.CreateRevision("foo/"), "=", fooResp.Header.Revision). 
@@ -204,25 +199,19 @@ func TestTxnNested(t *testing.T) { clientv3.OpPut("foo", "bar"), clientv3.OpTxn(nil, []clientv3.Op{clientv3.OpPut("abc", "123")}, nil)). Else(clientv3.OpPut("foo", "baz")).Commit() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(tresp.Responses) != 2 { t.Errorf("expected 2 top-level txn responses, got %+v", tresp.Responses) } // check txn writes were applied resp, err := kv.Get(context.TODO(), "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "bar" { t.Errorf("unexpected Get response %+v", resp) } resp, err = kv.Get(context.TODO(), "abc") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "123" { t.Errorf("unexpected Get response %+v", resp) } diff --git a/tests/integration/clientv3/user_test.go b/tests/integration/clientv3/user_test.go index e1f20fe1986..201740fefcf 100644 --- a/tests/integration/clientv3/user_test.go +++ b/tests/integration/clientv3/user_test.go @@ -37,9 +37,7 @@ func TestUserError(t *testing.T) { authapi := clus.RandClient() _, err := authapi.UserAdd(context.TODO(), "foo", "bar") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) _, err = authapi.UserAdd(context.TODO(), "foo", "bar") if !errors.Is(err, rpctypes.ErrUserAlreadyExist) { @@ -138,29 +136,22 @@ func TestUserErrorAuth(t *testing.T) { cfg.Username, cfg.Password = "root", "123" authed, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer authed.Close() - if _, err := authed.UserList(context.TODO()); err != nil { - t.Fatal(err) - } + _, err = authed.UserList(context.TODO()) + require.NoError(t, err) } func authSetupRoot(t *testing.T, auth clientv3.Auth) { - if _, err := auth.UserAdd(context.TODO(), "root", "123"); err != nil { - t.Fatal(err) - } - if _, err := auth.RoleAdd(context.TODO(), "root"); err != nil { - t.Fatal(err) - } - if _, err := auth.UserGrantRole(context.TODO(), "root", "root"); err != nil { - t.Fatal(err) - } - if _, err := auth.AuthEnable(context.TODO()); err != nil { - t.Fatal(err) - } + _, err := auth.UserAdd(context.TODO(), "root", "123") + require.NoError(t, err) + _, err = auth.RoleAdd(context.TODO(), "root") + require.NoError(t, err) + _, err = auth.UserGrantRole(context.TODO(), "root", "root") + require.NoError(t, err) + _, err = auth.AuthEnable(context.TODO()) + require.NoError(t, err) } // TestGetTokenWithoutAuth is when Client can connect to etcd even if they @@ -177,9 +168,8 @@ func TestGetTokenWithoutAuth(t *testing.T) { var client *clientv3.Client // make sure "auth" was disabled - if _, err = authapi.AuthDisable(context.TODO()); err != nil { - t.Fatal(err) - } + _, err = authapi.AuthDisable(context.TODO()) + require.NoError(t, err) // "Username" and "Password" must be used cfg := clientv3.Config{ diff --git a/tests/integration/clientv3/util.go b/tests/integration/clientv3/util.go index 623e575be11..0e84115704c 100644 --- a/tests/integration/clientv3/util.go +++ b/tests/integration/clientv3/util.go @@ -114,7 +114,6 @@ func populateDataIntoCluster(t *testing.T, cluster *integration2.Cluster, numKey for i := 0; i < numKeys; i++ { _, err := cluster.RandClient().Put(ctx, fmt.Sprintf("%s-%v", t.Name(), i), strings.Repeat("a", valueSize)) - if err != nil { t.Errorf("populating data expected no error, but got %v", err) } diff --git a/tests/integration/clientv3/watch_test.go b/tests/integration/clientv3/watch_test.go index 
5f229c554ea..97439ff8f4b 100644 --- a/tests/integration/clientv3/watch_test.go +++ b/tests/integration/clientv3/watch_test.go @@ -160,9 +160,8 @@ func testWatchMultiWatcher(t *testing.T, wctx *watchctx) { for i := 0; i < numKeyUpdates; i++ { for _, k := range keys { v := fmt.Sprintf("%s-%d", k, i) - if _, err := wctx.kv.Put(ctx, k, v); err != nil { - t.Fatal(err) - } + _, err := wctx.kv.Put(ctx, k, v) + require.NoError(t, err) } } } @@ -216,9 +215,8 @@ func testWatchReconnRequest(t *testing.T, wctx *watchctx) { // spinning on dropping connections may trigger a leader election // due to resource starvation; l-read to ensure the cluster is stable ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) - if _, err := wctx.kv.Get(ctx, "_"); err != nil { - t.Fatal(err) - } + _, err := wctx.kv.Get(ctx, "_") + require.NoError(t, err) cancel() // ensure watcher works @@ -308,9 +306,8 @@ func testWatchCancelRunning(t *testing.T, wctx *watchctx) { if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil { t.Fatalf("expected non-nil watcher channel") } - if _, err := wctx.kv.Put(ctx, "a", "a"); err != nil { - t.Fatal(err) - } + _, err := wctx.kv.Put(ctx, "a", "a") + require.NoError(t, err) cancel() select { case <-time.After(time.Second): @@ -333,9 +330,8 @@ func testWatchCancelRunning(t *testing.T, wctx *watchctx) { } func putAndWatch(t *testing.T, wctx *watchctx, key, val string) { - if _, err := wctx.kv.Put(context.TODO(), key, val); err != nil { - t.Fatal(err) - } + _, err := wctx.kv.Put(context.TODO(), key, val) + require.NoError(t, err) select { case <-time.After(5 * time.Second): t.Fatalf("watch timed out") @@ -360,16 +356,13 @@ func TestWatchResumeAfterDisconnect(t *testing.T) { defer clus.Terminate(t) cli := clus.Client(0) - if _, err := cli.Put(context.TODO(), "b", "2"); err != nil { - t.Fatal(err) - } - if _, err := cli.Put(context.TODO(), "a", "3"); err != nil { - t.Fatal(err) - } + _, err := cli.Put(context.TODO(), "b", "2") + require.NoError(t, err) + _, err = cli.Put(context.TODO(), "a", "3") + require.NoError(t, err) // if resume is broken, it'll pick up this key first instead of a=3 - if _, err := cli.Put(context.TODO(), "a", "4"); err != nil { - t.Fatal(err) - } + _, err = cli.Put(context.TODO(), "a", "4") + require.NoError(t, err) // watch from revision 1 wch := clus.Client(0).Watch(context.Background(), "a", clientv3.WithRev(1), clientv3.WithCreatedNotify()) @@ -441,9 +434,8 @@ func TestWatchResumeCompacted(t *testing.T) { _, err := kv.Put(context.TODO(), "foo", "bar") require.NoError(t, err) } - if _, err := kv.Compact(context.TODO(), 3); err != nil { - t.Fatal(err) - } + _, err := kv.Compact(context.TODO(), 3) + require.NoError(t, err) clus.Members[0].Restart(t) @@ -509,9 +501,8 @@ func TestWatchCompactRevision(t *testing.T) { w := clus.RandClient() - if _, err := kv.Compact(context.TODO(), 4); err != nil { - t.Fatal(err) - } + _, err := kv.Compact(context.TODO(), 4) + require.NoError(t, err) wch := w.Watch(context.Background(), "foo", clientv3.WithRev(2)) // get compacted error message @@ -566,9 +557,8 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) { } kvc := clus.RandClient() - if _, err := kvc.Put(context.TODO(), "foox", "bar"); err != nil { - t.Fatal(err) - } + _, err := kvc.Put(context.TODO(), "foox", "bar") + require.NoError(t, err) select { case resp := <-rch: @@ -576,8 +566,10 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) { t.Fatalf("resp.Header.Revision expected 2, got %d", resp.Header.Revision) } if watchOnPut { // wait 
for put if watch on the put key - ev := []*clientv3.Event{{Type: clientv3.EventTypePut, - Kv: &mvccpb.KeyValue{Key: []byte("foox"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}}} + ev := []*clientv3.Event{{ + Type: clientv3.EventTypePut, + Kv: &mvccpb.KeyValue{Key: []byte("foox"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}, + }} if !reflect.DeepEqual(ev, resp.Events) { t.Fatalf("expected %+v, got %+v", ev, resp.Events) } @@ -646,9 +638,7 @@ func TestWatchRequestProgress(t *testing.T) { } _, err := wc.Put(context.Background(), "/a", "1") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for _, rch := range watchChans { select { @@ -663,14 +653,10 @@ func TestWatchRequestProgress(t *testing.T) { // put a value not being watched to increment revision _, err = wc.Put(context.Background(), "x", "1") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = wc.RequestProgress(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // verify all watch channels receive a progress notify for _, rch := range watchChans { @@ -782,9 +768,8 @@ func TestWatchErrConnClosed(t *testing.T) { } }() - if err := cli.ActiveConnection().Close(); err != nil { - t.Fatal(err) - } + err := cli.ActiveConnection().Close() + require.NoError(t, err) clus.TakeClient(0) select { @@ -802,9 +787,7 @@ func TestWatchAfterClose(t *testing.T) { cli := clus.Client(0) clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } + require.NoError(t, cli.Close()) donec := make(chan struct{}) go func() { @@ -833,9 +816,8 @@ func TestWatchWithRequireLeader(t *testing.T) { // ensure that it receives the update so watching after killing quorum // is guaranteed to have the key. liveClient := clus.Client(0) - if _, err := liveClient.Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err := liveClient.Put(context.TODO(), "foo", "bar") + require.NoError(t, err) clus.Members[1].Stop(t) clus.Members[2].Stop(t) @@ -883,13 +865,9 @@ func TestWatchWithRequireLeader(t *testing.T) { `type="stream"`, fmt.Sprintf(`client_api_version="%v"`, version.APIVersion), ) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cv, err := strconv.ParseInt(cnt, 10, 32) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if cv < 2 { // >2 when retried t.Fatalf("expected at least 2, got %q", cnt) } @@ -908,12 +886,10 @@ func TestWatchWithFilter(t *testing.T) { wcNoPut := client.Watch(ctx, "a", clientv3.WithFilterPut()) wcNoDel := client.Watch(ctx, "a", clientv3.WithFilterDelete()) - if _, err := client.Put(ctx, "a", "abc"); err != nil { - t.Fatal(err) - } - if _, err := client.Delete(ctx, "a"); err != nil { - t.Fatal(err) - } + _, err := client.Put(ctx, "a", "abc") + require.NoError(t, err) + _, err = client.Delete(ctx, "a") + require.NoError(t, err) npResp := <-wcNoPut if len(npResp.Events) != 1 || npResp.Events[0].Type != clientv3.EventTypeDelete { @@ -1030,9 +1006,7 @@ func TestWatchCancelOnServer(t *testing.T) { time.Sleep(time.Second) minWatches, err := cluster.Members[0].Metric("etcd_debugging_mvcc_watcher_total") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) maxWatchV, minWatchV := 0, 0 n, serr := fmt.Sscanf(maxWatches+" "+minWatches, "%d %d", &maxWatchV, &minWatchV) @@ -1083,9 +1057,8 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration2.Cluster)) // issue concurrent watches on "abc" with cancel cli := clus.RandClient() - if _, err := cli.Put(context.TODO(), "abc", 
"def"); err != nil { - t.Fatal(err) - } + _, err := cli.Put(context.TODO(), "abc", "def") + require.NoError(t, err) ch := make(chan struct{}, n) tCtx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() @@ -1155,9 +1128,7 @@ func TestWatchCancelAndCloseClient(t *testing.T) { } }() cancel() - if err := cli.Close(); err != nil { - t.Fatal(err) - } + require.NoError(t, cli.Close()) <-donec clus.TakeClient(0) } @@ -1179,9 +1150,7 @@ func TestWatchStressResumeClose(t *testing.T) { } clus.Members[0].Bridge().DropConnections() cancel() - if err := cli.Close(); err != nil { - t.Fatal(err) - } + require.NoError(t, cli.Close()) clus.TakeClient(0) } diff --git a/tests/integration/cluster_test.go b/tests/integration/cluster_test.go index 4aac7e2c824..064a0561144 100644 --- a/tests/integration/cluster_test.go +++ b/tests/integration/cluster_test.go @@ -208,9 +208,8 @@ func TestIssue2681(t *testing.T) { c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5, DisableStrictReconfigCheck: true}) defer c.Terminate(t) - if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[4].Server.MemberID())); err != nil { - t.Fatal(err) - } + err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[4].Server.MemberID())) + require.NoError(t, err) c.WaitMembersForLeader(t, c.Members) c.AddMember(t) @@ -234,9 +233,8 @@ func testIssue2746(t *testing.T, members int) { clusterMustProgress(t, c.Members) } - if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[members-1].Server.MemberID())); err != nil { - t.Fatal(err) - } + err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[members-1].Server.MemberID())) + require.NoError(t, err) c.WaitMembersForLeader(t, c.Members) c.AddMember(t) @@ -312,9 +310,8 @@ func TestIssue3699(t *testing.T) { t.Logf("Restarting member '0'...") // bring back node a // node a will remain useless as long as d is the leader. 
- if err := c.Members[0].Restart(t); err != nil { - t.Fatal(err) - } + err := c.Members[0].Restart(t) + require.NoError(t, err) t.Logf("Restarted member '0'.") select { @@ -530,9 +527,7 @@ func TestConcurrentRemoveMember(t *testing.T) { defer c.Terminate(t) addResp, err := c.Members[0].Client.MemberAddAsLearner(context.Background(), []string{"http://localhost:123"}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) removeID := addResp.Member.ID done := make(chan struct{}) go func() { @@ -540,9 +535,8 @@ func TestConcurrentRemoveMember(t *testing.T) { c.Members[0].Client.MemberRemove(context.Background(), removeID) close(done) }() - if _, err := c.Members[0].Client.MemberRemove(context.Background(), removeID); err != nil { - t.Fatal(err) - } + _, err = c.Members[0].Client.MemberRemove(context.Background(), removeID) + require.NoError(t, err) <-done } @@ -552,9 +546,7 @@ func TestConcurrentMoveLeader(t *testing.T) { defer c.Terminate(t) addResp, err := c.Members[0].Client.MemberAddAsLearner(context.Background(), []string{"http://localhost:123"}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) removeID := addResp.Member.ID done := make(chan struct{}) go func() { @@ -562,8 +554,7 @@ func TestConcurrentMoveLeader(t *testing.T) { c.Members[0].Client.MoveLeader(context.Background(), removeID) close(done) }() - if _, err := c.Members[0].Client.MemberRemove(context.Background(), removeID); err != nil { - t.Fatal(err) - } + _, err = c.Members[0].Client.MemberRemove(context.Background(), removeID) + require.NoError(t, err) <-done } diff --git a/tests/integration/embed/embed_test.go b/tests/integration/embed/embed_test.go index 7c1b86e289b..7a3d11f1cd4 100644 --- a/tests/integration/embed/embed_test.go +++ b/tests/integration/embed/embed_test.go @@ -40,14 +40,12 @@ import ( "go.etcd.io/etcd/tests/v3/framework/testutils" ) -var ( - testTLSInfo = transport.TLSInfo{ - KeyFile: testutils.MustAbsPath("../../fixtures/server.key.insecure"), - CertFile: testutils.MustAbsPath("../../fixtures/server.crt"), - TrustedCAFile: testutils.MustAbsPath("../../fixtures/ca.crt"), - ClientCertAuth: true, - } -) +var testTLSInfo = transport.TLSInfo{ + KeyFile: testutils.MustAbsPath("../../fixtures/server.key.insecure"), + CertFile: testutils.MustAbsPath("../../fixtures/server.crt"), + TrustedCAFile: testutils.MustAbsPath("../../fixtures/ca.crt"), + ClientCertAuth: true, +} func TestEmbedEtcd(t *testing.T) { testutil.SkipTestIfShortMode(t, "Cannot start embedded cluster in --short tests") diff --git a/tests/integration/grpc_test.go b/tests/integration/grpc_test.go index 7061ed61e63..e3b762c31ef 100644 --- a/tests/integration/grpc_test.go +++ b/tests/integration/grpc_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" clientv3 "go.etcd.io/etcd/client/v3" @@ -105,9 +106,7 @@ func TestAuthority(t *testing.T) { putRequestMethod := "/etcdserverpb.KV/Put" for i := 0; i < 100; i++ { _, err := kv.Put(context.TODO(), "foo", "bar") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } assertAuthority(t, tc.expectAuthorityPattern, clus, putRequestMethod) @@ -121,9 +120,7 @@ func setupTLS(t *testing.T, useTLS bool, cfg integration.ClusterConfig) (integra if useTLS { cfg.ClientTLS = &integration.TestTLSInfo tlsConfig, err := integration.TestTLSInfo.ClientConfig() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return cfg, tlsConfig } return cfg, nil @@ -138,9 +135,7 @@ func setupClient(t *testing.T, endpointPattern 
string, clus *integration.Cluster DialOptions: []grpc.DialOption{grpc.WithBlock()}, TLS: tlsConfig, }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return kv } diff --git a/tests/integration/hashkv_test.go b/tests/integration/hashkv_test.go index cbc83d0159e..26d5dfaf040 100644 --- a/tests/integration/hashkv_test.go +++ b/tests/integration/hashkv_test.go @@ -21,6 +21,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/storage/mvcc/testutil" @@ -36,9 +38,7 @@ func TestCompactionHash(t *testing.T) { defer clus.Terminate(t) cc, err := clus.ClusterClient(t) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) client := &http.Client{ Transport: &http.Transport{ DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { diff --git a/tests/integration/member_test.go b/tests/integration/member_test.go index efd6598f684..b012370ab2c 100644 --- a/tests/integration/member_test.go +++ b/tests/integration/member_test.go @@ -58,9 +58,7 @@ func TestRestartMember(t *testing.T) { c.WaitMembersForLeader(t, membs) clusterMustProgress(t, membs) err := c.Members[i].Restart(t) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } c.WaitMembersForLeader(t, c.Members) clusterMustProgress(t, c.Members) diff --git a/tests/integration/metrics_test.go b/tests/integration/metrics_test.go index 047b57ef150..652856ebb4f 100644 --- a/tests/integration/metrics_test.go +++ b/tests/integration/metrics_test.go @@ -38,9 +38,7 @@ func TestMetricDbSizeBoot(t *testing.T) { defer clus.Terminate(t) v, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if v == "0" { t.Fatalf("expected non-zero, got %q", v) @@ -74,40 +72,29 @@ func testMetricDbSizeDefrag(t *testing.T, name string) { expected := numPuts * len(putreq.Value) beforeDefrag, err := clus.Members[0].Metric(name + "_mvcc_db_total_size_in_bytes") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) bv, err := strconv.Atoi(beforeDefrag) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if bv < expected { t.Fatalf("expected db size greater than %d, got %d", expected, bv) } beforeDefragInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) biu, err := strconv.Atoi(beforeDefragInUse) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if biu < expected { t.Fatalf("expected db size in use is greater than %d, got %d", expected, biu) } // clear out historical keys, in use bytes should free pages creq := &pb.CompactionRequest{Revision: int64(numPuts), Physical: true} - if _, kerr := kvc.Compact(context.TODO(), creq); kerr != nil { - t.Fatal(kerr) - } + _, kerr := kvc.Compact(context.TODO(), creq) + require.NoError(t, kerr) validateAfterCompactionInUse := func() error { // Put to move PendingPages to FreePages _, verr := kvc.Put(context.TODO(), putreq) - if verr != nil { - t.Fatal(verr) - } + require.NoError(t, verr) time.Sleep(500 * time.Millisecond) afterCompactionInUse, verr := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes") @@ -142,25 +129,17 @@ func testMetricDbSizeDefrag(t *testing.T, name string) { mc.Defragment(context.TODO(), &pb.DefragmentRequest{}) afterDefrag, err := clus.Members[0].Metric(name + "_mvcc_db_total_size_in_bytes") - if err != nil { - t.Fatal(err) - } + 
require.NoError(t, err) av, err := strconv.Atoi(afterDefrag) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if bv <= av { t.Fatalf("expected less than %d, got %d after defrag", bv, av) } afterDefragInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) adiu, err := strconv.Atoi(afterDefragInUse) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if adiu > av { t.Fatalf("db size in use (%d) is expected less than db size (%d) after defrag", adiu, av) } @@ -172,13 +151,9 @@ func TestMetricQuotaBackendBytes(t *testing.T) { defer clus.Terminate(t) qs, err := clus.Members[0].Metric("etcd_server_quota_backend_bytes") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) qv, err := strconv.ParseFloat(qs, 64) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if int64(qv) != storage.DefaultQuotaBytes { t.Fatalf("expected %d, got %f", storage.DefaultQuotaBytes, qv) } @@ -190,9 +165,7 @@ func TestMetricsHealth(t *testing.T) { defer clus.Terminate(t) tr, err := transport.NewTransport(transport.TLSInfo{}, 5*time.Second) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) u := clus.Members[0].ClientURLs[0] u.Path = "/health" resp, err := tr.RoundTrip(&http.Request{ @@ -201,14 +174,10 @@ func TestMetricsHealth(t *testing.T) { URL: &u, }) resp.Body.Close() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) hv, err := clus.Members[0].Metric("etcd_server_health_failures") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if hv != "0" { t.Fatalf("expected '0' from etcd_server_health_failures, got %q", hv) } @@ -222,7 +191,8 @@ func TestMetricsRangeDurationSeconds(t *testing.T) { client := clus.RandClient() keys := []string{ - "my-namespace/foobar", "my-namespace/foobar1", "namespace/foobar1"} + "my-namespace/foobar", "my-namespace/foobar1", "namespace/foobar1", + } for _, key := range keys { _, err := client.Put(context.Background(), key, "data") require.NoError(t, err) diff --git a/tests/integration/proxy/grpcproxy/cluster_test.go b/tests/integration/proxy/grpcproxy/cluster_test.go index 58ea4e4bd95..ca2fcb506b3 100644 --- a/tests/integration/proxy/grpcproxy/cluster_test.go +++ b/tests/integration/proxy/grpcproxy/cluster_test.go @@ -74,13 +74,13 @@ func TestClusterProxyMemberList(t *testing.T) { } assert.Contains(t, mresp.Members, &pb.Member{Name: hostname, ClientURLs: []string{cts.caddr}}) - //test proxy member add + // test proxy member add newMemberAddr := "127.0.0.2:6789" grpcproxy.Register(lg, cts.c, prefix, newMemberAddr, 7) // wait some time for proxy update members time.Sleep(200 * time.Millisecond) - //check add member succ + // check add member succ mresp, err = client.Cluster.MemberList(context.Background()) if err != nil { t.Fatalf("err %v, want nil", err) @@ -90,12 +90,12 @@ func TestClusterProxyMemberList(t *testing.T) { } assert.Contains(t, mresp.Members, &pb.Member{Name: hostname, ClientURLs: []string{newMemberAddr}}) - //test proxy member delete + // test proxy member delete deregisterMember(cts.c, prefix, newMemberAddr, t) // wait some time for proxy update members time.Sleep(200 * time.Millisecond) - //check delete member succ + // check delete member succ mresp, err = client.Cluster.MemberList(context.Background()) if err != nil { t.Fatalf("err %v, want nil", err) diff --git a/tests/integration/revision_test.go b/tests/integration/revision_test.go index 74792546820..24bcb9066ff 100644 --- 
a/tests/integration/revision_test.go +++ b/tests/integration/revision_test.go @@ -23,6 +23,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "google.golang.org/grpc/status" "go.etcd.io/etcd/tests/v3/framework/integration" @@ -95,7 +97,7 @@ func testRevisionMonotonicWithFailures(t *testing.T, testDuration time.Duration, wg.Add(1) go func() { defer wg.Done() - getWorker(ctx, t, clus) + getWorker(ctx, t, clus) //nolint:testifylint }() } @@ -103,9 +105,7 @@ func testRevisionMonotonicWithFailures(t *testing.T, testDuration time.Duration, wg.Wait() kv := clus.Client(0) resp, err := kv.Get(context.Background(), "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) t.Logf("Revision %d", resp.Header.Revision) } @@ -116,9 +116,7 @@ func putWorker(ctx context.Context, t *testing.T, clus *integration.Cluster) { if errors.Is(err, context.DeadlineExceeded) { return } - if silenceConnectionErrors(err) != nil { - t.Fatal(err) - } + assert.NoError(t, silenceConnectionErrors(err)) } } @@ -130,9 +128,7 @@ func getWorker(ctx context.Context, t *testing.T, clus *integration.Cluster) { if errors.Is(err, context.DeadlineExceeded) { return } - if silenceConnectionErrors(err) != nil { - t.Fatal(err) - } + require.NoError(t, silenceConnectionErrors(err)) if resp == nil { continue } diff --git a/tests/integration/snapshot/v3_snapshot_test.go b/tests/integration/snapshot/v3_snapshot_test.go index b9739e2ab1c..893fa4fd038 100644 --- a/tests/integration/snapshot/v3_snapshot_test.go +++ b/tests/integration/snapshot/v3_snapshot_test.go @@ -205,7 +205,8 @@ const testClusterTkn = "tkn" func restoreCluster(t *testing.T, clusterN int, dbPath string) ( cURLs []url.URL, pURLs []url.URL, - srvs []*embed.Etcd) { + srvs []*embed.Etcd, +) { urls := newEmbedURLs(t, clusterN*2) cURLs, pURLs = urls[:clusterN], urls[clusterN:] diff --git a/tests/integration/testing_test.go b/tests/integration/testing_test.go index dfd75e89344..f49e58d83d7 100644 --- a/tests/integration/testing_test.go +++ b/tests/integration/testing_test.go @@ -24,6 +24,5 @@ func TestBeforeTestWithoutLeakDetection(t *testing.T) { integration2.BeforeTest(t, integration2.WithoutGoLeakDetection(), integration2.WithoutSkipInShort()) // Intentional leak that should get ignored go func() { - }() } diff --git a/tests/integration/tracing_test.go b/tests/integration/tracing_test.go index 744028322d3..d255e958308 100644 --- a/tests/integration/tracing_test.go +++ b/tests/integration/tracing_test.go @@ -40,9 +40,7 @@ func TestTracing(t *testing.T) { "Wal creation tests are depending on embedded etcd server so are integration-level tests.") // set up trace collector listener, err := net.Listen("tcp", "localhost:") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) traceFound := make(chan struct{}) defer close(traceFound) @@ -50,7 +48,8 @@ func TestTracing(t *testing.T) { srv := grpc.NewServer() traceservice.RegisterTraceServiceServer(srv, &traceServer{ traceFound: traceFound, - filterFunc: containsNodeListSpan}) + filterFunc: containsNodeListSpan, + }) go srv.Serve(listener) defer srv.Stop() @@ -63,9 +62,7 @@ func TestTracing(t *testing.T) { // start an etcd instance with tracing enabled etcdSrv, err := embed.StartEtcd(cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer etcdSrv.Close() select { @@ -92,7 +89,8 @@ func TestTracing(t *testing.T) { dialOptions := []grpc.DialOption{ grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor(tracingOpts...)), - 
grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor(tracingOpts...))} + grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor(tracingOpts...)), + } ccfg := clientv3.Config{DialOptions: dialOptions, Endpoints: []string{cfg.AdvertiseClientUrls[0].String()}} cli, err := integration.NewClient(t, ccfg) if err != nil { @@ -141,7 +139,7 @@ type traceServer struct { } func (t *traceServer) Export(ctx context.Context, req *traceservice.ExportTraceServiceRequest) (*traceservice.ExportTraceServiceResponse, error) { - var emptyValue = traceservice.ExportTraceServiceResponse{} + emptyValue := traceservice.ExportTraceServiceResponse{} if t.filterFunc(req) { t.traceFound <- struct{}{} } diff --git a/tests/integration/utl_wal_version_test.go b/tests/integration/utl_wal_version_test.go index 33e1b0aecd4..d3646735d65 100644 --- a/tests/integration/utl_wal_version_test.go +++ b/tests/integration/utl_wal_version_test.go @@ -21,6 +21,7 @@ import ( "github.com/coreos/go-semver/semver" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.uber.org/zap" "go.etcd.io/etcd/client/pkg/v3/testutil" @@ -37,9 +38,7 @@ func TestEtcdVersionFromWAL(t *testing.T) { "Wal creation tests are depending on embedded etcd server so are integration-level tests.") cfg := integration.NewEmbedConfig(t, "default") srv, err := embed.StartEtcd(cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) select { case <-srv.Server.ReadyNotify(): case <-time.After(3 * time.Second): @@ -76,15 +75,11 @@ func TestEtcdVersionFromWAL(t *testing.T) { srv.Close() w, err := wal.Open(zap.NewNop(), cfg.Dir+"/member/wal", walpb.Snapshot{}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer w.Close() walVersion, err := wal.ReadWALVersion(w) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) assert.Equal(t, &semver.Version{Major: 3, Minor: 6}, walVersion.MinimalEtcdVersion()) } diff --git a/tests/integration/v2store/main_test.go b/tests/integration/v2store/main_test.go index 89026c5a773..b5534268a0b 100644 --- a/tests/integration/v2store/main_test.go +++ b/tests/integration/v2store/main_test.go @@ -20,8 +20,6 @@ import ( "go.etcd.io/etcd/client/pkg/v3/testutil" ) -//var endpoints []string - func TestMain(m *testing.M) { //cfg := integration.ClusterConfig{Size: 1} //clus := integration.NewClusterV3(nil, &cfg) diff --git a/tests/integration/v3_alarm_test.go b/tests/integration/v3_alarm_test.go index 50a701e6809..4b1218caca7 100644 --- a/tests/integration/v3_alarm_test.go +++ b/tests/integration/v3_alarm_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -63,15 +64,11 @@ func TestV3StorageQuotaApply(t *testing.T) { // test big put bigbuf := make([]byte, quotasize) _, err := kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // quorum get should work regardless of whether alarm is raised _, err = kvc0.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // wait until alarm is raised for sure-- poll the alarms stopc := time.After(5 * time.Second) @@ -111,9 +108,7 @@ func TestV3StorageQuotaApply(t *testing.T) { }, }, }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) ctx, cancel := context.WithTimeout(context.TODO(), integration.RequestWaitTimeout) defer cancel() @@ -152,25 +147,22 @@ func TestV3AlarmDeactivate(t 
*testing.T) { Action: pb.AlarmRequest_ACTIVATE, Alarm: pb.AlarmType_NOSPACE, } - if _, err := mt.Alarm(context.TODO(), alarmReq); err != nil { - t.Fatal(err) - } + _, err := mt.Alarm(context.TODO(), alarmReq) + require.NoError(t, err) key := []byte("abc") smallbuf := make([]byte, 512) - _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}) + _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}) if err == nil && !eqErrGRPC(err, rpctypes.ErrGRPCNoSpace) { t.Fatalf("put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace) } alarmReq.Action = pb.AlarmRequest_DEACTIVATE - if _, err = mt.Alarm(context.TODO(), alarmReq); err != nil { - t.Fatal(err) - } + _, err = mt.Alarm(context.TODO(), alarmReq) + require.NoError(t, err) - if _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err != nil { - t.Fatal(err) - } + _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}) + require.NoError(t, err) } func TestV3CorruptAlarm(t *testing.T) { @@ -209,15 +201,12 @@ func TestV3CorruptAlarm(t *testing.T) { time.Sleep(time.Second * 2) // Wait for cluster so Puts succeed in case member 0 was the leader. - if _, err := clus.Client(1).Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err := clus.Client(1).Put(context.TODO(), "xyz", "321"); err != nil { - t.Fatal(err) - } - if _, err := clus.Client(1).Put(context.TODO(), "abc", "fed"); err != nil { - t.Fatal(err) - } + _, err := clus.Client(1).Get(context.TODO(), "k") + require.NoError(t, err) + _, err = clus.Client(1).Put(context.TODO(), "xyz", "321") + require.NoError(t, err) + _, err = clus.Client(1).Put(context.TODO(), "abc", "fed") + require.NoError(t, err) // Restart with corruption checking enabled. clus.Members[1].Stop(t) @@ -231,14 +220,10 @@ func TestV3CorruptAlarm(t *testing.T) { clus.Members[0].WaitStarted(t) resp0, err0 := clus.Client(0).Get(context.TODO(), "abc") - if err0 != nil { - t.Fatal(err0) - } + require.NoError(t, err0) clus.Members[1].WaitStarted(t) resp1, err1 := clus.Client(1).Get(context.TODO(), "abc") - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) if resp0.Kvs[0].ModRevision == resp1.Kvs[0].ModRevision { t.Fatalf("matching ModRevision values") @@ -290,9 +275,7 @@ func TestV3CorruptAlarmWithLeaseCorrupted(t *testing.T) { } } - if err = clus.RemoveMember(t, clus.Client(1), uint64(clus.Members[2].ID())); err != nil { - t.Fatal(err) - } + require.NoError(t, clus.RemoveMember(t, clus.Client(1), uint64(clus.Members[2].ID()))) clus.WaitMembersForLeader(t, clus.Members) clus.AddMember(t) @@ -314,30 +297,20 @@ func TestV3CorruptAlarmWithLeaseCorrupted(t *testing.T) { schema.MustUnsafePutLease(tx, &lpb) tx.Commit() - if err = be.Close(); err != nil { - t.Fatal(err) - } + require.NoError(t, be.Close()) - if err = clus.Members[2].Restart(t); err != nil { - t.Fatal(err) - } + require.NoError(t, clus.Members[2].Restart(t)) clus.Members[1].WaitOK(t) clus.Members[2].WaitOK(t) // Revoke lease should remove key except the member with corruption _, err = integration.ToGRPC(clus.Members[0].Client).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) resp0, err0 := clus.Members[1].Client.KV.Get(context.TODO(), "foo") - if err0 != nil { - t.Fatal(err0) - } + require.NoError(t, err0) resp1, err1 := clus.Members[2].Client.KV.Get(context.TODO(), "foo") - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) if resp0.Header.Revision == resp1.Header.Revision { 
t.Fatalf("matching Revision values") diff --git a/tests/integration/v3_auth_test.go b/tests/integration/v3_auth_test.go index d62ea7a03f1..f19325049a6 100644 --- a/tests/integration/v3_auth_test.go +++ b/tests/integration/v3_auth_test.go @@ -101,9 +101,8 @@ func TestV3AuthTokenWithDisable(t *testing.T) { }() time.Sleep(10 * time.Millisecond) - if _, err := c.AuthDisable(context.TODO()); err != nil { - t.Fatal(err) - } + _, err := c.AuthDisable(context.TODO()) + require.NoError(t, err) time.Sleep(10 * time.Millisecond) cancel() @@ -168,14 +167,11 @@ func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg integration.ClusterCon defer rootc.Close() leaseResp, err := rootc.Grant(context.TODO(), 2) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseID := leaseResp.ID - if _, err = rootc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(leaseID)); err != nil { - t.Fatal(err) - } + _, err = rootc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(leaseID)) + require.NoError(t, err) // wait for lease expire time.Sleep(3 * time.Second) @@ -229,15 +225,11 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) { defer rootc.Close() leaseResp, err := rootc.Grant(context.TODO(), 90) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseID := leaseResp.ID // permission of k3 isn't granted to user1 _, err = rootc.Put(context.TODO(), "k3", "val", clientv3.WithLease(leaseID)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) userc, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) if cerr != nil { @@ -288,31 +280,21 @@ func TestV3AuthWithLeaseAttach(t *testing.T) { defer user2c.Close() leaseResp, err := user1c.Grant(context.TODO(), 90) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseID := leaseResp.ID // permission of k2 is also granted to user2 _, err = user1c.Put(context.TODO(), "k2", "val", clientv3.WithLease(leaseID)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) _, err = user2c.Revoke(context.TODO(), leaseID) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseResp, err = user1c.Grant(context.TODO(), 90) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseID = leaseResp.ID // permission of k1 isn't granted to user2 _, err = user1c.Put(context.TODO(), "k1", "val", clientv3.WithLease(leaseID)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) _, err = user2c.Revoke(context.TODO(), leaseID) if err == nil { @@ -353,9 +335,8 @@ func authSetupRoot(t *testing.T, auth pb.AuthClient) { }, } authSetupUsers(t, auth, root) - if _, err := auth.AuthEnable(context.TODO(), &pb.AuthEnableRequest{}); err != nil { - t.Fatal(err) - } + _, err := auth.AuthEnable(context.TODO(), &pb.AuthEnableRequest{}) + require.NoError(t, err) } func TestV3AuthNonAuthorizedRPCs(t *testing.T) { diff --git a/tests/integration/v3_election_test.go b/tests/integration/v3_election_test.go index 350bfb354dd..b49097ca861 100644 --- a/tests/integration/v3_election_test.go +++ b/tests/integration/v3_election_test.go @@ -155,9 +155,8 @@ func TestElectionFailover(t *testing.T) { }() // invoke leader failover - if err := ss[0].Close(); err != nil { - t.Fatal(err) - } + err := ss[0].Close() + require.NoError(t, err) // check new leader e = concurrency.NewElection(ss[2], "test-election") @@ -192,13 +191,11 @@ func TestElectionSessionRecampaign(t *testing.T) { defer session.Orphan() e := concurrency.NewElection(session, "test-elect") - if err := 
e.Campaign(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } + err = e.Campaign(context.TODO(), "abc") + require.NoError(t, err) e2 := concurrency.NewElection(session, "test-elect") - if err := e2.Campaign(context.TODO(), "def"); err != nil { - t.Fatal(err) - } + err = e2.Campaign(context.TODO(), "def") + require.NoError(t, err) ctx, cancel := context.WithCancel(context.TODO()) defer cancel() @@ -217,22 +214,19 @@ func TestElectionOnPrefixOfExistingKey(t *testing.T) { defer clus.Terminate(t) cli := clus.RandClient() - if _, err := cli.Put(context.TODO(), "testa", "value"); err != nil { - t.Fatal(err) - } + _, err := cli.Put(context.TODO(), "testa", "value") + require.NoError(t, err) s, serr := concurrency.NewSession(cli) if serr != nil { t.Fatal(serr) } e := concurrency.NewElection(s, "test") ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) - err := e.Campaign(ctx, "abc") + err = e.Campaign(ctx, "abc") cancel() - if err != nil { - // after 5 seconds, deadlock results in - // 'context deadline exceeded' here. - t.Fatal(err) - } + // after 5 seconds, deadlock results in + // 'context deadline exceeded' here. + require.NoError(t, err) } // TestElectionOnSessionRestart tests that a quick restart of leader (resulting @@ -245,9 +239,7 @@ func TestElectionOnSessionRestart(t *testing.T) { cli := clus.RandClient() session, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) e := concurrency.NewElection(session, "test-elect") if cerr := e.Campaign(context.TODO(), "abc"); cerr != nil { @@ -293,9 +285,7 @@ func TestElectionObserveCompacted(t *testing.T) { cli := clus.Client(0) session, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer session.Orphan() e := concurrency.NewElection(session, "test-elect") diff --git a/tests/integration/v3_failover_test.go b/tests/integration/v3_failover_test.go index d71dd267b85..c3cce80fb2a 100644 --- a/tests/integration/v3_failover_test.go +++ b/tests/integration/v3_failover_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" @@ -56,9 +57,7 @@ func TestFailover(t *testing.T) { defer clus.Terminate(t) cc, err := integration2.TestTLSInfo.ClientConfig() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Create an etcd client before or after first server down t.Logf("Creating an etcd client [%s]", tc.name) cli, err := tc.testFunc(t, cc, clus) @@ -120,7 +119,6 @@ func putWithRetries(t *testing.T, cli *clientv3.Client, key, val string, retryCo } return nil }() - if err != nil { retryCount-- if shouldRetry(err) { @@ -153,7 +151,6 @@ func getWithRetries(t *testing.T, cli *clientv3.Client, key, val string, retryCo } return nil }() - if err != nil { retryCount-- if shouldRetry(err) { diff --git a/tests/integration/v3_grpc_inflight_test.go b/tests/integration/v3_grpc_inflight_test.go index 7968e614edc..3d8b446e59b 100644 --- a/tests/integration/v3_grpc_inflight_test.go +++ b/tests/integration/v3_grpc_inflight_test.go @@ -20,6 +20,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -38,9 +39,8 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) { cli := clus.RandClient() kvc := integration.ToGRPC(cli).KV - if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { - 
t.Fatal(err) - } + _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) + require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), time.Second) @@ -69,9 +69,8 @@ func TestV3KVInflightRangeRequests(t *testing.T) { cli := clus.RandClient() kvc := integration.ToGRPC(cli).KV - if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { - t.Fatal(err) - } + _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) + require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) diff --git a/tests/integration/v3_grpc_test.go b/tests/integration/v3_grpc_test.go index e93f30e3b13..8396ffa18e6 100644 --- a/tests/integration/v3_grpc_test.go +++ b/tests/integration/v3_grpc_test.go @@ -135,11 +135,10 @@ func TestV3CompactCurrentRev(t *testing.T) { } } // get key to add to proxy cache, if any - if _, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}); err != nil { - t.Fatal(err) - } + _, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) + require.NoError(t, err) // compact on current revision - _, err := kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 4}) + _, err = kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 4}) if err != nil { t.Fatalf("couldn't compact kv space (%v)", err) } @@ -166,15 +165,11 @@ func TestV3HashKV(t *testing.T) { for i := 0; i < 10; i++ { resp, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte(fmt.Sprintf("bar%d", i))}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rev := resp.Header.Revision hresp, err := mvc.HashKV(context.Background(), &pb.HashKVRequest{Revision: 0}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if rev != hresp.Header.Revision { t.Fatalf("Put rev %v != HashKV rev %v", rev, hresp.Header.Revision) } @@ -183,9 +178,7 @@ func TestV3HashKV(t *testing.T) { prevCompactRev := hresp.CompactRevision for i := 0; i < 10; i++ { hresp, err := mvc.HashKV(context.Background(), &pb.HashKVRequest{Revision: 0}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if rev != hresp.Header.Revision { t.Fatalf("Put rev %v != HashKV rev %v", rev, hresp.Header.Revision) } @@ -253,9 +246,10 @@ func TestV3TxnTooManyOps(t *testing.T) { newTxn := &pb.TxnRequest{} addSuccessOps(newTxn) txn.Success = append(txn.Success, - &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ - RequestTxn: newTxn, - }, + &pb.RequestOp{ + Request: &pb.RequestOp_RequestTxn{ + RequestTxn: newTxn, + }, }, ) } @@ -286,44 +280,53 @@ func TestV3TxnDuplicateKeys(t *testing.T) { defer clus.Terminate(t) putreq := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")}}} - delKeyReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{ - RequestDeleteRange: &pb.DeleteRangeRequest{ - Key: []byte("abc"), + delKeyReq := &pb.RequestOp{ + Request: &pb.RequestOp_RequestDeleteRange{ + RequestDeleteRange: &pb.DeleteRangeRequest{ + Key: []byte("abc"), + }, }, - }, } - delInRangeReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{ - RequestDeleteRange: &pb.DeleteRangeRequest{ - Key: []byte("a"), RangeEnd: []byte("b"), + delInRangeReq := &pb.RequestOp{ + Request: &pb.RequestOp_RequestDeleteRange{ + RequestDeleteRange: &pb.DeleteRangeRequest{ + Key: []byte("a"), RangeEnd: []byte("b"), + }, }, - }, } - 
delOutOfRangeReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{ - RequestDeleteRange: &pb.DeleteRangeRequest{ - Key: []byte("abb"), RangeEnd: []byte("abc"), + delOutOfRangeReq := &pb.RequestOp{ + Request: &pb.RequestOp_RequestDeleteRange{ + RequestDeleteRange: &pb.DeleteRangeRequest{ + Key: []byte("abb"), RangeEnd: []byte("abc"), + }, }, - }, } - txnDelReq := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ - RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{delInRangeReq}}, - }, + txnDelReq := &pb.RequestOp{ + Request: &pb.RequestOp_RequestTxn{ + RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{delInRangeReq}}, + }, } - txnDelReqTwoSide := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ - RequestTxn: &pb.TxnRequest{ - Success: []*pb.RequestOp{delInRangeReq}, - Failure: []*pb.RequestOp{delInRangeReq}}, - }, + txnDelReqTwoSide := &pb.RequestOp{ + Request: &pb.RequestOp_RequestTxn{ + RequestTxn: &pb.TxnRequest{ + Success: []*pb.RequestOp{delInRangeReq}, + Failure: []*pb.RequestOp{delInRangeReq}, + }, + }, } - txnPutReq := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ - RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{putreq}}, - }, + txnPutReq := &pb.RequestOp{ + Request: &pb.RequestOp_RequestTxn{ + RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{putreq}}, + }, } - txnPutReqTwoSide := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ - RequestTxn: &pb.TxnRequest{ - Success: []*pb.RequestOp{putreq}, - Failure: []*pb.RequestOp{putreq}}, - }, + txnPutReqTwoSide := &pb.RequestOp{ + Request: &pb.RequestOp_RequestTxn{ + RequestTxn: &pb.TxnRequest{ + Success: []*pb.RequestOp{putreq}, + Failure: []*pb.RequestOp{putreq}, + }, + }, } kvc := integration.ToGRPC(clus.RandClient()).KV @@ -406,16 +409,12 @@ func TestV3TxnRevision(t *testing.T) { kvc := integration.ToGRPC(clus.RandClient()).KV pr := &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")} presp, err := kvc.Put(context.TODO(), pr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txnget := &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: &pb.RangeRequest{Key: []byte("abc")}}} txn := &pb.TxnRequest{Success: []*pb.RequestOp{txnget}} tresp, err := kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // did not update revision if presp.Header.Revision != tresp.Header.Revision { @@ -425,9 +424,7 @@ func TestV3TxnRevision(t *testing.T) { txndr := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: &pb.DeleteRangeRequest{Key: []byte("def")}}} txn = &pb.TxnRequest{Success: []*pb.RequestOp{txndr}} tresp, err = kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // did not update revision if presp.Header.Revision != tresp.Header.Revision { @@ -437,9 +434,7 @@ func TestV3TxnRevision(t *testing.T) { txnput := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("123")}}} txn = &pb.TxnRequest{Success: []*pb.RequestOp{txnput}} tresp, err = kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // updated revision if tresp.Header.Revision != presp.Header.Revision+1 { @@ -473,7 +468,8 @@ func TestV3TxnCmpHeaderRev(t *testing.T) { // The read-only txn uses the optimized readindex server path. 
txnget := &pb.RequestOp{Request: &pb.RequestOp_RequestRange{ - RequestRange: &pb.RangeRequest{Key: []byte("k")}}} + RequestRange: &pb.RangeRequest{Key: []byte("k")}, + }} txn := &pb.TxnRequest{Success: []*pb.RequestOp{txnget}} // i = 0 /\ Succeeded => put followed txn cmp := &pb.Compare{ @@ -485,9 +481,7 @@ func TestV3TxnCmpHeaderRev(t *testing.T) { txn.Compare = append(txn.Compare, cmp) tresp, err := kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) prev := <-revc err = <-errCh @@ -604,9 +598,7 @@ func TestV3TxnRangeCompare(t *testing.T) { txn := &pb.TxnRequest{} txn.Compare = append(txn.Compare, &tt.cmp) tresp, err := kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if tt.wSuccess != tresp.Succeeded { t.Errorf("#%d: expected %v, got %v", i, tt.wSuccess, tresp.Succeeded) } @@ -653,9 +645,7 @@ func TestV3TxnNestedPath(t *testing.T) { } tresp, err := kvc.Txn(context.TODO(), topTxn) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) curTxnResp := tresp for i := range txnPath { @@ -680,9 +670,7 @@ func TestV3PutIgnoreValue(t *testing.T) { // create lease lc := integration.ToGRPC(clus.RandClient()).Lease lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } @@ -709,7 +697,8 @@ func TestV3PutIgnoreValue(t *testing.T) { preq.IgnoreValue = true txn := &pb.TxnRequest{} txn.Success = append(txn.Success, &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{RequestPut: &preq}}) + Request: &pb.RequestOp_RequestPut{RequestPut: &preq}, + }) _, err := kvc.Txn(context.TODO(), txn) return err }, @@ -732,7 +721,8 @@ func TestV3PutIgnoreValue(t *testing.T) { preq.IgnoreValue = true txn := &pb.TxnRequest{} txn.Success = append(txn.Success, &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{RequestPut: &preq}}) + Request: &pb.RequestOp_RequestPut{RequestPut: &preq}, + }) _, err := kvc.Txn(context.TODO(), txn) return err }, @@ -811,9 +801,7 @@ func TestV3PutIgnoreLease(t *testing.T) { // create lease lc := integration.ToGRPC(clus.RandClient()).Lease lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } @@ -844,7 +832,8 @@ func TestV3PutIgnoreLease(t *testing.T) { preq.IgnoreLease = true txn := &pb.TxnRequest{} txn.Success = append(txn.Success, &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{RequestPut: &preq}}) + Request: &pb.RequestOp_RequestPut{RequestPut: &preq}, + }) _, err := kvc.Txn(context.TODO(), txn) return err }, @@ -870,7 +859,8 @@ func TestV3PutIgnoreLease(t *testing.T) { preq.IgnoreLease = true txn := &pb.TxnRequest{} txn.Success = append(txn.Success, &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{RequestPut: &preq}}) + Request: &pb.RequestOp_RequestPut{RequestPut: &preq}, + }) _, err := kvc.Txn(context.TODO(), txn) return err }, @@ -960,7 +950,9 @@ func TestV3PutMissingLease(t *testing.T) { txn := &pb.TxnRequest{} txn.Success = append(txn.Success, &pb.RequestOp{ Request: &pb.RequestOp_RequestPut{ - RequestPut: preq}}) + RequestPut: preq, + }, + }) if tresp, err := kvc.Txn(context.TODO(), txn); err == nil { t.Errorf("succeeded txn success. req: %v. 
resp: %v", txn, tresp) } @@ -970,7 +962,9 @@ func TestV3PutMissingLease(t *testing.T) { txn := &pb.TxnRequest{} txn.Failure = append(txn.Failure, &pb.RequestOp{ Request: &pb.RequestOp_RequestPut{ - RequestPut: preq}}) + RequestPut: preq, + }, + }) cmp := &pb.Compare{ Result: pb.Compare_GREATER, Target: pb.Compare_CREATE, @@ -987,10 +981,14 @@ func TestV3PutMissingLease(t *testing.T) { rreq := &pb.RangeRequest{Key: []byte("bar")} txn.Success = append(txn.Success, &pb.RequestOp{ Request: &pb.RequestOp_RequestRange{ - RequestRange: rreq}}) + RequestRange: rreq, + }, + }) txn.Failure = append(txn.Failure, &pb.RequestOp{ Request: &pb.RequestOp_RequestPut{ - RequestPut: preq}}) + RequestPut: preq, + }, + }) if tresp, err := kvc.Txn(context.TODO(), txn); err != nil { t.Errorf("failed good txn. req: %v. resp: %v", txn, tresp) } @@ -1028,43 +1026,50 @@ func TestV3DeleteRange(t *testing.T) { "delete middle", []string{"foo", "foo/abc", "fop"}, "foo/", "fop", false, - [][]byte{[]byte("foo"), []byte("fop")}, 1, + [][]byte{[]byte("foo"), []byte("fop")}, + 1, }, { "no delete", []string{"foo", "foo/abc", "fop"}, "foo/", "foo/", false, - [][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")}, 0, + [][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")}, + 0, }, { "delete first", []string{"foo", "foo/abc", "fop"}, "fo", "fop", false, - [][]byte{[]byte("fop")}, 2, + [][]byte{[]byte("fop")}, + 2, }, { "delete tail", []string{"foo", "foo/abc", "fop"}, "foo/", "fos", false, - [][]byte{[]byte("foo")}, 2, + [][]byte{[]byte("foo")}, + 2, }, { "delete exact", []string{"foo", "foo/abc", "fop"}, "foo/abc", "", false, - [][]byte{[]byte("foo"), []byte("fop")}, 1, + [][]byte{[]byte("foo"), []byte("fop")}, + 1, }, { "delete none [x,x)", []string{"foo"}, "foo", "foo", false, - [][]byte{[]byte("foo")}, 0, + [][]byte{[]byte("foo")}, + 0, }, { "delete middle with preserveKVs set", []string{"foo", "foo/abc", "fop"}, "foo/", "fop", true, - [][]byte{[]byte("foo"), []byte("fop")}, 1, + [][]byte{[]byte("foo"), []byte("fop")}, + 1, }, } @@ -1147,12 +1152,16 @@ func TestV3TxnInvalidRange(t *testing.T) { txn := &pb.TxnRequest{} txn.Success = append(txn.Success, &pb.RequestOp{ Request: &pb.RequestOp_RequestPut{ - RequestPut: preq}}) + RequestPut: preq, + }, + }) rreq := &pb.RangeRequest{Key: []byte("foo"), Revision: 100} txn.Success = append(txn.Success, &pb.RequestOp{ Request: &pb.RequestOp_RequestRange{ - RequestRange: rreq}}) + RequestRange: rreq, + }, + }) if _, err := kvc.Txn(context.TODO(), txn); !eqErrGRPC(err, rpctypes.ErrGRPCFutureRev) { t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCFutureRev) @@ -1260,13 +1269,12 @@ func TestV3StorageQuotaAPI(t *testing.T) { // test small put that fits in quota smallbuf := make([]byte, 512) - if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err != nil { - t.Fatal(err) - } + _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}) + require.NoError(t, err) // test big put bigbuf := make([]byte, quotasize) - _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf}) + _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf}) if !eqErrGRPC(err, rpctypes.ErrGRPCNoSpace) { t.Fatalf("big put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace) } @@ -1717,7 +1725,8 @@ func testTLSReload( cloneFunc func() transport.TLSInfo, replaceFunc func(), revertFunc func(), - useIP bool) { + useIP bool, +) { integration.BeforeTest(t) // 1. 
separate copies for TLS assets modification
diff --git a/tests/integration/v3_kv_test.go b/tests/integration/v3_kv_test.go
index 2cf8acf7ab9..1a5e7ce5ea2 100644
--- a/tests/integration/v3_kv_test.go
+++ b/tests/integration/v3_kv_test.go
@@ -18,6 +18,8 @@ import (
 	"context"
 	"testing"
 
+	"github.com/stretchr/testify/require"
+
 	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/client/v3/namespace"
 	"go.etcd.io/etcd/tests/v3/framework/integration"
@@ -33,23 +35,15 @@ func TestKVWithEmptyValue(t *testing.T) {
 	client := clus.RandClient()
 
 	_, err := client.Put(context.Background(), "my-namespace/foobar", "data")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	_, err = client.Put(context.Background(), "my-namespace/foobar1", "data")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	_, err = client.Put(context.Background(), "namespace/foobar1", "data")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Range over all keys.
 	resp, err := client.Get(context.Background(), "", clientv3.WithFromKey())
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	for _, kv := range resp.Kvs {
 		t.Log(string(kv.Key), "=", string(kv.Value))
 	}
@@ -57,24 +51,18 @@ func TestKVWithEmptyValue(t *testing.T) {
 	// Range over all keys in a namespace.
 	client.KV = namespace.NewKV(client.KV, "my-namespace/")
 	resp, err = client.Get(context.Background(), "", clientv3.WithFromKey())
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	for _, kv := range resp.Kvs {
 		t.Log(string(kv.Key), "=", string(kv.Value))
 	}
 
-	//Remove all keys without WithFromKey/WithPrefix func
+	// Remove all keys without WithFromKey/WithPrefix func
 	_, err = client.Delete(context.Background(), "")
-	if err == nil {
-		// fatal error duo to without WithFromKey/WithPrefix func called.
-		t.Fatal(err)
-	}
+	// expect an error because Delete was called without WithFromKey/WithPrefix.
+	require.Error(t, err)
 
 	respDel, err := client.Delete(context.Background(), "", clientv3.WithFromKey())
-	if err != nil {
-		// fatal error duo to with WithFromKey/WithPrefix func called.
-		t.Fatal(err)
-	}
+	// expect no error because Delete was called with WithFromKey.
+ require.NoError(t, err) t.Logf("delete keys:%d", respDel.Deleted) } diff --git a/tests/integration/v3_leadership_test.go b/tests/integration/v3_leadership_test.go index aa1cd8ddc9c..ea2b730c618 100644 --- a/tests/integration/v3_leadership_test.go +++ b/tests/integration/v3_leadership_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -59,15 +60,11 @@ func testMoveLeader(t *testing.T, auto bool) { target := uint64(clus.Members[(oldLeadIdx+1)%3].Server.MemberID()) if auto { err := clus.Members[oldLeadIdx].Server.TryTransferLeadershipOnShutdown() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } else { mvc := integration.ToGRPC(clus.Client(oldLeadIdx)).Maintenance _, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } // wait until leader transitions have happened @@ -203,7 +200,6 @@ func TestFirstCommitNotification(t *testing.T) { } _, err := oldLeaderClient.MoveLeader(context.Background(), newLeaderID) - if err != nil { t.Errorf("got error during leadership transfer: %v", err) } diff --git a/tests/integration/v3_lease_test.go b/tests/integration/v3_lease_test.go index 339b05a526d..6e5cff739f3 100644 --- a/tests/integration/v3_lease_test.go +++ b/tests/integration/v3_lease_test.go @@ -51,9 +51,7 @@ func TestV3LeasePromote(t *testing.T) { lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 3}) ttl := time.Duration(lresp.TTL) * time.Second afterGrant := time.Now() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } @@ -203,9 +201,8 @@ func TestV3LeaseNegativeID(t *testing.T) { time.Sleep(100 * time.Millisecond) // restore lessor from db file clus.Members[2].Stop(t) - if err = clus.Members[2].Restart(t); err != nil { - t.Fatal(err) - } + err = clus.Members[2].Restart(t) + require.NoError(t, err) // revoke lease should remove key integration.WaitClientV3(t, clus.Members[2].Client) @@ -217,9 +214,7 @@ func TestV3LeaseNegativeID(t *testing.T) { for _, m := range clus.Members { getr := &pb.RangeRequest{Key: tc.k} getresp, err := integration.ToGRPC(m.Client).KV.Range(ctx, getr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if revision == 0 { revision = getresp.Header.Revision } @@ -249,7 +244,9 @@ func TestV3LeaseExpire(t *testing.T) { wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), StartRevision: 1}}} + Key: []byte("foo"), StartRevision: 1, + }, + }} if err := wStream.Send(wreq); err != nil { return err } @@ -384,9 +381,7 @@ func TestV3LeaseCheckpoint(t *testing.T) { defer cancel() c := integration.ToGRPC(clus.RandClient()) lresp, err := c.Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: int64(tc.ttl.Seconds())}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < tc.leaderChanges; i++ { // wait for a checkpoint to occur @@ -440,9 +435,7 @@ func TestV3LeaseExists(t *testing.T) { lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( ctx0, &pb.LeaseGrantRequest{TTL: 30}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } @@ -467,9 +460,7 @@ func TestV3LeaseLeases(t *testing.T) { lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( ctx0, 
&pb.LeaseGrantRequest{TTL: 30}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } @@ -479,9 +470,7 @@ func TestV3LeaseLeases(t *testing.T) { lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseLeases( context.Background(), &pb.LeaseLeasesRequest{}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := range lresp.Leases { if lresp.Leases[i].ID != ids[i] { t.Fatalf("#%d: lease ID expected %d, got %d", i, ids[i], lresp.Leases[i].ID) @@ -528,9 +517,7 @@ func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient if useClusterClient { clusterClient, err := clus.ClusterClient(t) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < 300; i++ { go func() { errc <- stresser(ctx, integration.ToGRPC(clusterClient).Lease) }() } @@ -628,9 +615,7 @@ func TestV3GetNonExistLease(t *testing.T) { t.Errorf("failed to create lease %v", err) } _, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseTTLr := &pb.LeaseTimeToLiveRequest{ ID: lresp.ID, @@ -663,49 +648,33 @@ func TestV3LeaseSwitch(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() lresp1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) lresp2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) - if err2 != nil { - t.Fatal(err2) - } + require.NoError(t, err2) // attach key on lease1 then switch it to lease2 put1 := &pb.PutRequest{Key: []byte(key), Lease: lresp1.ID} _, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put1) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) put2 := &pb.PutRequest{Key: []byte(key), Lease: lresp2.ID} _, err = integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put2) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // revoke lease1 should not remove key _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rreq := &pb.RangeRequest{Key: []byte("foo")} rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 1 { t.Fatalf("unexpect removal of key") } // revoke lease2 should remove key _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rresp, err = integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 0 { t.Fatalf("lease removed but key remains") } @@ -726,9 +695,7 @@ func TestV3LeaseFailover(t *testing.T) { // create lease lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 5}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } @@ -743,9 +710,7 @@ func TestV3LeaseFailover(t *testing.T) { ctx, cancel := context.WithCancel(mctx) defer cancel() lac, err := lc.LeaseKeepAlive(ctx) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // send keep alive to old leader until the old leader starts // to drop lease request. 
@@ -790,9 +755,7 @@ func TestV3LeaseRequireLeader(t *testing.T) { ctx, cancel := context.WithCancel(mctx) defer cancel() lac, err := lc.LeaseKeepAlive(ctx) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) donec := make(chan struct{}) go func() { @@ -825,16 +788,12 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) { lsc := integration.ToGRPC(clus.Client(0)).Lease lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // restart server and ensure lease still exists clus.Members[0].Stop(t) @@ -844,22 +803,16 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) { // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" nc, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) kvc = integration.ToGRPC(nc).KV lsc = integration.ToGRPC(nc).Lease defer nc.Close() // revoke should delete the key _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 0 { t.Fatalf("lease removed but key remains") } @@ -876,22 +829,16 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) { lsc := integration.ToGRPC(clus.Client(0)).Lease lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // revoke should delete the key _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // restart server and ensure revoked key doesn't exist clus.Members[0].Stop(t) @@ -901,16 +848,12 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) { // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" nc, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) kvc = integration.ToGRPC(nc).KV defer nc.Close() rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 0 { t.Fatalf("lease removed but key remains") } @@ -928,22 +871,16 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) { lsc := integration.ToGRPC(clus.Client(0)).Lease lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // overwrite lease with none _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) - if err != nil { - t.Fatal(err) 
- } + require.NoError(t, err) // restart server and ensure lease still exists clus.Members[0].Stop(t) @@ -953,22 +890,16 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) { // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" nc, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) kvc = integration.ToGRPC(nc).KV lsc = integration.ToGRPC(nc).Lease defer nc.Close() // revoke the detached lease _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 1 { t.Fatalf("only detached lease removed, key should remain") } @@ -986,18 +917,14 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) { var leaseIDs []int64 for i := 0; i < 2; i++ { lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } leaseIDs = append(leaseIDs, lresp.ID) _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } // restart server and ensure lease still exists @@ -1013,36 +940,26 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) { // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" nc, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) kvc = integration.ToGRPC(nc).KV lsc = integration.ToGRPC(nc).Lease defer nc.Close() // revoke the old lease _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseIDs[0]}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // key should still exist rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 1 { t.Fatalf("only detached lease removed, key should remain") } // revoke the latest lease _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseIDs[1]}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rresp, err = kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 0 { t.Fatalf("lease removed but key remains") } @@ -1147,20 +1064,15 @@ func testLeaseRemoveLeasedKey(t *testing.T, act func(*integration.Cluster, int64 defer clus.Terminate(t) leaseID, err := acquireLeaseAndKey(clus, "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err = act(clus, leaseID); err != nil { - t.Fatal(err) - } + err = act(clus, leaseID) + require.NoError(t, err) // confirm no key rreq := &pb.RangeRequest{Key: []byte("foo")} rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 0 { t.Fatalf("lease removed but key remains") } diff --git a/tests/integration/v3_stm_test.go b/tests/integration/v3_stm_test.go index 5880009d58b..a7c0781f0de 100644 --- a/tests/integration/v3_stm_test.go +++ b/tests/integration/v3_stm_test.go @@ -21,6 +21,7 @@ import 
( "strconv" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" v3 "go.etcd.io/etcd/client/v3" @@ -230,15 +231,13 @@ func TestSTMApplyOnConcurrentDeletion(t *testing.T) { defer clus.Terminate(t) etcdc := clus.RandClient() - if _, err := etcdc.Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err := etcdc.Put(context.TODO(), "foo", "bar") + require.NoError(t, err) donec, readyc := make(chan struct{}), make(chan struct{}) go func() { <-readyc - if _, err := etcdc.Delete(context.TODO(), "foo"); err != nil { - t.Error(err) - } + _, derr := etcdc.Delete(context.TODO(), "foo") + assert.NoError(t, derr) close(donec) }() @@ -256,9 +255,9 @@ func TestSTMApplyOnConcurrentDeletion(t *testing.T) { } iso := concurrency.WithIsolation(concurrency.RepeatableReads) - if _, err := concurrency.NewSTM(etcdc, applyf, iso); err != nil { - t.Fatalf("error on stm txn (%v)", err) - } + _, err = concurrency.NewSTM(etcdc, applyf, iso) + require.NoErrorf(t, err, "error on stm txn") + if try != 2 { t.Fatalf("STM apply expected to run twice, got %d", try) } diff --git a/tests/integration/v3_tls_test.go b/tests/integration/v3_tls_test.go index 81601d18a5f..ea39edd46b6 100644 --- a/tests/integration/v3_tls_test.go +++ b/tests/integration/v3_tls_test.go @@ -62,9 +62,7 @@ func testTLSCipherSuites(t *testing.T, valid bool) { defer clus.Terminate(t) cc, err := cliTLS.ClientConfig() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cli, cerr := integration.NewClient(t, clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCURL}, DialTimeout: time.Second, diff --git a/tests/integration/v3_watch_restore_test.go b/tests/integration/v3_watch_restore_test.go index f7e2e4b4730..66823061fe1 100644 --- a/tests/integration/v3_watch_restore_test.go +++ b/tests/integration/v3_watch_restore_test.go @@ -20,6 +20,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/tests/v3/framework/config" "go.etcd.io/etcd/tests/v3/framework/integration" @@ -65,11 +67,10 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() wStream, errW := integration.ToGRPC(clus.Client(0)).Watch.Watch(ctx) - if errW != nil { - t.Fatal(errW) - } + require.NoError(t, errW) if err := wStream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 5}}}); err != nil { + CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 5}, + }}); err != nil { t.Fatalf("wStream.Send error: %v", err) } wresp, errR := wStream.Recv() diff --git a/tests/integration/v3_watch_test.go b/tests/integration/v3_watch_test.go index e60805b3761..fcb2ab36edf 100644 --- a/tests/integration/v3_watch_test.go +++ b/tests/integration/v3_watch_test.go @@ -54,7 +54,9 @@ func TestV3WatchFromCurrentRevision(t *testing.T) { []string{"foo"}, &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo")}}}, + Key: []byte("foo"), + }, + }}, []*pb.WatchResponse{ { @@ -74,7 +76,9 @@ func TestV3WatchFromCurrentRevision(t *testing.T) { []string{"foo"}, &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("helloworld")}}}, + Key: []byte("helloworld"), + }, + }}, []*pb.WatchResponse{}, }, @@ -84,7 +88,9 @@ func TestV3WatchFromCurrentRevision(t *testing.T) { 
&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ Key: []byte("foo"), - RangeEnd: []byte("fop")}}}, + RangeEnd: []byte("fop"), + }, + }}, []*pb.WatchResponse{ { @@ -105,7 +111,9 @@ func TestV3WatchFromCurrentRevision(t *testing.T) { &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ Key: []byte("helloworld"), - RangeEnd: []byte("helloworle")}}}, + RangeEnd: []byte("helloworle"), + }, + }}, []*pb.WatchResponse{}, }, @@ -115,7 +123,9 @@ func TestV3WatchFromCurrentRevision(t *testing.T) { &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ Key: []byte(""), - RangeEnd: []byte("\x00")}}}, + RangeEnd: []byte("\x00"), + }, + }}, []*pb.WatchResponse{ { @@ -135,7 +145,9 @@ func TestV3WatchFromCurrentRevision(t *testing.T) { []string{"foo", "foo", "foo"}, &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo")}}}, + Key: []byte("foo"), + }, + }}, []*pb.WatchResponse{ { @@ -176,7 +188,9 @@ func TestV3WatchFromCurrentRevision(t *testing.T) { &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ Key: []byte("foo"), - RangeEnd: []byte("fop")}}}, + RangeEnd: []byte("fop"), + }, + }}, []*pb.WatchResponse{ { @@ -316,7 +330,8 @@ func TestV3WatchFutureRevision(t *testing.T) { wkey := []byte("foo") wrev := int64(10) req := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{Key: wkey, StartRevision: wrev}}} + CreateRequest: &pb.WatchCreateRequest{Key: wkey, StartRevision: wrev}, + }} err = wStream.Send(req) if err != nil { t.Fatalf("wStream.Send error: %v", err) @@ -385,7 +400,8 @@ func TestV3WatchWrongRange(t *testing.T) { } for i, tt := range tests { if err := wStream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{Key: tt.key, RangeEnd: tt.end, StartRevision: 1}}}); err != nil { + CreateRequest: &pb.WatchCreateRequest{Key: tt.key, RangeEnd: tt.end, StartRevision: 1}, + }}); err != nil { t.Fatalf("#%d: wStream.Send error: %v", i, err) } cresp, err := wStream.Recv() @@ -429,7 +445,9 @@ func testV3WatchCancel(t *testing.T, startRev int64) { wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), StartRevision: startRev}}} + Key: []byte("foo"), StartRevision: startRev, + }, + }} if err := wStream.Send(wreq); err != nil { t.Fatalf("wStream.Send error: %v", err) } @@ -444,7 +462,9 @@ func testV3WatchCancel(t *testing.T, startRev int64) { creq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CancelRequest{ CancelRequest: &pb.WatchCancelRequest{ - WatchId: wresp.WatchId}}} + WatchId: wresp.WatchId, + }, + }} if err := wStream.Send(creq); err != nil { t.Fatalf("wStream.Send error: %v", err) } @@ -503,7 +523,8 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) { progress := make(map[int64]int64) wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), RangeEnd: []byte("fop")}}} + CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), RangeEnd: []byte("fop")}, + }} if err := wStream.Send(wreq); err != nil { t.Fatalf("first watch request failed (%v)", err) } @@ -570,20 +591,18 @@ func TestV3WatchEmptyKey(t *testing.T) { } req := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ 
CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo")}}} - if err := ws.Send(req); err != nil { - t.Fatal(err) - } - if _, err := ws.Recv(); err != nil { - t.Fatal(err) - } + Key: []byte("foo"), + }, + }} + require.NoError(t, ws.Send(req)) + _, err := ws.Recv() + require.NoError(t, err) // put a key with empty value kvc := integration.ToGRPC(clus.RandClient()).KV preq := &pb.PutRequest{Key: []byte("foo")} - if _, err := kvc.Put(context.TODO(), preq); err != nil { - t.Fatal(err) - } + _, err = kvc.Put(context.TODO(), preq) + require.NoError(t, err) // check received PUT resp, rerr := ws.Recv() @@ -634,11 +653,15 @@ func testV3WatchMultipleWatchers(t *testing.T, startRev int64) { if i < watchKeyN { wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), StartRevision: startRev}}} + Key: []byte("foo"), StartRevision: startRev, + }, + }} } else { wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("fo"), RangeEnd: []byte("fp"), StartRevision: startRev}}} + Key: []byte("fo"), RangeEnd: []byte("fp"), StartRevision: startRev, + }, + }} } if err := wStream.Send(wreq); err != nil { t.Fatalf("wStream.Send error: %v", err) @@ -730,7 +753,9 @@ func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) { wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), RangeEnd: []byte("fop"), StartRevision: startRev}}} + Key: []byte("foo"), RangeEnd: []byte("fop"), StartRevision: startRev, + }, + }} if err := wStream.Send(wreq); err != nil { t.Fatalf("wStream.Send error: %v", err) } @@ -744,7 +769,9 @@ func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) { ru := &pb.RequestOp{} ru.Request = &pb.RequestOp_RequestPut{ RequestPut: &pb.PutRequest{ - Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}} + Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar"), + }, + } txn.Success = append(txn.Success, ru) } @@ -822,7 +849,9 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) { wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), RangeEnd: []byte("fop"), StartRevision: 1}}} + Key: []byte("foo"), RangeEnd: []byte("fop"), StartRevision: 1, + }, + }} if err := wStream.Send(wreq); err != nil { t.Fatalf("wStream.Send error: %v", err) } @@ -1012,7 +1041,9 @@ func testV3WatchMultipleStreams(t *testing.T, startRev int64) { } wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), StartRevision: startRev}}} + Key: []byte("foo"), StartRevision: startRev, + }, + }} if err := wStream.Send(wreq); err != nil { t.Fatalf("wStream.Send error: %v", err) } @@ -1111,12 +1142,14 @@ func TestWatchWithProgressNotify(t *testing.T) { // create two watchers, one with progressNotify set. 
wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1, ProgressNotify: true}}} + CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1, ProgressNotify: true}, + }} if err := wStream.Send(wreq); err != nil { t.Fatalf("watch request failed (%v)", err) } wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1}}} + CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1}, + }} if err := wStream.Send(wreq); err != nil { t.Fatalf("watch request failed (%v)", err) } @@ -1171,7 +1204,9 @@ func TestV3WatchClose(t *testing.T) { cr := &pb.WatchCreateRequest{Key: []byte("a")} req := &pb.WatchRequest{ RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: cr}} + CreateRequest: cr, + }, + } ws.Send(req) ws.Recv() }() @@ -1199,13 +1234,11 @@ func TestV3WatchWithFilter(t *testing.T) { CreateRequest: &pb.WatchCreateRequest{ Key: []byte("foo"), Filters: []pb.WatchCreateRequest_FilterType{pb.WatchCreateRequest_NOPUT}, - }}} - if err := ws.Send(req); err != nil { - t.Fatal(err) - } - if _, err := ws.Recv(); err != nil { - t.Fatal(err) - } + }, + }} + require.NoError(t, ws.Send(req)) + _, err := ws.Recv() + require.NoError(t, err) recv := make(chan *pb.WatchResponse, 1) go func() { @@ -1220,9 +1253,8 @@ func TestV3WatchWithFilter(t *testing.T) { // put a key with empty value kvc := integration.ToGRPC(clus.RandClient()).KV preq := &pb.PutRequest{Key: []byte("foo")} - if _, err := kvc.Put(context.TODO(), preq); err != nil { - t.Fatal(err) - } + _, err = kvc.Put(context.TODO(), preq) + require.NoError(t, err) select { case <-recv: @@ -1231,9 +1263,8 @@ func TestV3WatchWithFilter(t *testing.T) { } dreq := &pb.DeleteRangeRequest{Key: []byte("foo")} - if _, err := kvc.DeleteRange(context.TODO(), dreq); err != nil { - t.Fatal(err) - } + _, err = kvc.DeleteRange(context.TODO(), dreq) + require.NoError(t, err) select { case resp := <-recv: @@ -1287,7 +1318,8 @@ func TestV3WatchWithPrevKV(t *testing.T) { Key: []byte(tt.key), RangeEnd: []byte(tt.end), PrevKv: true, - }}} + }, + }} err = ws.Send(req) require.NoError(t, err) _, err = ws.Recv() @@ -1345,9 +1377,7 @@ func TestV3WatchCancellation(t *testing.T) { time.Sleep(3 * time.Second) minWatches, err := clus.Members[0].Metric("etcd_debugging_mvcc_watcher_total") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var expected string if integration.ThroughProxy { @@ -1384,9 +1414,7 @@ func TestV3WatchCloseCancelRace(t *testing.T) { time.Sleep(3 * time.Second) minWatches, err := clus.Members[0].Metric("etcd_debugging_mvcc_watcher_total") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var expected string if integration.ThroughProxy { @@ -1434,7 +1462,7 @@ func TestV3WatchProgressWaitsForSync(t *testing.T) { // Immediately request a progress notification. As the client // is unsynchronised, the server will not sent any notification, - //as client can infer progress from events. + // as client can infer progress from events. 
err := client.RequestProgress(ctx) require.NoError(t, err) diff --git a/tests/integration/v3election_grpc_test.go b/tests/integration/v3election_grpc_test.go index d0ca72b4255..cf5c7cb3427 100644 --- a/tests/integration/v3election_grpc_test.go +++ b/tests/integration/v3election_grpc_test.go @@ -35,13 +35,9 @@ func TestV3ElectionCampaign(t *testing.T) { defer clus.Terminate(t) lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err2 != nil { - t.Fatal(err2) - } + require.NoError(t, err2) lc := integration.ToGRPC(clus.Client(0)).Election req1 := &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("abc")} @@ -129,13 +125,9 @@ func TestV3ElectionObserve(t *testing.T) { } lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) c1, cerr1 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("0")}) - if cerr1 != nil { - t.Fatal(cerr1) - } + require.NoError(t, cerr1) // overlap other leader so it waits on resign leader2c := make(chan struct{}) diff --git a/tests/integration/v3lock_grpc_test.go b/tests/integration/v3lock_grpc_test.go index f293bc1a556..fe2e161cded 100644 --- a/tests/integration/v3lock_grpc_test.go +++ b/tests/integration/v3lock_grpc_test.go @@ -19,6 +19,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" "go.etcd.io/etcd/tests/v3/framework/integration" @@ -32,13 +34,9 @@ func TestV3LockLockWaiter(t *testing.T) { defer clus.Terminate(t) lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err2 != nil { - t.Fatal(err2) - } + require.NoError(t, err2) lc := integration.ToGRPC(clus.Client(0)).Lock l1, lerr1 := lc.Lock(context.TODO(), &lockpb.LockRequest{Name: []byte("foo"), Lease: lease1.ID})