From 1523e7561e21595dd9f266c223dd94f8562496f5 Mon Sep 17 00:00:00 2001
From: Chris Bandy
Date: Fri, 16 Aug 2024 09:52:47 -0500
Subject: [PATCH 1/5] Add an interface for querying available Kubernetes APIs

---
 internal/kubernetes/apis.go      | 60 +++++++++++++++++++++++++++++
 internal/kubernetes/apis_test.go | 66 ++++++++++++++++++++++++++++++++
 2 files changed, 126 insertions(+)
 create mode 100644 internal/kubernetes/apis.go
 create mode 100644 internal/kubernetes/apis_test.go

diff --git a/internal/kubernetes/apis.go b/internal/kubernetes/apis.go
new file mode 100644
index 000000000..2ddd0c4b5
--- /dev/null
+++ b/internal/kubernetes/apis.go
@@ -0,0 +1,60 @@
+// Copyright 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// API is a combination of Group, Version, and Kind that can be used to check
+// what is available in the Kubernetes API. There are four ways to populate it:
+//  1. Group with neither Version nor Kind means any resource in that Group.
+//  2. Group with Version but no Kind means any resource in that GV.
+//  3. Group with Kind but no Version means that Kind in any Version of the Group.
+//  4. Group with Version and Kind means that exact GVK.
+type API = schema.GroupVersionKind
+
+type APIs interface {
+	Has(API) bool
+	HasAll(...API) bool
+	HasAny(...API) bool
+}
+
+// APISet implements [APIs] using an empty struct for minimal memory consumption.
+type APISet = sets.Set[API]
+
+func NewAPISet(api ...API) APISet {
+	// Start with everything that's passed in; full GVKs are here.
+	s := sets.New(api...)
+
+	// Add the other combinations: Group, GV, and GK.
+	for i := range api {
+		s.Insert(
+			API{Group: api[i].Group},
+			API{Group: api[i].Group, Version: api[i].Version},
+			API{Group: api[i].Group, Kind: api[i].Kind},
+		)
+	}
+
+	return s
+}
+
+type apiContextKey struct{}
+
+// Has returns true when api was previously stored by [NewAPIContext].
+func Has(ctx context.Context, api API) bool {
+	if i, ok := ctx.Value(apiContextKey{}).(interface{ Has(API) bool }); ok {
+		return i.Has(api)
+	}
+	return false
+}
+
+// NewAPIContext returns a copy of ctx containing apis. Interrogate it using [Has].
+func NewAPIContext(ctx context.Context, apis APIs) context.Context {
+	return context.WithValue(ctx, apiContextKey{}, apis)
+}
diff --git a/internal/kubernetes/apis_test.go b/internal/kubernetes/apis_test.go
new file mode 100644
index 000000000..8048c7056
--- /dev/null
+++ b/internal/kubernetes/apis_test.go
@@ -0,0 +1,66 @@
+// Copyright 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"context"
+	"testing"
+
+	"gotest.tools/v3/assert"
+)
+
+func TestAPISet(t *testing.T) {
+	t.Parallel()
+
+	var zero APISet
+	assert.Assert(t, !zero.Has(API{Group: "security.openshift.io"}))
+	assert.Assert(t, !zero.Has(API{Group: "security.openshift.io", Kind: "SecurityContextConstraints"}))
+	assert.Assert(t, !zero.HasAll(API{Group: "security.openshift.io"}, API{Group: "snapshot.storage.k8s.io"}))
+	assert.Assert(t, !zero.HasAny(API{Group: "security.openshift.io"}, API{Group: "snapshot.storage.k8s.io"}))
+
+	empty := NewAPISet()
+	assert.Assert(t, !empty.Has(API{Group: "security.openshift.io"}))
+	assert.Assert(t, !empty.Has(API{Group: "security.openshift.io", Kind: "SecurityContextConstraints"}))
+
+	one := NewAPISet(
+		API{Group: "security.openshift.io", Kind: "SecurityContextConstraints"},
+	)
+	assert.Assert(t, one.Has(API{Group: "security.openshift.io"}))
+	assert.Assert(t, one.Has(API{Group: "security.openshift.io", Kind: "SecurityContextConstraints"}))
+	assert.Assert(t, !one.HasAll(API{Group: "snapshot.storage.k8s.io"}, API{Group: "security.openshift.io"}))
+	assert.Assert(t, !one.HasAny(API{Group: "snapshot.storage.k8s.io"}))
+	assert.Assert(t, one.HasAny(API{Group: "snapshot.storage.k8s.io"}, API{Group: "security.openshift.io"}))
+
+	two := NewAPISet(
+		API{Group: "security.openshift.io", Kind: "SecurityContextConstraints"},
+		API{Group: "snapshot.storage.k8s.io", Kind: "VolumeSnapshot"},
+	)
+	assert.Assert(t, two.Has(API{Group: "security.openshift.io"}))
+	assert.Assert(t, two.Has(API{Group: "snapshot.storage.k8s.io"}))
+	assert.Assert(t, two.HasAll(API{Group: "snapshot.storage.k8s.io"}, API{Group: "security.openshift.io"}))
+	assert.Assert(t, two.HasAny(API{Group: "snapshot.storage.k8s.io"}))
+	assert.Assert(t, two.HasAny(API{Group: "snapshot.storage.k8s.io"}, API{Group: "security.openshift.io"}))
+}
+
+func TestAPIContext(t *testing.T) {
+	t.Parallel()
+
+	// The background context always returns false.
+	ctx := context.Background()
+
+	assert.Assert(t, !Has(ctx, API{Group: "security.openshift.io"}))
+	assert.Assert(t, !Has(ctx, API{Group: "snapshot.storage.k8s.io"}))
+
+	// An initialized context returns what is stored.
+	set := NewAPISet(API{Group: "security.openshift.io", Kind: "SecurityContextConstraints"})
+	ctx = NewAPIContext(ctx, set)
+
+	assert.Assert(t, Has(ctx, API{Group: "security.openshift.io"}))
+	assert.Assert(t, !Has(ctx, API{Group: "snapshot.storage.k8s.io"}))
+
+	// The stored value is mutable within the context.
+ set[API{Group: "snapshot.storage.k8s.io"}] = struct{}{} + assert.Assert(t, Has(ctx, API{Group: "snapshot.storage.k8s.io"})) +} From 75a863605d0996668771c41fc723e4d408519ccf Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Wed, 13 Nov 2024 11:53:56 -0600 Subject: [PATCH 2/5] Add a Runner that watches for Kubernetes APIs --- cmd/postgres-operator/main.go | 45 ++---- internal/kubernetes/discovery.go | 208 ++++++++++++++++++++++++++ internal/kubernetes/discovery_test.go | 55 +++++++ internal/registration/runner.go | 1 + 4 files changed, 274 insertions(+), 35 deletions(-) create mode 100644 internal/kubernetes/discovery.go create mode 100644 internal/kubernetes/discovery_test.go diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go index b2f8ae49b..8c2df38fd 100644 --- a/cmd/postgres-operator/main.go +++ b/cmd/postgres-operator/main.go @@ -16,7 +16,6 @@ import ( "go.opentelemetry.io/otel" "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/client-go/discovery" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/healthz" @@ -28,6 +27,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/controller/standalone_pgadmin" "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/kubernetes" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/registration" @@ -146,6 +146,10 @@ func main() { // deprecation warnings when using an older version of a resource for backwards compatibility). rest.SetDefaultWarningHandler(rest.NoWarnings{}) + k8s, err := kubernetes.NewDiscoveryRunner(cfg) + assertNoError(err) + assertNoError(k8s.Read(ctx)) + options, err := initManager() assertNoError(err) @@ -159,11 +163,12 @@ func main() { mgr, err := runtime.NewManager(cfg, options) assertNoError(err) + assertNoError(mgr.Add(k8s)) - openshift := isOpenshift(cfg) - if openshift { - log.Info("detected OpenShift environment") - } + openshift := k8s.Has(kubernetes.API{ + Group: "security.openshift.io", Kind: "SecurityContextConstraints", + }) + log.Info("Connected to Kubernetes", "api", k8s.Version().String(), "openshift", openshift) registrar, err := registration.NewRunner(os.Getenv("RSA_KEY"), os.Getenv("TOKEN_PATH"), shutdown) assertNoError(err) @@ -270,33 +275,3 @@ func addControllersToManager(mgr runtime.Manager, openshift bool, log logging.Lo os.Exit(1) } } - -func isOpenshift(cfg *rest.Config) bool { - const sccGroupName, sccKind = "security.openshift.io", "SecurityContextConstraints" - - client, err := discovery.NewDiscoveryClientForConfig(cfg) - assertNoError(err) - - groups, err := client.ServerGroups() - if err != nil { - assertNoError(err) - } - for _, g := range groups.Groups { - if g.Name != sccGroupName { - continue - } - for _, v := range g.Versions { - resourceList, err := client.ServerResourcesForGroupVersion(v.GroupVersion) - if err != nil { - assertNoError(err) - } - for _, r := range resourceList.APIResources { - if r.Kind == sccKind { - return true - } - } - } - } - - return false -} diff --git a/internal/kubernetes/discovery.go b/internal/kubernetes/discovery.go new file mode 100644 index 000000000..89ed07f75 --- /dev/null +++ b/internal/kubernetes/discovery.go @@ -0,0 +1,208 @@ +// Copyright 2024 Crunchy Data Solutions, Inc. 
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/version"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/rest"
+
+	"github.com/crunchydata/postgres-operator/internal/logging"
+)
+
+type Version = version.Info
+
+// DiscoveryRunner implements [APIs] by reading from a Kubernetes API client.
+// Its methods are safe to call concurrently.
+type DiscoveryRunner struct {
+	// NOTE(tracing): The methods of [discovery.DiscoveryClient] do not take
+	// a Context so their API calls won't have a parent span.
+	Client interface {
+		ServerGroups() (*metav1.APIGroupList, error)
+		ServerResourcesForGroupVersion(string) (*metav1.APIResourceList, error)
+		ServerVersion() (*version.Info, error)
+	}
+
+	refresh time.Duration
+
+	// relevant is the list of APIs to examine during Read.
+	// Has, HasAll, and HasAny return false when this is empty.
+	relevant []API
+
+	have struct {
+		sync.RWMutex
+		APISet
+		Version
+	}
+}
+
+// NewDiscoveryRunner creates a [DiscoveryRunner] that periodically reads from
+// the Kubernetes API at config.
+func NewDiscoveryRunner(config *rest.Config) (*DiscoveryRunner, error) {
+	dc, err := discovery.NewDiscoveryClientForConfig(config)
+
+	runner := &DiscoveryRunner{
+		Client:  dc,
+		refresh: 10 * time.Minute,
+		relevant: []API{
+			// https://cert-manager.io/docs/usage/certificate
+			// https://cert-manager.io/docs/trust/trust-manager
+			{Group: "cert-manager.io", Kind: "Certificate"},
+			{Group: "trust.cert-manager.io", Kind: "Bundle"},
+
+			// https://gateway-api.sigs.k8s.io/api-types/referencegrant
+			// https://kep.k8s.io/3766
+			{Group: "gateway.networking.k8s.io", Kind: "ReferenceGrant"},
+
+			// https://docs.openshift.com/container-platform/latest/authentication/managing-security-context-constraints.html
+			{Group: "security.openshift.io", Kind: "SecurityContextConstraints"},
+
+			// https://docs.k8s.io/concepts/storage/volume-snapshots
+			{Group: "snapshot.storage.k8s.io", Kind: "VolumeSnapshot"},
+		},
+	}
+
+	return runner, err
+}
+
+// Has returns true when api is available in Kubernetes.
+func (r *DiscoveryRunner) Has(api API) bool { return r.HasAny(api) }
+
+// HasAll returns true when every api is available in Kubernetes.
+func (r *DiscoveryRunner) HasAll(api ...API) bool {
+	r.have.RLock()
+	defer r.have.RUnlock()
+	return r.have.HasAll(api...)
+}
+
+// HasAny returns true when at least one api is available in Kubernetes.
+func (r *DiscoveryRunner) HasAny(api ...API) bool {
+	r.have.RLock()
+	defer r.have.RUnlock()
+	return r.have.HasAny(api...)
+}
+
+// NeedLeaderElection returns false so that r runs on any [manager.Manager],
+// regardless of which is elected leader in the Kubernetes namespace.
+func (r *DiscoveryRunner) NeedLeaderElection() bool { return false }
+
+// Read fetches available APIs from Kubernetes.
+func (r *DiscoveryRunner) Read(ctx context.Context) error {
+	return errors.Join(r.readAPIs(ctx), r.readVersion())
+}
+
+func (r *DiscoveryRunner) readAPIs(ctx context.Context) error {
+	// Build an index of the APIs we want to know about.
+	wantAPIs := make(map[string]map[string]sets.Set[string])
+	for _, want := range r.relevant {
+		if wantAPIs[want.Group] == nil {
+			wantAPIs[want.Group] = make(map[string]sets.Set[string])
+		}
+		if wantAPIs[want.Group][want.Version] == nil {
+			wantAPIs[want.Group][want.Version] = sets.New[string]()
+		}
+		if want.Kind != "" {
+			wantAPIs[want.Group][want.Version].Insert(want.Kind)
+		}
+	}
+
+	// Fetch Groups and Versions from Kubernetes.
+	groups, err := r.Client.ServerGroups()
+	if err != nil {
+		return err
+	}
+
+	// Build an index of the Groups and GVs available in Kubernetes;
+	// add GK and GVK for resources that we want to know about.
+	haveAPIs := make(APISet)
+	for _, apiG := range groups.Groups {
+		haveG := apiG.Name
+		haveAPIs.Insert(API{Group: haveG})
+
+		for _, apiGV := range apiG.Versions {
+			haveV := apiGV.Version
+			haveAPIs.Insert(API{Group: haveG, Version: haveV})
+
+			// Only fetch Resources when there are Kinds we want to know about.
+			if wantAPIs[haveG][""].Len() == 0 && wantAPIs[haveG][haveV].Len() == 0 {
+				continue
+			}
+
+			resources, err := r.Client.ServerResourcesForGroupVersion(apiGV.GroupVersion)
+			if err != nil {
+				return err
+			}
+
+			for _, apiR := range resources.APIResources {
+				haveK := apiR.Kind
+				haveAPIs.Insert(
+					API{Group: haveG, Kind: haveK},
+					API{Group: haveG, Kind: haveK, Version: haveV},
+				)
+			}
+		}
+	}
+
+	r.have.Lock()
+	r.have.APISet = haveAPIs
+	r.have.Unlock()
+
+	r.have.RLock()
+	defer r.have.RUnlock()
+	logging.FromContext(ctx).V(1).Info("Found APIs", "index_size", r.have.APISet.Len())
+
+	return nil
+}
+
+func (r *DiscoveryRunner) readVersion() error {
+	info, err := r.Client.ServerVersion()
+
+	if info != nil && err == nil {
+		r.have.Lock()
+		r.have.Version = *info
+		r.have.Unlock()
+	}
+
+	return err
+}
+
+// Start periodically reads the Kubernetes API. It blocks until ctx is cancelled.
+func (r *DiscoveryRunner) Start(ctx context.Context) error {
+	ticker := time.NewTicker(r.refresh)
+	defer ticker.Stop()
+
+	log := logging.FromContext(ctx).WithValues("controller", "kubernetes")
+	ctx = logging.NewContext(ctx, log)
+
+	for {
+		select {
+		case <-ticker.C:
+			if err := r.Read(ctx); err != nil {
+				log.Error(err, "Unable to detect Kubernetes APIs")
+			}
+		case <-ctx.Done():
+			// TODO(controller-runtime): Fixed in v0.19.0
+			// https://github.com/kubernetes-sigs/controller-runtime/issues/1927
+			if errors.Is(ctx.Err(), context.Canceled) {
+				return nil
+			}
+			return ctx.Err()
+		}
+	}
+}
+
+// Version returns the detected version of Kubernetes.
+func (r *DiscoveryRunner) Version() Version {
+	r.have.RLock()
+	defer r.have.RUnlock()
+	return r.have.Version
+}
diff --git a/internal/kubernetes/discovery_test.go b/internal/kubernetes/discovery_test.go
new file mode 100644
index 000000000..8bbe62013
--- /dev/null
+++ b/internal/kubernetes/discovery_test.go
@@ -0,0 +1,55 @@
+// Copyright 2024 Crunchy Data Solutions, Inc.
+// +// SPDX-License-Identifier: Apache-2.0 + +package kubernetes + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/crunchydata/postgres-operator/internal/testing/require" +) + +func TestDiscoveryRunnerInterfaces(t *testing.T) { + var _ APIs = new(DiscoveryRunner) + var _ manager.Runnable = new(DiscoveryRunner) + + var runnable manager.LeaderElectionRunnable = new(DiscoveryRunner) + assert.Assert(t, false == runnable.NeedLeaderElection()) +} + +func TestDiscoveryRunnerAPIs(t *testing.T) { + ctx := context.Background() + cfg, _ := require.Kubernetes2(t) + require.ParallelCapacity(t, 0) + + runner, err := NewDiscoveryRunner(cfg) + assert.NilError(t, err) + + // Search for an API that should always exist. + runner.relevant = append(runner.relevant, API{Kind: "Pod"}) + assert.NilError(t, runner.readAPIs(ctx)) + + assert.Assert(t, runner.Has(API{Kind: "Pod"})) + assert.Assert(t, runner.HasAll(API{Kind: "Pod"}, API{Kind: "Secret"})) + assert.Assert(t, runner.HasAny(API{Kind: "Pod"}, API{Kind: "NotGonnaExist"})) + assert.Assert(t, !runner.Has(API{Kind: "NotGonnaExist"})) +} + +func TestDiscoveryRunnerVersion(t *testing.T) { + cfg, _ := require.Kubernetes2(t) + require.ParallelCapacity(t, 0) + + runner, err := NewDiscoveryRunner(cfg) + assert.NilError(t, err) + assert.NilError(t, runner.readVersion()) + + version := runner.Version() + assert.Assert(t, version.Major != "", "got %#v", version) + assert.Assert(t, version.Minor != "", "got %#v", version) + assert.Assert(t, version.String() != "", "got %q", version.String()) +} diff --git a/internal/registration/runner.go b/internal/registration/runner.go index 0d607e1e9..5b340ddaf 100644 --- a/internal/registration/runner.go +++ b/internal/registration/runner.go @@ -181,6 +181,7 @@ func (r *Runner) Start(ctx context.Context) error { r.changed() } case <-ctx.Done(): + // TODO(controller-runtime): Fixed in v0.19.0 // https://github.com/kubernetes-sigs/controller-runtime/issues/1927 if errors.Is(ctx.Err(), context.Canceled) { return nil From a77c14196176451f71523b6a2bc95d5cd38b37b9 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Wed, 13 Nov 2024 15:21:03 -0600 Subject: [PATCH 3/5] Use the discovery runner during upgrade check --- cmd/postgres-operator/main.go | 11 ++-- internal/kubernetes/discovery.go | 25 +++++++++ internal/kubernetes/discovery_test.go | 23 ++++++++ internal/upgradecheck/header.go | 31 ++--------- internal/upgradecheck/header_test.go | 79 +++++---------------------- internal/upgradecheck/helpers_test.go | 29 ---------- internal/upgradecheck/http.go | 17 ++---- internal/upgradecheck/http_test.go | 15 ++--- 8 files changed, 80 insertions(+), 150 deletions(-) diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go index 8c2df38fd..5e5849ac7 100644 --- a/cmd/postgres-operator/main.go +++ b/cmd/postgres-operator/main.go @@ -150,6 +150,8 @@ func main() { assertNoError(err) assertNoError(k8s.Read(ctx)) + log.Info("Connected to Kubernetes", "api", k8s.Version().String(), "openshift", k8s.IsOpenShift()) + options, err := initManager() assertNoError(err) @@ -158,6 +160,7 @@ func main() { options.BaseContext = func() context.Context { ctx := context.Background() ctx = feature.NewContext(ctx, features) + ctx = kubernetes.NewAPIContext(ctx, k8s) return ctx } @@ -165,18 +168,13 @@ func main() { assertNoError(err) assertNoError(mgr.Add(k8s)) - openshift := k8s.Has(kubernetes.API{ - Group: "security.openshift.io", Kind: "SecurityContextConstraints", - }) - 
log.Info("Connected to Kubernetes", "api", k8s.Version().String(), "openshift", openshift) - registrar, err := registration.NewRunner(os.Getenv("RSA_KEY"), os.Getenv("TOKEN_PATH"), shutdown) assertNoError(err) assertNoError(mgr.Add(registrar)) token, _ := registrar.CheckToken() // add all PostgreSQL Operator controllers to the runtime manager - addControllersToManager(mgr, openshift, log, registrar) + addControllersToManager(mgr, k8s.IsOpenShift(), log, registrar) if features.Enabled(feature.BridgeIdentifiers) { constructor := func() *bridge.Client { @@ -196,7 +194,6 @@ func main() { assertNoError( upgradecheck.ManagedScheduler( mgr, - openshift, os.Getenv("CHECK_FOR_UPGRADES_URL"), versionString, token, diff --git a/internal/kubernetes/discovery.go b/internal/kubernetes/discovery.go index 89ed07f75..ab188c5f6 100644 --- a/internal/kubernetes/discovery.go +++ b/internal/kubernetes/discovery.go @@ -91,6 +91,12 @@ func (r *DiscoveryRunner) HasAny(api ...API) bool { return r.have.HasAny(api...) } +// IsOpenShift returns true if this Kubernetes might be OpenShift. The result +// may not be accurate. +func (r *DiscoveryRunner) IsOpenShift() bool { + return r.Has(API{Group: "security.openshift.io", Kind: "SecurityContextConstraints"}) +} + // NeedLeaderElection returns false so that r runs on any [manager.Manager], // regardless of which is elected leader in the Kubernetes namespace. func (r *DiscoveryRunner) NeedLeaderElection() bool { return false } @@ -206,3 +212,22 @@ func (r *DiscoveryRunner) Version() Version { defer r.have.RUnlock() return r.have.Version } + +// IsOpenShift returns true if the detected Kubernetes might be OpenShift. +// The result may not be accurate. When possible, use another technique to +// detect specific behavior. Use [Has] to check for specific APIs. +func IsOpenShift(ctx context.Context) bool { + if i, ok := ctx.Value(apiContextKey{}).(interface{ IsOpenShift() bool }); ok { + return i.IsOpenShift() + } + return false +} + +// VersionString returns a textual representation of the detected Kubernetes +// version, if any. 
+func VersionString(ctx context.Context) string { + if i, ok := ctx.Value(apiContextKey{}).(interface{ Version() Version }); ok { + return i.Version().String() + } + return "" +} diff --git a/internal/kubernetes/discovery_test.go b/internal/kubernetes/discovery_test.go index 8bbe62013..a6f5a26df 100644 --- a/internal/kubernetes/discovery_test.go +++ b/internal/kubernetes/discovery_test.go @@ -9,6 +9,7 @@ import ( "testing" "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/version" "sigs.k8s.io/controller-runtime/pkg/manager" "github.com/crunchydata/postgres-operator/internal/testing/require" @@ -53,3 +54,25 @@ func TestDiscoveryRunnerVersion(t *testing.T) { assert.Assert(t, version.Minor != "", "got %#v", version) assert.Assert(t, version.String() != "", "got %q", version.String()) } + +func TestIsOpenShift(t *testing.T) { + ctx := context.Background() + assert.Assert(t, !IsOpenShift(ctx)) + + runner := new(DiscoveryRunner) + runner.have.APISet = NewAPISet( + API{Group: "security.openshift.io", Kind: "SecurityContextConstraints"}, + ) + assert.Assert(t, IsOpenShift(NewAPIContext(ctx, runner))) +} + +func TestVersionString(t *testing.T) { + ctx := context.Background() + assert.Equal(t, "", VersionString(ctx)) + + runner := new(DiscoveryRunner) + runner.have.Version = version.Info{ + Major: "1", Minor: "2", GitVersion: "asdf", + } + assert.Equal(t, "asdf", VersionString(NewAPIContext(ctx, runner))) +} diff --git a/internal/upgradecheck/header.go b/internal/upgradecheck/header.go index 5dc774a1d..582caf0d3 100644 --- a/internal/upgradecheck/header.go +++ b/internal/upgradecheck/header.go @@ -14,12 +14,11 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" crclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/kubernetes" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -51,16 +50,16 @@ type clientUpgradeData struct { // generateHeader aggregates data and returns a struct of that data // If any errors are encountered, it logs those errors and uses the default values -func generateHeader(ctx context.Context, cfg *rest.Config, crClient crclient.Client, - pgoVersion string, isOpenShift bool, registrationToken string) *clientUpgradeData { +func generateHeader(ctx context.Context, crClient crclient.Client, + pgoVersion string, registrationToken string) *clientUpgradeData { return &clientUpgradeData{ BridgeClustersTotal: getBridgeClusters(ctx, crClient), BuildSource: os.Getenv("BUILD_SOURCE"), DeploymentID: ensureDeploymentID(ctx, crClient), FeatureGatesEnabled: feature.ShowGates(ctx), - IsOpenShift: isOpenShift, - KubernetesEnv: getServerVersion(ctx, cfg), + IsOpenShift: kubernetes.IsOpenShift(ctx), + KubernetesEnv: kubernetes.VersionString(ctx), PGOClustersTotal: getManagedClusters(ctx, crClient), PGOInstaller: os.Getenv("PGO_INSTALLER"), PGOInstallerOrigin: os.Getenv("PGO_INSTALLER_ORIGIN"), @@ -189,26 +188,6 @@ func getBridgeClusters(ctx context.Context, crClient crclient.Client) int { return count } -// getServerVersion returns the stringified server version (i.e., the same info `kubectl version` -// returns for the 
server) -// Any errors encountered will be logged and will return an empty string -func getServerVersion(ctx context.Context, cfg *rest.Config) string { - log := logging.FromContext(ctx) - discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) - if err != nil { - log.V(1).Info("upgrade check issue: could not retrieve discovery client", - "response", err.Error()) - return "" - } - versionInfo, err := discoveryClient.ServerVersion() - if err != nil { - log.V(1).Info("upgrade check issue: could not retrieve server version", - "response", err.Error()) - return "" - } - return versionInfo.String() -} - func addHeader(req *http.Request, upgradeInfo *clientUpgradeData) *http.Request { marshaled, _ := json.Marshal(upgradeInfo) req.Header.Add(clientHeader, string(marshaled)) diff --git a/internal/upgradecheck/header_test.go b/internal/upgradecheck/header_test.go index 63c8d4b99..39d3a9abd 100644 --- a/internal/upgradecheck/header_test.go +++ b/internal/upgradecheck/header_test.go @@ -14,14 +14,12 @@ import ( "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/client-go/discovery" // Google Kubernetes Engine / Google Cloud Platform authentication provider _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/rest" - "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/kubernetes" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" @@ -33,12 +31,10 @@ func TestGenerateHeader(t *testing.T) { ctx := context.Background() cfg, cc := require.Kubernetes2(t) - dc, err := discovery.NewDiscoveryClientForConfig(cfg) + discovery, err := kubernetes.NewDiscoveryRunner(cfg) assert.NilError(t, err) - server, err := dc.ServerVersion() - assert.NilError(t, err) - - reconciler := postgrescluster.Reconciler{Client: cc} + assert.NilError(t, discovery.Read(ctx)) + ctx = kubernetes.NewAPIContext(ctx, discovery) t.Setenv("PGO_INSTALLER", "test") t.Setenv("PGO_INSTALLER_ORIGIN", "test-origin") @@ -51,11 +47,10 @@ func TestGenerateHeader(t *testing.T) { } ctx, calls := setupLogCapture(ctx) - res := generateHeader(ctx, cfg, fakeClientWithOptionalError, - "1.2.3", reconciler.IsOpenShift, "") + res := generateHeader(ctx, fakeClientWithOptionalError, "1.2.3", "") assert.Equal(t, len(*calls), 1) assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not apply configmap`)) - assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) + assert.Equal(t, discovery.IsOpenShift(), res.IsOpenShift) assert.Equal(t, deploymentID, res.DeploymentID) pgoList := v1beta1.PostgresClusterList{} err := cc.List(ctx, &pgoList) @@ -66,7 +61,7 @@ func TestGenerateHeader(t *testing.T) { assert.NilError(t, err) assert.Equal(t, len(bridgeList.Items), res.BridgeClustersTotal) assert.Equal(t, "1.2.3", res.PGOVersion) - assert.Equal(t, server.String(), res.KubernetesEnv) + assert.Equal(t, discovery.Version().String(), res.KubernetesEnv) assert.Equal(t, "test", res.PGOInstaller) assert.Equal(t, "test-origin", res.PGOInstallerOrigin) assert.Equal(t, "developer", res.BuildSource) @@ -78,40 +73,18 @@ func TestGenerateHeader(t *testing.T) { } ctx, calls := setupLogCapture(ctx) - res := generateHeader(ctx, cfg, fakeClientWithOptionalError, - "1.2.3", reconciler.IsOpenShift, "") + res := 
generateHeader(ctx, fakeClientWithOptionalError, "1.2.3", "") assert.Equal(t, len(*calls), 2) // Aggregating the logs since we cannot determine which call will be first callsAggregate := strings.Join(*calls, " ") assert.Assert(t, cmp.Contains(callsAggregate, `upgrade check issue: could not count postgres clusters`)) assert.Assert(t, cmp.Contains(callsAggregate, `upgrade check issue: could not count bridge clusters`)) - assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) + assert.Equal(t, discovery.IsOpenShift(), res.IsOpenShift) assert.Equal(t, deploymentID, res.DeploymentID) assert.Equal(t, 0, res.PGOClustersTotal) assert.Equal(t, 0, res.BridgeClustersTotal) assert.Equal(t, "1.2.3", res.PGOVersion) - assert.Equal(t, server.String(), res.KubernetesEnv) - assert.Equal(t, "test", res.PGOInstaller) - assert.Equal(t, "test-origin", res.PGOInstallerOrigin) - assert.Equal(t, "developer", res.BuildSource) - }) - - t.Run("error getting server version info", func(t *testing.T) { - ctx, calls := setupLogCapture(ctx) - badcfg := &rest.Config{} - - res := generateHeader(ctx, badcfg, cc, - "1.2.3", reconciler.IsOpenShift, "") - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not retrieve server version`)) - assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) - assert.Equal(t, deploymentID, res.DeploymentID) - pgoList := v1beta1.PostgresClusterList{} - err := cc.List(ctx, &pgoList) - assert.NilError(t, err) - assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) - assert.Equal(t, "1.2.3", res.PGOVersion) - assert.Equal(t, "", res.KubernetesEnv) + assert.Equal(t, discovery.Version().String(), res.KubernetesEnv) assert.Equal(t, "test", res.PGOInstaller) assert.Equal(t, "test-origin", res.PGOInstallerOrigin) assert.Equal(t, "developer", res.BuildSource) @@ -125,17 +98,16 @@ func TestGenerateHeader(t *testing.T) { })) ctx = feature.NewContext(ctx, gate) - res := generateHeader(ctx, cfg, cc, - "1.2.3", reconciler.IsOpenShift, "") + res := generateHeader(ctx, cc, "1.2.3", "") assert.Equal(t, len(*calls), 0) - assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) + assert.Equal(t, discovery.IsOpenShift(), res.IsOpenShift) assert.Equal(t, deploymentID, res.DeploymentID) pgoList := v1beta1.PostgresClusterList{} err := cc.List(ctx, &pgoList) assert.NilError(t, err) assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) assert.Equal(t, "1.2.3", res.PGOVersion) - assert.Equal(t, server.String(), res.KubernetesEnv) + assert.Equal(t, discovery.Version().String(), res.KubernetesEnv) assert.Equal(t, "TablespaceVolumes=true", res.FeatureGatesEnabled) assert.Equal(t, "test", res.PGOInstaller) assert.Equal(t, "test-origin", res.PGOInstallerOrigin) @@ -561,31 +533,6 @@ func TestGetBridgeClusters(t *testing.T) { }) } -func TestGetServerVersion(t *testing.T) { - t.Run("success", func(t *testing.T) { - expect, server := setupVersionServer(t, true) - ctx, calls := setupLogCapture(context.Background()) - - got := getServerVersion(ctx, &rest.Config{ - Host: server.URL, - }) - assert.Equal(t, len(*calls), 0) - assert.Equal(t, got, expect.String()) - }) - - t.Run("failure", func(t *testing.T) { - _, server := setupVersionServer(t, false) - ctx, calls := setupLogCapture(context.Background()) - - got := getServerVersion(ctx, &rest.Config{ - Host: server.URL, - }) - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not retrieve server version`)) - assert.Equal(t, got, "") - }) -} - func TestAddHeader(t 
*testing.T) { t.Run("successful", func(t *testing.T) { req := &http.Request{ diff --git a/internal/upgradecheck/helpers_test.go b/internal/upgradecheck/helpers_test.go index abef591e5..a273741f7 100644 --- a/internal/upgradecheck/helpers_test.go +++ b/internal/upgradecheck/helpers_test.go @@ -6,17 +6,13 @@ package upgradecheck import ( "context" - "encoding/json" "fmt" - "net/http" - "net/http/httptest" "testing" "github.com/go-logr/logr/funcr" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/apimachinery/pkg/version" crclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -117,31 +113,6 @@ func setupFakeClientWithPGOScheme(t *testing.T, includeCluster bool) crclient.Cl return fake.NewClientBuilder().WithScheme(runtime.Scheme).Build() } -// setupVersionServer sets up and tears down a server and version info for testing -func setupVersionServer(t *testing.T, works bool) (version.Info, *httptest.Server) { - t.Helper() - expect := version.Info{ - Major: "1", - Minor: "22", - GitCommit: "v1.22.2", - } - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, - req *http.Request) { - if works { - output, _ := json.Marshal(expect) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - // We don't need to check the error output from this - _, _ = w.Write(output) - } else { - w.WriteHeader(http.StatusBadRequest) - } - })) - t.Cleanup(server.Close) - - return expect, server -} - // setupLogCapture captures the logs and keeps count of the logs captured func setupLogCapture(ctx context.Context) (context.Context, *[]string) { calls := []string{} diff --git a/internal/upgradecheck/http.go b/internal/upgradecheck/http.go index 339ce1700..35911b0cb 100644 --- a/internal/upgradecheck/http.go +++ b/internal/upgradecheck/http.go @@ -13,7 +13,6 @@ import ( "github.com/golang-jwt/jwt/v5" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/rest" crclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -66,8 +65,8 @@ func init() { } func checkForUpgrades(ctx context.Context, url, versionString string, backoff wait.Backoff, - crclient crclient.Client, cfg *rest.Config, - isOpenShift bool, registrationToken string) (message string, header string, err error) { + crclient crclient.Client, registrationToken string, +) (message string, header string, err error) { var headerPayloadStruct *clientUpgradeData // Prep request @@ -75,8 +74,8 @@ func checkForUpgrades(ctx context.Context, url, versionString string, backoff wa if err == nil { // generateHeader always returns some sort of struct, using defaults/nil values // in case some of the checks return errors - headerPayloadStruct = generateHeader(ctx, cfg, crclient, - versionString, isOpenShift, registrationToken) + headerPayloadStruct = generateHeader(ctx, crclient, + versionString, registrationToken) req = addHeader(req, headerPayloadStruct) } @@ -124,9 +123,7 @@ func checkForUpgrades(ctx context.Context, url, versionString string, backoff wa type CheckForUpgradesScheduler struct { Client crclient.Client - Config *rest.Config - OpenShift bool Refresh time.Duration RegistrationToken string URL, Version string @@ -138,7 +135,7 @@ type CheckForUpgradesScheduler struct { // so this token is always current; but if that restart behavior is changed, // we will want the upgrade mechanism to instantiate its own registration runner // or otherwise get the most 
recent token. -func ManagedScheduler(m manager.Manager, openshift bool, +func ManagedScheduler(m manager.Manager, url, version string, registrationToken *jwt.Token) error { if url == "" { url = upgradeCheckURL @@ -151,8 +148,6 @@ func ManagedScheduler(m manager.Manager, openshift bool, return m.Add(&CheckForUpgradesScheduler{ Client: m.GetClient(), - Config: m.GetConfig(), - OpenShift: openshift, Refresh: 24 * time.Hour, RegistrationToken: token, URL: url, @@ -191,7 +186,7 @@ func (s *CheckForUpgradesScheduler) check(ctx context.Context) { }() info, header, err := checkForUpgrades(ctx, - s.URL, s.Version, backoff, s.Client, s.Config, s.OpenShift, s.RegistrationToken) + s.URL, s.Version, backoff, s.Client, s.RegistrationToken) if err != nil { log.V(1).Info("could not complete upgrade check", "response", err.Error()) diff --git a/internal/upgradecheck/http_test.go b/internal/upgradecheck/http_test.go index 9535f942e..23d36bea1 100644 --- a/internal/upgradecheck/http_test.go +++ b/internal/upgradecheck/http_test.go @@ -18,7 +18,6 @@ import ( "github.com/go-logr/logr/funcr" "gotest.tools/v3/assert" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/manager" "github.com/crunchydata/postgres-operator/internal/feature" @@ -49,7 +48,6 @@ func (m *MockClient) Do(req *http.Request) (*http.Response, error) { func TestCheckForUpgrades(t *testing.T) { fakeClient := setupFakeClientWithPGOScheme(t, true) - cfg := &rest.Config{} ctx := logging.NewContext(context.Background(), logging.Discard()) gate := feature.NewGate() @@ -83,7 +81,7 @@ func TestCheckForUpgrades(t *testing.T) { } res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false, "speakFriend") + fakeClient, "speakFriend") assert.NilError(t, err) assert.Equal(t, res, `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}`) checkData(t, header) @@ -98,7 +96,7 @@ func TestCheckForUpgrades(t *testing.T) { } res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false, "speakFriend") + fakeClient, "speakFriend") // Two failed calls because of env var assert.Equal(t, counter, 2) assert.Equal(t, res, "") @@ -118,7 +116,7 @@ func TestCheckForUpgrades(t *testing.T) { } res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false, "speakFriend") + fakeClient, "speakFriend") assert.Equal(t, res, "") // Two failed calls because of env var assert.Equal(t, counter, 2) @@ -147,7 +145,7 @@ func TestCheckForUpgrades(t *testing.T) { } res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false, "speakFriend") + fakeClient, "speakFriend") assert.Equal(t, counter, 2) assert.NilError(t, err) assert.Equal(t, res, `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}`) @@ -158,9 +156,6 @@ func TestCheckForUpgrades(t *testing.T) { // TODO(benjaminjb): Replace `fake` with envtest func TestCheckForUpgradesScheduler(t *testing.T) { fakeClient := setupFakeClientWithPGOScheme(t, false) - _, server := setupVersionServer(t, true) - defer server.Close() - cfg := &rest.Config{Host: server.URL} t.Run("panic from checkForUpgrades doesn't bubble up", func(t *testing.T) { ctx := context.Background() @@ -180,7 +175,6 @@ func TestCheckForUpgradesScheduler(t *testing.T) { s := CheckForUpgradesScheduler{ Client: fakeClient, - Config: cfg, } s.check(ctx) @@ -213,7 +207,6 @@ func TestCheckForUpgradesScheduler(t *testing.T) { 
defer cancel() s := CheckForUpgradesScheduler{ Client: fakeClient, - Config: cfg, Refresh: 1 * time.Second, } assert.ErrorIs(t, context.DeadlineExceeded, s.Start(ctx)) From b4d7444a363d7717a87c48ff12343a768f0160b4 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Wed, 13 Nov 2024 15:50:38 -0600 Subject: [PATCH 4/5] Use the discovery runner in controllers --- cmd/postgres-operator/main.go | 12 ++--- .../controller/postgrescluster/controller.go | 47 +++---------------- .../controller/postgrescluster/snapshots.go | 14 +++--- .../postgrescluster/snapshots_test.go | 38 +++++++-------- .../standalone_pgadmin/controller.go | 3 +- internal/controller/standalone_pgadmin/pod.go | 14 ++++-- .../controller/standalone_pgadmin/pod_test.go | 14 ++++-- .../standalone_pgadmin/statefulset.go | 6 +-- internal/postgres/reconcile.go | 10 ++-- 9 files changed, 60 insertions(+), 98 deletions(-) diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go index 5e5849ac7..1f503962a 100644 --- a/cmd/postgres-operator/main.go +++ b/cmd/postgres-operator/main.go @@ -174,7 +174,7 @@ func main() { token, _ := registrar.CheckToken() // add all PostgreSQL Operator controllers to the runtime manager - addControllersToManager(mgr, k8s.IsOpenShift(), log, registrar) + addControllersToManager(mgr, log, registrar) if features.Enabled(feature.BridgeIdentifiers) { constructor := func() *bridge.Client { @@ -214,10 +214,9 @@ func main() { // addControllersToManager adds all PostgreSQL Operator controllers to the provided controller // runtime manager. -func addControllersToManager(mgr runtime.Manager, openshift bool, log logging.Logger, reg registration.Registration) { +func addControllersToManager(mgr runtime.Manager, log logging.Logger, reg registration.Registration) { pgReconciler := &postgrescluster.Reconciler{ Client: mgr.GetClient(), - IsOpenShift: openshift, Owner: postgrescluster.ControllerName, Recorder: mgr.GetEventRecorderFor(postgrescluster.ControllerName), Registration: reg, @@ -242,10 +241,9 @@ func addControllersToManager(mgr runtime.Manager, openshift bool, log logging.Lo } pgAdminReconciler := &standalone_pgadmin.PGAdminReconciler{ - Client: mgr.GetClient(), - Owner: "pgadmin-controller", - Recorder: mgr.GetEventRecorderFor(naming.ControllerPGAdmin), - IsOpenShift: openshift, + Client: mgr.GetClient(), + Owner: "pgadmin-controller", + Recorder: mgr.GetEventRecorderFor(naming.ControllerPGAdmin), } if err := pgAdminReconciler.SetupWithManager(mgr); err != nil { diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go index 2a622eb0e..dc7f5fcba 100644 --- a/internal/controller/postgrescluster/controller.go +++ b/internal/controller/postgrescluster/controller.go @@ -18,11 +18,9 @@ import ( policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/client-go/discovery" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -33,6 +31,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/kubernetes" 
"github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/pgaudit" "github.com/crunchydata/postgres-operator/internal/pgbackrest" @@ -51,11 +50,9 @@ const ( // Reconciler holds resources for the PostgresCluster reconciler type Reconciler struct { - Client client.Client - DiscoveryClient *discovery.DiscoveryClient - IsOpenShift bool - Owner client.FieldOwner - PodExec func( + Client client.Client + Owner client.FieldOwner + PodExec func( ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error @@ -94,8 +91,9 @@ func (r *Reconciler) Reconcile( // from its cache. cluster.Default() + // TODO(openshift): Separate this into more specific detections elsewhere. if cluster.Spec.OpenShift == nil { - cluster.Spec.OpenShift = &r.IsOpenShift + cluster.Spec.OpenShift = initialize.Bool(kubernetes.IsOpenShift(ctx)) } // Keep a copy of cluster prior to any manipulations. @@ -482,14 +480,6 @@ func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { } } - if r.DiscoveryClient == nil { - var err error - r.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(mgr.GetConfig()) - if err != nil { - return err - } - } - return builder.ControllerManagedBy(mgr). For(&v1beta1.PostgresCluster{}). Owns(&corev1.ConfigMap{}). @@ -510,28 +500,3 @@ func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { r.controllerRefHandlerFuncs()). // watch all StatefulSets Complete(r) } - -// GroupVersionKindExists checks to see whether a given Kind for a given -// GroupVersion exists in the Kubernetes API Server. -func (r *Reconciler) GroupVersionKindExists(groupVersion, kind string) (*bool, error) { - if r.DiscoveryClient == nil { - return initialize.Bool(false), nil - } - - resourceList, err := r.DiscoveryClient.ServerResourcesForGroupVersion(groupVersion) - if err != nil { - if apierrors.IsNotFound(err) { - return initialize.Bool(false), nil - } - - return nil, err - } - - for _, resource := range resourceList.APIResources { - if resource.Kind == kind { - return initialize.Bool(true), nil - } - } - - return initialize.Bool(false), nil -} diff --git a/internal/controller/postgrescluster/snapshots.go b/internal/controller/postgrescluster/snapshots.go index 76ad19560..2b6550593 100644 --- a/internal/controller/postgrescluster/snapshots.go +++ b/internal/controller/postgrescluster/snapshots.go @@ -21,6 +21,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/kubernetes" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pgbackrest" "github.com/crunchydata/postgres-operator/internal/postgres" @@ -56,14 +57,11 @@ func (r *Reconciler) reconcileVolumeSnapshots(ctx context.Context, return nil } - // Check if the Kube cluster has VolumeSnapshots installed. If VolumeSnapshots - // are not installed, we need to return early. If user is attempting to use - // VolumeSnapshots, return an error, otherwise return nil. - volumeSnapshotKindExists, err := r.GroupVersionKindExists("snapshot.storage.k8s.io/v1", "VolumeSnapshot") - if err != nil { - return err - } - if !*volumeSnapshotKindExists { + // Return early when VolumeSnapshots are not installed in Kubernetes. + // If user is attempting to use VolumeSnapshots, return an error. 
+ if !kubernetes.Has( + ctx, volumesnapshotv1.SchemeGroupVersion.WithKind("VolumeSnapshot"), + ) { if postgrescluster.Spec.Backups.Snapshots != nil { return errors.New("VolumeSnapshots are not installed/enabled in this Kubernetes cluster; cannot create snapshot.") } else { diff --git a/internal/controller/postgrescluster/snapshots_test.go b/internal/controller/postgrescluster/snapshots_test.go index 98e233649..828ad3ea2 100644 --- a/internal/controller/postgrescluster/snapshots_test.go +++ b/internal/controller/postgrescluster/snapshots_test.go @@ -16,12 +16,12 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/discovery" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/kubernetes" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/events" @@ -33,26 +33,26 @@ import ( func TestReconcileVolumeSnapshots(t *testing.T) { ctx := context.Background() - cfg, cc := setupKubernetes(t) + _, cc := setupKubernetes(t) require.ParallelCapacity(t, 1) - discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) - assert.NilError(t, err) recorder := events.NewRecorder(t, runtime.Scheme) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - DiscoveryClient: discoveryClient, - Recorder: recorder, + Client: cc, + Owner: client.FieldOwner(t.Name()), + Recorder: recorder, } ns := setupNamespace(t, cc) - // Enable snapshots feature gate + // Enable snapshots feature gate and API gate := feature.NewGate() assert.NilError(t, gate.SetFromMap(map[string]bool{ feature.VolumeSnapshots: true, })) ctx = feature.NewContext(ctx, gate) + ctx = kubernetes.NewAPIContext(ctx, kubernetes.NewAPISet( + volumesnapshotv1.SchemeGroupVersion.WithKind("VolumeSnapshot"), + )) t.Run("SnapshotsDisabledDeleteSnapshots", func(t *testing.T) { // Create cluster (without snapshots spec) @@ -348,16 +348,13 @@ func TestReconcileVolumeSnapshots(t *testing.T) { func TestReconcileDedicatedSnapshotVolume(t *testing.T) { ctx := context.Background() - cfg, cc := setupKubernetes(t) - discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) - assert.NilError(t, err) + _, cc := setupKubernetes(t) recorder := events.NewRecorder(t, runtime.Scheme) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - DiscoveryClient: discoveryClient, - Recorder: recorder, + Client: cc, + Owner: client.FieldOwner(t.Name()), + Recorder: recorder, } // Enable snapshots feature gate @@ -1253,14 +1250,11 @@ func TestGetLatestReadySnapshot(t *testing.T) { func TestDeleteSnapshots(t *testing.T) { ctx := context.Background() - cfg, cc := setupKubernetes(t) - discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) - assert.NilError(t, err) + _, cc := setupKubernetes(t) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - DiscoveryClient: discoveryClient, + Client: cc, + Owner: client.FieldOwner(t.Name()), } ns := setupNamespace(t, cc) diff --git a/internal/controller/standalone_pgadmin/controller.go b/internal/controller/standalone_pgadmin/controller.go index 81d5fc2d4..8edb22cd5 100644 --- 
a/internal/controller/standalone_pgadmin/controller.go +++ b/internal/controller/standalone_pgadmin/controller.go @@ -30,8 +30,7 @@ type PGAdminReconciler struct { ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error - Recorder record.EventRecorder - IsOpenShift bool + Recorder record.EventRecorder } //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={list,watch} diff --git a/internal/controller/standalone_pgadmin/pod.go b/internal/controller/standalone_pgadmin/pod.go index bbb39b932..947662b51 100644 --- a/internal/controller/standalone_pgadmin/pod.go +++ b/internal/controller/standalone_pgadmin/pod.go @@ -5,6 +5,7 @@ package standalone_pgadmin import ( + "context" "fmt" "strings" @@ -14,6 +15,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/kubernetes" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -443,8 +445,8 @@ with open('` + configMountPath + `/` + gunicornConfigFilePath + `') as _f: // podSecurityContext returns a v1.PodSecurityContext for pgadmin that can write // to PersistentVolumes. -func podSecurityContext(r *PGAdminReconciler) *corev1.PodSecurityContext { - podSecurityContext := initialize.PodSecurityContext() +func podSecurityContext(ctx context.Context) *corev1.PodSecurityContext { + psc := initialize.PodSecurityContext() // TODO (dsessler7): Add ability to add supplemental groups @@ -454,9 +456,11 @@ func podSecurityContext(r *PGAdminReconciler) *corev1.PodSecurityContext { // - https://cloud.redhat.com/blog/a-guide-to-openshift-and-uids // - https://docs.k8s.io/tasks/configure-pod-container/security-context/ // - https://docs.openshift.com/container-platform/4.14/authentication/managing-security-context-constraints.html - if !r.IsOpenShift { - podSecurityContext.FSGroup = initialize.Int64(2) + if !kubernetes.Has(ctx, kubernetes.API{ + Group: "security.openshift.io", Kind: "SecurityContextConstraints", + }) { + psc.FSGroup = initialize.Int64(2) } - return podSecurityContext + return psc } diff --git a/internal/controller/standalone_pgadmin/pod_test.go b/internal/controller/standalone_pgadmin/pod_test.go index 19cee5288..6ade50d79 100644 --- a/internal/controller/standalone_pgadmin/pod_test.go +++ b/internal/controller/standalone_pgadmin/pod_test.go @@ -5,6 +5,7 @@ package standalone_pgadmin import ( + "context" "testing" "gotest.tools/v3/assert" @@ -13,6 +14,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/kubernetes" "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -434,14 +436,16 @@ func TestPodConfigFiles(t *testing.T) { } func TestPodSecurityContext(t *testing.T) { - pgAdminReconciler := &PGAdminReconciler{} - - assert.Assert(t, cmp.MarshalMatches(podSecurityContext(pgAdminReconciler), ` + ctx := context.Background() + assert.Assert(t, cmp.MarshalMatches(podSecurityContext(ctx), ` fsGroup: 2 fsGroupChangePolicy: OnRootMismatch `)) - pgAdminReconciler.IsOpenShift = true - assert.Assert(t, cmp.MarshalMatches(podSecurityContext(pgAdminReconciler), + ctx = 
kubernetes.NewAPIContext(ctx, kubernetes.NewAPISet(kubernetes.API{ + Group: "security.openshift.io", Version: "v1", + Kind: "SecurityContextConstraints", + })) + assert.Assert(t, cmp.MarshalMatches(podSecurityContext(ctx), `fsGroupChangePolicy: OnRootMismatch`)) } diff --git a/internal/controller/standalone_pgadmin/statefulset.go b/internal/controller/standalone_pgadmin/statefulset.go index 39e434f18..84f431f5c 100644 --- a/internal/controller/standalone_pgadmin/statefulset.go +++ b/internal/controller/standalone_pgadmin/statefulset.go @@ -25,7 +25,7 @@ func (r *PGAdminReconciler) reconcilePGAdminStatefulSet( ctx context.Context, pgadmin *v1beta1.PGAdmin, configmap *corev1.ConfigMap, dataVolume *corev1.PersistentVolumeClaim, ) error { - sts := statefulset(r, pgadmin, configmap, dataVolume) + sts := statefulset(ctx, pgadmin, configmap, dataVolume) // Previous versions of PGO used a StatefulSet Pod Management Policy that could leave the Pod // in a failed state. When we see that it has the wrong policy, we will delete the StatefulSet @@ -58,7 +58,7 @@ func (r *PGAdminReconciler) reconcilePGAdminStatefulSet( // statefulset defines the StatefulSet needed to run pgAdmin. func statefulset( - r *PGAdminReconciler, + ctx context.Context, pgadmin *v1beta1.PGAdmin, configmap *corev1.ConfigMap, dataVolume *corev1.PersistentVolumeClaim, @@ -115,7 +115,7 @@ func statefulset( // set the image pull secrets, if any exist sts.Spec.Template.Spec.ImagePullSecrets = pgadmin.Spec.ImagePullSecrets - sts.Spec.Template.Spec.SecurityContext = podSecurityContext(r) + sts.Spec.Template.Spec.SecurityContext = podSecurityContext(ctx) pod(pgadmin, configmap, &sts.Spec.Template.Spec, dataVolume) diff --git a/internal/postgres/reconcile.go b/internal/postgres/reconcile.go index 344f91dd9..779a0f567 100644 --- a/internal/postgres/reconcile.go +++ b/internal/postgres/reconcile.go @@ -276,14 +276,14 @@ func InstancePod(ctx context.Context, // PodSecurityContext returns a v1.PodSecurityContext for cluster that can write // to PersistentVolumes. func PodSecurityContext(cluster *v1beta1.PostgresCluster) *corev1.PodSecurityContext { - podSecurityContext := initialize.PodSecurityContext() + psc := initialize.PodSecurityContext() // Use the specified supplementary groups except for root. The CRD has // similar validation, but we should never emit a PodSpec with that group. 
// - https://docs.k8s.io/concepts/security/pod-security-standards/ for i := range cluster.Spec.SupplementalGroups { if gid := cluster.Spec.SupplementalGroups[i]; gid > 0 { - podSecurityContext.SupplementalGroups = append(podSecurityContext.SupplementalGroups, gid) + psc.SupplementalGroups = append(psc.SupplementalGroups, gid) } } @@ -293,9 +293,9 @@ func PodSecurityContext(cluster *v1beta1.PostgresCluster) *corev1.PodSecurityCon // - https://cloud.redhat.com/blog/a-guide-to-openshift-and-uids // - https://docs.k8s.io/tasks/configure-pod-container/security-context/ // - https://docs.openshift.com/container-platform/4.8/authentication/managing-security-context-constraints.html - if cluster.Spec.OpenShift == nil || !*cluster.Spec.OpenShift { - podSecurityContext.FSGroup = initialize.Int64(26) + if !initialize.FromPointer(cluster.Spec.OpenShift) { + psc.FSGroup = initialize.Int64(26) } - return podSecurityContext + return psc } From c360e17a5edb6afa8031b165fec8453ddfc6e5c8 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Tue, 19 Nov 2024 11:06:13 -0600 Subject: [PATCH 5/5] Add a linter to remind us about our internal package We should probably extend our discovery runner the next time we want to use the discovery client. --- .golangci.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.golangci.yaml b/.golangci.yaml index d46231c41..d886a4fb1 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -40,6 +40,9 @@ linters-settings: - pkg: github.com/crunchydata/postgres-operator/internal/testing/* desc: The "internal/testing" packages should be used only in tests. + - pkg: k8s.io/client-go/discovery + desc: Use the "internal/kubernetes" package instead. + tests: files: ['$test'] deny: @@ -93,6 +96,11 @@ linters-settings: issues: exclude-generated: strict exclude-rules: + # This internal package is the one place we want to do API discovery. + - linters: [depguard] + path: internal/kubernetes/discovery.go + text: k8s.io/client-go/discovery + # These value types have unmarshal methods. # https://github.com/raeperd/recvcheck/issues/7 - linters: [recvcheck]