diff --git a/Makefile b/Makefile index ffd076cc8..3e6084a8d 100644 --- a/Makefile +++ b/Makefile @@ -109,7 +109,7 @@ init: cleanup-localbin ## Install development tools $(MAKE) opm .PHONY: format -format: +format: ## Format code. GOOS=$(OS) GOARCH=$(ARCH) go tool gofumpt -l -w . GOOS=$(OS) GOARCH=$(ARCH) go tool goimports -local github.com/percona/everest-operator -l -w . GOOS=$(OS) GOARCH=$(ARCH) go tool gci write --skip-generated -s standard -s default -s "prefix(github.com/percona/everest-operator)" . @@ -174,42 +174,13 @@ k3d-cluster-down: ## Create a K8S cluster for testing rm -f ./tests/kubeconfig || true .PHONY: k3d-upload-image -k3d-upload-image: +k3d-upload-image: ## Upload the everest-operator image to the k3d cluster k3d image import -c everest-operator-test -m direct $(IMG) # Cleanup all resources created by the tests .PHONY: cluster-cleanup -cluster-cleanup: - kubectl delete db --all-namespaces --all --cascade=foreground --ignore-not-found=true || true - @namespaces=$$(kubectl get pxc -A -o jsonpath='{.items[*].metadata.namespace}'); \ - for ns in $$namespaces; do \ - kubectl -n $$ns get pxc -o name | xargs --no-run-if-empty -I{} kubectl patch -n $$ns {} -p '{"metadata":{"finalizers":null}}' --type=merge; \ - done - @namespaces=$$(kubectl get psmdb -A -o jsonpath='{.items[*].metadata.namespace}'); \ - for ns in $$namespaces; do \ - kubectl -n $$ns get psmdb -o name | xargs --no-run-if-empty -I{} kubectl patch -n $$ns {} -p '{"metadata":{"finalizers":null}}' --type=merge; \ - done - @namespaces=$$(kubectl get pg -A -o jsonpath='{.items[*].metadata.namespace}'); \ - for ns in $$namespaces; do \ - kubectl -n $$ns get pg -o name | xargs --no-run-if-empty -I{} kubectl patch -n $$ns {} -p '{"metadata":{"finalizers":null}}' --type=merge; \ - done - @namespaces=$$(kubectl get db -A -o jsonpath='{.items[*].metadata.namespace}'); \ - for ns in $$namespaces; do \ - kubectl -n $$ns get db -o name | xargs --no-run-if-empty -I{} kubectl patch -n $$ns {} -p 
'{"metadata":{"finalizers":null}}' --type=merge; \ - done - @namespaces=$$(kubectl get db -A -o jsonpath='{.items[*].metadata.namespace}'); \ - for ns in $$namespaces; do \ - kubectl -n $$ns delete -f ./tests/testdata/minio --ignore-not-found || true; \ - done - kubectl delete pvc --all-namespaces --all --ignore-not-found=true || true - kubectl delete backupstorage --all-namespaces --all --ignore-not-found=true || true - kubectl get ns -o name | grep kuttl | xargs --no-run-if-empty kubectl delete || true - kubectl delete ns operators olm --ignore-not-found=true --wait=false || true - sleep 10 - kubectl delete apiservice v1.packages.operators.coreos.com --ignore-not-found=true || true - kubectl get crd -o name | grep .coreos.com$ | xargs --no-run-if-empty kubectl delete || true - kubectl get crd -o name | grep .percona.com$ | xargs --no-run-if-empty kubectl delete || true - kubectl delete crd postgresclusters.postgres-operator.crunchydata.com --ignore-not-found=true || true +cluster-cleanup: ## Cleanup all resources created by the tests from the K8S cluster + ./scripts/cluster-cleanup.sh ##@ Build diff --git a/api/v1alpha1/databasecluster_types.go b/api/v1alpha1/databasecluster_types.go index 07e7d304a..8f59f748a 100644 --- a/api/v1alpha1/databasecluster_types.go +++ b/api/v1alpha1/databasecluster_types.go @@ -69,6 +69,8 @@ const ( ProxyTypeMongos ProxyType = "mongos" // ProxyTypeHAProxy is a HAProxy proxy type. ProxyTypeHAProxy ProxyType = "haproxy" + // ProxyTypeRouter is a Router proxy type for Percona Server for MySQL. + ProxyTypeRouter ProxyType = "router" // ProxyTypeProxySQL is a ProxySQL proxy type. ProxyTypeProxySQL ProxyType = "proxysql" // ProxyTypePGBouncer is a PGBouncer proxy type. @@ -156,7 +158,7 @@ type Resources struct { // Engine is the engine configuration. 
type Engine struct { // Type is the engine type - // +kubebuilder:validation:Enum:=pxc;postgresql;psmdb + // +kubebuilder:validation:Enum:=pxc;ps;postgresql;psmdb Type EngineType `json:"type"` // Version is the engine version Version string `json:"version,omitempty"` diff --git a/api/v1alpha1/databaseengine_types.go b/api/v1alpha1/databaseengine_types.go index bdb694d85..b93c80808 100644 --- a/api/v1alpha1/databaseengine_types.go +++ b/api/v1alpha1/databaseengine_types.go @@ -36,6 +36,8 @@ const ( // DatabaseEnginePXC represents engine type for PXC clusters. DatabaseEnginePXC EngineType = "pxc" + // DatabaseEnginePS represents engine type for PS clusters. + DatabaseEnginePS EngineType = "ps" // DatabaseEnginePSMDB represents engine type for PSMDB clusters. DatabaseEnginePSMDB EngineType = "psmdb" // DatabaseEnginePostgresql represents engine type for Postgresql clusters. @@ -171,12 +173,12 @@ type OperatorUpgradeStatus struct { Message string `json:"message,omitempty"` } -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status -//+kubebuilder:resource:shortName=dbengine; -//+kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type" -//+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.status" -//+kubebuilder:printcolumn:name="Operator Version",type="string",JSONPath=".status.operatorVersion" +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=dbengine; +// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.status" +// +kubebuilder:printcolumn:name="Operator Version",type="string",JSONPath=".status.operatorVersion" // DatabaseEngine is the Schema for the databaseengines API. 
type DatabaseEngine struct { @@ -187,7 +189,7 @@ type DatabaseEngine struct { Status DatabaseEngineStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // DatabaseEngineList contains a list of DatabaseEngine. type DatabaseEngineList struct { diff --git a/bundle/manifests/everest-operator.clusterserviceversion.yaml b/bundle/manifests/everest-operator.clusterserviceversion.yaml index f8ac50a99..5bba3c44f 100644 --- a/bundle/manifests/everest-operator.clusterserviceversion.yaml +++ b/bundle/manifests/everest-operator.clusterserviceversion.yaml @@ -301,6 +301,20 @@ spec: - get - list - watch + - apiGroups: + - ps.percona.com + resources: + - perconaservermysqlbackups + - perconaservermysqlrestores + - perconaservermysqls + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - psmdb.percona.com resources: diff --git a/bundle/manifests/everest.percona.com_databaseclusters.yaml b/bundle/manifests/everest.percona.com_databaseclusters.yaml index 947b2f830..a0c11be96 100644 --- a/bundle/manifests/everest.percona.com_databaseclusters.yaml +++ b/bundle/manifests/everest.percona.com_databaseclusters.yaml @@ -317,6 +317,7 @@ spec: description: Type is the engine type enum: - pxc + - ps - postgresql - psmdb type: string diff --git a/cmd/main.go b/cmd/main.go index 3294d3a12..f4eb60c4f 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -29,6 +29,7 @@ import ( pgv2 "github.com/percona/percona-postgresql-operator/pkg/apis/pgv2.percona.com/v2" crunchyv1beta1 "github.com/percona/percona-postgresql-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" psmdbv1 "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" + psv1 "github.com/percona/percona-server-mysql-operator/api/v1alpha1" pxcv1 "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -112,6 +113,7 @@ func init() { 
utilruntime.Must(pgv2.SchemeBuilder.AddToScheme(scheme)) utilruntime.Must(psmdbv1.SchemeBuilder.AddToScheme(scheme)) utilruntime.Must(pxcv1.SchemeBuilder.AddToScheme(scheme)) + utilruntime.Must(psv1.SchemeBuilder.AddToScheme(scheme)) utilruntime.Must(crunchyv1beta1.SchemeBuilder.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } diff --git a/config/crd/bases/everest.percona.com_databaseclusters.yaml b/config/crd/bases/everest.percona.com_databaseclusters.yaml index 16daa1f64..cce97246a 100644 --- a/config/crd/bases/everest.percona.com_databaseclusters.yaml +++ b/config/crd/bases/everest.percona.com_databaseclusters.yaml @@ -317,6 +317,7 @@ spec: description: Type is the engine type enum: - pxc + - ps - postgresql - psmdb type: string diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 3101ec171..0e914ab10 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -168,6 +168,20 @@ rules: - get - list - watch +- apiGroups: + - ps.percona.com + resources: + - perconaservermysqlbackups + - perconaservermysqlrestores + - perconaservermysqls + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - psmdb.percona.com resources: diff --git a/deploy/bundle.yaml b/deploy/bundle.yaml index 3cec3320c..0a435b6bd 100644 --- a/deploy/bundle.yaml +++ b/deploy/bundle.yaml @@ -689,6 +689,7 @@ spec: description: Type is the engine type enum: - pxc + - ps - postgresql - psmdb type: string @@ -8797,6 +8798,20 @@ rules: - get - list - watch +- apiGroups: + - ps.percona.com + resources: + - perconaservermysqlbackups + - perconaservermysqlrestores + - perconaservermysqls + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - psmdb.percona.com resources: diff --git a/go.mod b/go.mod index 44fbf87ab..445a911ab 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( github.com/percona/percona-backup-mongodb v1.8.1-0.20241212160532-0157f87a7eee 
github.com/percona/percona-postgresql-operator v0.0.0-20250313094841-676233c83e26 github.com/percona/percona-server-mongodb-operator v1.19.1 + github.com/percona/percona-server-mysql-operator v0.10.0 github.com/percona/percona-xtradb-cluster-operator v1.17.0 github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.9.1 @@ -101,7 +102,7 @@ require ( github.com/catenacyber/perfsprint v0.9.1 // indirect github.com/ccojocar/zxcvbn-go v1.0.4 // indirect github.com/cenkalti/backoff/v5 v5.0.2 // indirect - github.com/cert-manager/cert-manager v1.17.1 // indirect + github.com/cert-manager/cert-manager v1.17.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect @@ -129,7 +130,7 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/firefart/nonamedreturns v1.0.6 // indirect github.com/flosch/pongo2/v6 v6.0.0 // indirect - github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fxamacker/cbor/v2 v2.8.0 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect github.com/getkin/kin-openapi v0.132.0 // indirect @@ -144,7 +145,7 @@ require ( github.com/go-openapi/jsonpointer v0.21.1 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.1 // indirect - github.com/go-sql-driver/mysql v1.9.1 // indirect + github.com/go-sql-driver/mysql v1.9.2 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect @@ -281,6 +282,7 @@ require ( github.com/reviewdog/go-bitbucket v0.0.0-20201024094602-708c3f6a7de0 // indirect github.com/reviewdog/reviewdog v0.20.3 // indirect github.com/rivo/uniseg v0.4.7 // indirect + github.com/robfig/cron/v3 v3.0.2-0.20210106135023-bc59245fe10e // indirect github.com/rogpeppe/go-internal v1.14.1 // 
indirect github.com/ryancurrah/gomodguard v1.4.1 // indirect github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect @@ -374,7 +376,7 @@ require ( golang.org/x/text v0.26.0 // indirect golang.org/x/time v0.12.0 // indirect golang.org/x/tools v0.34.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/grpc v1.73.0 // indirect diff --git a/go.sum b/go.sum index e6ccac0b6..7cfc38672 100644 --- a/go.sum +++ b/go.sum @@ -222,8 +222,8 @@ github.com/ccojocar/zxcvbn-go v1.0.4/go.mod h1:3GxGX+rHmueTUMvm5ium7irpyjmm7ikxY github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cert-manager/cert-manager v1.17.1 h1:Aig+lWMoLsmpGd9TOlTvO4t0Ah3D+/vGB37x/f+ZKt0= -github.com/cert-manager/cert-manager v1.17.1/go.mod h1:zeG4D+AdzqA7hFMNpYCJgcQ2VOfFNBa+Jzm3kAwiDU4= +github.com/cert-manager/cert-manager v1.17.2 h1:QQYTEOsHf/Z3BFzKH2sIILHJwZA5Ut0LYZlHyNViupg= +github.com/cert-manager/cert-manager v1.17.2/go.mod h1:2TmjsTQF8GZqc8fgLhXWCfbA6YwWCUHKxerJNbFh9eU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -335,8 +335,8 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= @@ -434,8 +434,8 @@ github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2K github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= -github.com/go-sql-driver/mysql v1.9.1 h1:FrjNGn/BsJQjVRuSa8CBrM5BWA9BWoXXat3KrtSb/iI= -github.com/go-sql-driver/mysql v1.9.1/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= +github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU= +github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= @@ -882,6 +882,8 @@ github.com/percona/percona-postgresql-operator v0.0.0-20250313094841-676233c83e2 github.com/percona/percona-postgresql-operator v0.0.0-20250313094841-676233c83e26/go.mod h1:3D56UIi6Z0Z2gduNUuBcgjd1RNht3N8RKKmR9Wbfu4o= 
github.com/percona/percona-server-mongodb-operator v1.19.1 h1:lqIC7V80bZPJwjeYLYl/WA+QVQMHo193uEAx5zyIg84= github.com/percona/percona-server-mongodb-operator v1.19.1/go.mod h1:BEw28t4Byx7NK3APLDc7KENgYxPyMPcDLN2ylML5Jo4= +github.com/percona/percona-server-mysql-operator v0.10.0 h1:LeSylRqqDQltrIp3VdUHNORr92U7y3fJb1WwukuFF6s= +github.com/percona/percona-server-mysql-operator v0.10.0/go.mod h1:76ei54qt/1KIwBVdaF3DHhW1lUSP8JwKLyEa3/9JmKw= github.com/percona/percona-xtradb-cluster-operator v1.17.0 h1:TJSWG/C78jthN48NMW5ys5LOPU59lhTzVxtozUNkgqY= github.com/percona/percona-xtradb-cluster-operator v1.17.0/go.mod h1:6uwWKdJiivkYaknuklbD8GVvJjZSTdpFtEUiVv+3TjM= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= @@ -960,6 +962,8 @@ github.com/reviewdog/reviewdog v0.20.3/go.mod h1:YAHHBArqeZv7Nf3YXwXvAlgSyjxTA4k github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/robfig/cron/v3 v3.0.2-0.20210106135023-bc59245fe10e h1:0xChnl3lhHiXbgSJKgChye0D+DvoItkOdkGcwelDXH0= +github.com/robfig/cron/v3 v3.0.2-0.20210106135023-bc59245fe10e/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= @@ -1541,8 +1545,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod 
h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= -gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= diff --git a/internal/consts/consts.go b/internal/consts/consts.go index cc4e9cda9..f7ed87727 100644 --- a/internal/consts/consts.go +++ b/internal/consts/consts.go @@ -40,6 +40,8 @@ const ( // PXCDeploymentName is the name of the Percona XtraDB Cluster operator deployment. PXCDeploymentName = "percona-xtradb-cluster-operator" + // PSDeploymentName is the name of the Percona Server for MySQL operator deployment. + PSDeploymentName = "percona-server-mysql-operator" // PSMDBDeploymentName is the name of the Percona Server for MongoDB operator deployment. PSMDBDeploymentName = "percona-server-mongodb-operator" // PGDeploymentName is the name of the Percona PostgreSQL operator deployment. @@ -47,6 +49,8 @@ const ( // PXCAPIGroup is the API group for Percona XtraDB Cluster. PXCAPIGroup = "pxc.percona.com" + // PSAPIGroup is the API group for Percona Server for MySQL Cluster. + PSAPIGroup = "ps.percona.com" // PSMDBAPIGroup is the API group for Percona Server for MongoDB. PSMDBAPIGroup = "psmdb.percona.com" // PGAPIGroup is the API group for Percona PostgreSQL. @@ -54,6 +58,8 @@ const ( // PerconaXtraDBClusterKind is the kind for Percona XtraDB Cluster. PerconaXtraDBClusterKind = "PerconaXtraDBCluster" + // PerconaServerMySQLKind is the kind for Percona Server for MySQL Cluster. 
+ PerconaServerMySQLKind = "PerconaServerMySQL" // PerconaServerMongoDBKind is the kind for Percona Server for MongoDB. PerconaServerMongoDBKind = "PerconaServerMongoDB" // PerconaPGClusterKind is the kind for Percona PostgreSQL. diff --git a/internal/controller/common/helper.go b/internal/controller/common/helper.go index 8ad1853fb..d75bf42e8 100644 --- a/internal/controller/common/helper.go +++ b/internal/controller/common/helper.go @@ -703,7 +703,7 @@ func deleteBackupsForDatabase( return false, nil } -// HandleUpstreamClusterCleanup handles the cleanup of the psdmb objects. +// HandleUpstreamClusterCleanup handles the cleanup of the upstream DB objects. // Returns true if cleanup is complete. func HandleUpstreamClusterCleanup( ctx context.Context, diff --git a/internal/controller/databasecluster_controller.go b/internal/controller/databasecluster_controller.go index cd3761919..bb67490a6 100644 --- a/internal/controller/databasecluster_controller.go +++ b/internal/controller/databasecluster_controller.go @@ -29,6 +29,7 @@ import ( pgv2 "github.com/percona/percona-postgresql-operator/pkg/apis/pgv2.percona.com/v2" crunchyv1beta1 "github.com/percona/percona-postgresql-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" psmdbv1 "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" + psv1 "github.com/percona/percona-server-mysql-operator/api/v1alpha1" pxcv1 "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -52,6 +53,7 @@ import ( "github.com/percona/everest-operator/internal/controller/common" "github.com/percona/everest-operator/internal/controller/providers" "github.com/percona/everest-operator/internal/controller/providers/pg" + "github.com/percona/everest-operator/internal/controller/providers/ps" "github.com/percona/everest-operator/internal/controller/providers/psmdb" "github.com/percona/everest-operator/internal/controller/providers/pxc" 
"github.com/percona/everest-operator/internal/predicates" @@ -121,6 +123,8 @@ func (r *DatabaseClusterReconciler) newDBProvider( switch engineType { case everestv1alpha1.DatabaseEnginePXC: return pxc.New(ctx, opts) + case everestv1alpha1.DatabaseEnginePS: + return ps.New(ctx, opts) case everestv1alpha1.DatabaseEnginePostgresql: return pg.New(ctx, opts) case everestv1alpha1.DatabaseEnginePSMDB: @@ -327,6 +331,7 @@ func (r *DatabaseClusterReconciler) ensureDataImportJob( // +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch // +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch // +kubebuilder:rbac:groups=pxc.percona.com,resources=perconaxtradbclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=ps.percona.com,resources=perconaservermysqls,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=psmdb.percona.com,resources=perconaservermongodbs,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=pgv2.percona.com,resources=perconapgclusters,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete @@ -735,6 +740,7 @@ func (r *DatabaseClusterReconciler) initWatchers(controller *builder.Builder, de controller.Watches( &everestv1alpha1.DatabaseEngine{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + // TODO: filter DB clusters by the affected DB engine type return r.databaseClustersInObjectNamespace(ctx, obj) }), builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}, defaultPredicate), @@ -994,6 +1000,10 @@ func (r *DatabaseClusterReconciler) ReconcileWatchers(ctx context.Context) error if err := addWatcher(t, &pxcv1.PerconaXtraDBCluster{}); err != nil { return err } + case everestv1alpha1.DatabaseEnginePS: + if err := addWatcher(t, 
&psv1.PerconaServerMySQL{}); err != nil { + return err + } case everestv1alpha1.DatabaseEnginePostgresql: if err := addWatcher(t, &pgv2.PerconaPGCluster{}); err != nil { return err diff --git a/internal/controller/databaseclusterbackup_controller.go b/internal/controller/databaseclusterbackup_controller.go index 785cc7c93..e9533e0bb 100644 --- a/internal/controller/databaseclusterbackup_controller.go +++ b/internal/controller/databaseclusterbackup_controller.go @@ -90,6 +90,7 @@ type DatabaseClusterBackupReconciler struct { // +kubebuilder:rbac:groups=everest.percona.com,resources=databaseclusterbackups/status,verbs=get;update;patch // +kubebuilder:rbac:groups=everest.percona.com,resources=databaseclusterbackups/finalizers,verbs=update // +kubebuilder:rbac:groups=pxc.percona.com,resources=perconaxtradbclusterbackups,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=ps.percona.com,resources=perconaservermysqlbackups,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=psmdb.percona.com,resources=perconaservermongodbbackups,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=pgv2.percona.com,resources=perconapgbackups,verbs=get;list;watch;create;update;patch;delete diff --git a/internal/controller/databaseclusterrestore_controller.go b/internal/controller/databaseclusterrestore_controller.go index a7bce276f..f8881f9ca 100644 --- a/internal/controller/databaseclusterrestore_controller.go +++ b/internal/controller/databaseclusterrestore_controller.go @@ -73,6 +73,7 @@ type DatabaseClusterRestoreReconciler struct { // +kubebuilder:rbac:groups=everest.percona.com,resources=databaseclusterrestores/status,verbs=get;update;patch // +kubebuilder:rbac:groups=everest.percona.com,resources=databaseclusterrestores/finalizers,verbs=update // +kubebuilder:rbac:groups=pxc.percona.com,resources=perconaxtradbclusterrestores,verbs=get;list;watch;create;update;patch;delete +// 
+kubebuilder:rbac:groups=ps.percona.com,resources=perconaservermysqlrestores,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=psmdb.percona.com,resources=perconaservermongodbrestores,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=pgv2.percona.com,resources=perconapgrestores,verbs=get;list;watch;create;update;patch;delete diff --git a/internal/controller/databaseengine_controller.go b/internal/controller/databaseengine_controller.go index 1cefb8c83..d41d165e4 100644 --- a/internal/controller/databaseengine_controller.go +++ b/internal/controller/databaseengine_controller.go @@ -59,6 +59,7 @@ var errInstallPlanNotFound = errors.New("install plan not found") var operatorEngine = map[string]everestv1alpha1.EngineType{ consts.PXCDeploymentName: everestv1alpha1.DatabaseEnginePXC, + consts.PSDeploymentName: everestv1alpha1.DatabaseEnginePS, consts.PSMDBDeploymentName: everestv1alpha1.DatabaseEnginePSMDB, consts.PGDeploymentName: everestv1alpha1.DatabaseEnginePostgresql, } @@ -78,13 +79,13 @@ type DatabaseController interface { ReconcileWatchers(ctx context.Context) error } -//+kubebuilder:rbac:groups=everest.percona.com,resources=databaseengines,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=everest.percona.com,resources=databaseengines/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=everest.percona.com,resources=databaseengines/finalizers,verbs=update -//+kubebuilder:rbac:groups=operators.coreos.com,resources=installplans,verbs=get;list;watch;update -//+kubebuilder:rbac:groups=operators.coreos.com,resources=clusterserviceversions,verbs=get;list;watch;update -//+kubebuilder:rbac:groups=operators.coreos.com,resources=subscriptions,verbs=get;list;watch -//+kubebuilder:rbac:groups="",resources=pods,verbs=delete +// +kubebuilder:rbac:groups=everest.percona.com,resources=databaseengines,verbs=get;list;watch;create;update;patch;delete +// 
+kubebuilder:rbac:groups=everest.percona.com,resources=databaseengines/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=everest.percona.com,resources=databaseengines/finalizers,verbs=update +// +kubebuilder:rbac:groups=operators.coreos.com,resources=installplans,verbs=get;list;watch;update +// +kubebuilder:rbac:groups=operators.coreos.com,resources=clusterserviceversions,verbs=get;list;watch;update +// +kubebuilder:rbac:groups=operators.coreos.com,resources=subscriptions,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=pods,verbs=delete // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. @@ -155,7 +156,8 @@ func (r *DatabaseEngineReconciler) Reconcile(ctx context.Context, req ctrl.Reque Backup: matrix.Backup, } - if dbEngine.Spec.Type == everestv1alpha1.DatabaseEnginePXC { + switch dbEngine.Spec.Type { + case everestv1alpha1.DatabaseEnginePXC: for key := range matrix.PXC { // We do not need supporting mysql 5 if strings.HasPrefix(key, "5") { @@ -170,19 +172,27 @@ func (r *DatabaseEngineReconciler) Reconcile(ctx context.Context, req ctrl.Reque versions.Tools = map[string]everestv1alpha1.ComponentsMap{ "logCollector": matrix.LogCollector, } - } - - if dbEngine.Spec.Type == everestv1alpha1.DatabaseEnginePSMDB { + case everestv1alpha1.DatabaseEnginePS: + versions.Engine = matrix.MYSQL + versions.Proxy = map[everestv1alpha1.ProxyType]everestv1alpha1.ComponentsMap{ + everestv1alpha1.ProxyTypeHAProxy: matrix.HAProxy, + } + versions.Tools = map[string]everestv1alpha1.ComponentsMap{ + "pmm": matrix.PMM, + "router": matrix.Router, + "toolkit": matrix.Toolkit, + "orchestrator": matrix.Orchestrator, + } + case everestv1alpha1.DatabaseEnginePSMDB: versions.Engine = matrix.Mongod - } - - if dbEngine.Spec.Type == everestv1alpha1.DatabaseEnginePostgresql { + case everestv1alpha1.DatabaseEnginePostgresql: versions.Engine = matrix.Postgresql versions.Backup = 
matrix.PGBackRest versions.Proxy = map[everestv1alpha1.ProxyType]everestv1alpha1.ComponentsMap{ everestv1alpha1.ProxyTypePGBouncer: matrix.PGBouncer, } } + dbEngine.Status.AvailableVersions = versions if err := r.Status().Update(ctx, dbEngine); err != nil { @@ -416,7 +426,7 @@ func (r *DatabaseEngineReconciler) listPendingOperatorUpgrades( } installPlanRefs := getInstallPlanRefsForUpgrade(dbEngine, subscription, installPlans) - result := []everestv1alpha1.OperatorUpgrade{} + var result []everestv1alpha1.OperatorUpgrade for v, ipName := range installPlanRefs { result = append(result, everestv1alpha1.OperatorUpgrade{ TargetVersion: v, @@ -444,7 +454,7 @@ func (r *DatabaseEngineReconciler) getOperatorStatus(ctx context.Context, name t } func (r *DatabaseEngineReconciler) ensureDBEnginesInNamespaces(ctx context.Context, namespaces []string) ([]reconcile.Request, error) { - requests := []reconcile.Request{} + var requests []reconcile.Request for _, ns := range namespaces { for operatorName, engineType := range operatorEngine { dbEngine := &everestv1alpha1.DatabaseEngine{ @@ -563,7 +573,7 @@ func getDatabaseEngineRequestsFromCSV(_ context.Context, o client.Object) []reco // getDatabaseEngineRequestsFromInstallPlan returns a list of reconcile.Request for each possible // databaseengine referenced by an InstallPlan. 
func getDatabaseEngineRequestsFromInstallPlan(_ context.Context, o client.Object) []reconcile.Request { - result := []reconcile.Request{} + var result []reconcile.Request installPlan, ok := o.(*opfwv1alpha1.InstallPlan) if !ok { return result diff --git a/internal/controller/providers/ps/applier.go b/internal/controller/providers/ps/applier.go new file mode 100644 index 000000000..6691fbc45 --- /dev/null +++ b/internal/controller/providers/ps/applier.go @@ -0,0 +1,769 @@ +// everest-operator +// Copyright (C) 2022 Percona LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ps + +import ( + "context" + "errors" + "fmt" + + "github.com/AlekSi/pointer" + psv1 "github.com/percona/percona-server-mysql-operator/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + everestv1alpha1 "github.com/percona/everest-operator/api/v1alpha1" + "github.com/percona/everest-operator/internal/consts" + "github.com/percona/everest-operator/internal/controller/common" +) + +const ( + haProxyProbesTimeout = 30 + passwordMaxLen = 20 + passwordMinLen = 16 + passSymbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "abcdefghijklmnopqrstuvwxyz" + + "0123456789" + + "!#$%&()+,-.<=>?@[]^_{}~" +) + +var errInvalidDataSourceConfiguration = errors.New("invalid dataSource configuration") + +type applier struct { + *Provider + ctx context.Context //nolint:containedctx +} + +func (a *applier) Metadata() error { + if a.PerconaServerMySQL.GetDeletionTimestamp().IsZero() { + for _, f := range []string{ + finalizerDeletePSPodsInOrder, + finalizerDeletePSPVC, + finalizerDeletePSSSL, + } { + controllerutil.AddFinalizer(a.PerconaServerMySQL, f) + } + } + return nil +} + +func (a *applier) Paused(paused bool) { + a.PerconaServerMySQL.Spec.Pause = paused +} + +func (a *applier) AllowUnsafeConfig() { + useInsecureSize := a.DB.Spec.Engine.Replicas == 1 + a.PerconaServerMySQL.Spec.Unsafe = psv1.UnsafeFlags{ + MySQLSize: useInsecureSize, + ProxySize: useInsecureSize, + OrchestratorSize: useInsecureSize, + } +} + +func configureStorage( + ctx context.Context, + c client.Client, + desiredSpec *psv1.PerconaServerMySQLSpec, + currentSpec *psv1.PerconaServerMySQLSpec, + db *everestv1alpha1.DatabaseCluster, +) error { + var currentSize resource.Quantity + if db.Status.Status != everestv1alpha1.AppStateNew { + currentSize = 
currentSpec.MySQL.PodSpec.VolumeSpec.PersistentVolumeClaim.Resources.Requests[corev1.ResourceStorage] + } + + setStorageSize := func(size resource.Quantity) { + desiredSpec.MySQL.PodSpec.VolumeSpec = &psv1.VolumeSpec{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: db.Spec.Engine.Storage.Class, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: size, + }, + }, + }, + } + } + + return common.ConfigureStorage(ctx, c, db, currentSize, setStorageSize) +} + +// generatePass generates a random password. +// func generatePass() ([]byte, error) { +// randLenDelta, err := rand.Int(rand.Reader, big.NewInt(int64(passwordMaxLen-passwordMinLen))) +// if err != nil { +// return nil, err +// } +// +// b := make([]byte, passwordMinLen+randLenDelta.Int64()) +// for i := range b { +// randInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(passSymbols)))) +// if err != nil { +// return nil, err +// } +// b[i] = passSymbols[randInt.Int64()] +// } +// +// return b, nil +// } + +func (a *applier) Engine() error { + engine := a.DBEngine + if a.DB.Spec.Engine.Version == "" { + a.DB.Spec.Engine.Version = engine.BestEngineVersion() + } + + ps := a.PerconaServerMySQL + + // Update CRVersion, if specified. 
+ desiredCRVersion := pointer.Get(a.DB.Spec.Engine.CRVersion) + if desiredCRVersion != "" { + ps.Spec.CRVersion = desiredCRVersion + } + + ps.Spec.SecretsName = a.DB.Spec.Engine.UserSecretsName + ps.Spec.MySQL.PodSpec.Size = a.DB.Spec.Engine.Replicas + ps.Spec.MySQL.PodSpec.Configuration = a.DB.Spec.Engine.Config + + psEngineVersion, ok := engine.Status.AvailableVersions.Engine[a.DB.Spec.Engine.Version] + if !ok { + return fmt.Errorf("engine version %s not available", a.DB.Spec.Engine.Version) + } + ps.Spec.MySQL.Image = psEngineVersion.ImagePath + + if err := configureStorage(a.ctx, a.C, &ps.Spec, &a.currentPSSpec, a.DB); err != nil { + return err + } + + if !a.DB.Spec.Engine.Resources.CPU.IsZero() { + ps.Spec.MySQL.PodSpec.Resources.Limits[corev1.ResourceCPU] = a.DB.Spec.Engine.Resources.CPU + ps.Spec.MySQL.PodSpec.Resources.Requests[corev1.ResourceCPU] = a.DB.Spec.Engine.Resources.CPU + } + if !a.DB.Spec.Engine.Resources.Memory.IsZero() { + ps.Spec.MySQL.PodSpec.Resources.Limits[corev1.ResourceMemory] = a.DB.Spec.Engine.Resources.Memory + ps.Spec.MySQL.PodSpec.Resources.Requests[corev1.ResourceMemory] = a.DB.Spec.Engine.Resources.Memory + } + hasDBSpecChanged := func() bool { + return a.DB.Status.ObservedGeneration > 0 && a.DB.Status.ObservedGeneration != a.DB.Generation + } + // We preserve the settings for existing DBs, otherwise restarts are seen when upgrading Everest. + // Additionally, we also need to check for the spec changes, otherwise the user can never voluntarily change the resource setting. + // TODO: Remove this once we figure out how to apply such spec changes without automatic restarts. 
+ // See: https://perconadev.atlassian.net/browse/EVEREST-1413 + if a.DB.Status.Status == everestv1alpha1.AppStateReady && !hasDBSpecChanged() { + ps.Spec.MySQL.PodSpec.Resources = a.currentPSSpec.MySQL.PodSpec.Resources + } + + switch a.DB.Spec.Engine.Size() { + case everestv1alpha1.EngineSizeSmall: + ps.Spec.MySQL.PodSpec.LivenessProbe.TimeoutSeconds = 450 + ps.Spec.MySQL.PodSpec.ReadinessProbe.TimeoutSeconds = 450 + case everestv1alpha1.EngineSizeMedium: + ps.Spec.MySQL.PodSpec.LivenessProbe.TimeoutSeconds = 451 + ps.Spec.MySQL.PodSpec.ReadinessProbe.TimeoutSeconds = 451 + case everestv1alpha1.EngineSizeLarge: + ps.Spec.MySQL.PodSpec.LivenessProbe.TimeoutSeconds = 600 + ps.Spec.MySQL.PodSpec.ReadinessProbe.TimeoutSeconds = 600 + } + + ps.Spec.UpgradeOptions = defaultSpec().UpgradeOptions + + return nil +} + +func (a *applier) Backup() error { + bkp, err := a.genPSBackupSpec() + if err != nil { + return err + } + a.PerconaServerMySQL.Spec.Backup = bkp + return nil +} + +func (a *applier) Proxy() error { + proxyType := a.DB.Spec.Proxy.Type + // Apply proxy config. + switch proxyType { + case everestv1alpha1.ProxyTypeHAProxy: + if err := a.applyHAProxyCfg(); err != nil { + return err + } + case everestv1alpha1.ProxyTypeRouter: + if err := a.applyRouterCfg(); err != nil { + return err + } + default: + return fmt.Errorf("invalid proxy type %s", proxyType) + } + return nil +} + +func (a *applier) DataSource() error { + if a.DB.Spec.DataSource == nil { + // Nothing to do. + return nil + } + // Do not restore from datasource until the cluster is ready. 
+ if a.DB.Status.Status != everestv1alpha1.AppStateReady { + return nil + } + return common.ReconcileDBRestoreFromDataSource(a.ctx, a.C, a.DB) +} + +func (a *applier) Monitoring() error { + monitoring, err := common.GetDBMonitoringConfig(a.ctx, a.C, a.DB) + if err != nil { + return err + } + switch monitoring.Spec.Type { + case everestv1alpha1.PMMMonitoringType: + return a.applyPMMCfg(monitoring) + default: + return fmt.Errorf("invalid monitoring type %s", monitoring.Spec.Type) + } +} + +func (a *applier) PodSchedulingPolicy() error { + // FIXME: Implement the PodSchedulingPolicy configuration. + return nil +} + +func defaultSpec() psv1.PerconaServerMySQLSpec { + return psv1.PerconaServerMySQLSpec{ + UpdateStrategy: psv1.SmartUpdateStatefulSetStrategyType, + UpgradeOptions: psv1.UpgradeOptions{ + Apply: "disabled", + VersionServiceEndpoint: "https://check.percona.com", // FIXME: read from config + }, + MySQL: psv1.MySQLSpec{ + ClusterType: psv1.ClusterTypeGR, + AutoRecovery: true, + PodSpec: psv1.PodSpec{ + Size: 3, + ContainerSpec: psv1.ContainerSpec{ + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1G"), + corev1.ResourceCPU: resource.MustParse("600m"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1G"), + corev1.ResourceCPU: resource.MustParse("600m"), + }, + }, + }, + }, + }, + Orchestrator: psv1.OrchestratorSpec{ + Enabled: false, + }, + PMM: &psv1.PMMSpec{ + Enabled: false, + }, + Backup: &psv1.BackupSpec{ + Enabled: false, + }, + Proxy: psv1.ProxySpec{ + HAProxy: &psv1.HAProxySpec{ + Enabled: false, + Expose: psv1.ServiceExpose{ + Type: corev1.ServiceTypeClusterIP, + }, + PodSpec: psv1.PodSpec{ + Size: 3, + ContainerSpec: psv1.ContainerSpec{ + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1G"), + corev1.ResourceCPU: resource.MustParse("600m"), + }, + Requests: corev1.ResourceList{ + 
corev1.ResourceMemory: resource.MustParse("1G"), + corev1.ResourceCPU: resource.MustParse("600m"), + }, + }, + ReadinessProbe: corev1.Probe{TimeoutSeconds: haProxyProbesTimeout}, + LivenessProbe: corev1.Probe{TimeoutSeconds: haProxyProbesTimeout}, + }, + }, + }, + Router: &psv1.MySQLRouterSpec{ + Enabled: false, + Expose: psv1.ServiceExpose{ + Type: corev1.ServiceTypeClusterIP, + }, + PodSpec: psv1.PodSpec{ + Size: 3, + ContainerSpec: psv1.ContainerSpec{ + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1G"), + corev1.ResourceCPU: resource.MustParse("600m"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1G"), + corev1.ResourceCPU: resource.MustParse("600m"), + }, + }, + ReadinessProbe: corev1.Probe{TimeoutSeconds: haProxyProbesTimeout}, + LivenessProbe: corev1.Probe{TimeoutSeconds: haProxyProbesTimeout}, + }, + }, + }, + }, + TLS: &psv1.TLSSpec{}, + Toolkit: &psv1.ToolkitSpec{}, + } +} + +func (a *applier) applyHAProxyCfg() error { + haProxy := defaultSpec().Proxy.HAProxy + haProxy.Enabled = true + + switch a.DB.Spec.Engine.Size() { + case everestv1alpha1.EngineSizeSmall: + haProxy.PodSpec.Resources = haProxyResourceRequirementsSmall + case everestv1alpha1.EngineSizeMedium: + haProxy.PodSpec.Resources = haProxyResourceRequirementsMedium + case everestv1alpha1.EngineSizeLarge: + haProxy.PodSpec.Resources = haProxyResourceRequirementsLarge + } + + if a.DB.Spec.Proxy.Replicas != nil { + haProxy.PodSpec.Size = *a.DB.Spec.Proxy.Replicas + } else { + haProxy.PodSpec.Size = a.DB.Spec.Engine.Replicas + } + + switch a.DB.Spec.Proxy.Expose.Type { + case everestv1alpha1.ExposeTypeInternal: + // No need to set anything, defaults are fine. + case everestv1alpha1.ExposeTypeExternal: + // FIXME: Check for absent key in the map. 
+ annotations := consts.ExposeAnnotationsMap[a.clusterType] + expose := psv1.ServiceExpose{ + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerSourceRanges: a.DB.Spec.Proxy.Expose.IPSourceRangesStringArray(), + Annotations: annotations, + } + haProxy.Expose = expose + default: + return fmt.Errorf("invalid expose type %s", a.DB.Spec.Proxy.Expose.Type) + } + + if a.DB.Spec.Proxy.Config != "" { + haProxy.PodSpec.Configuration = a.DB.Spec.Proxy.Config + } else { + haProxy.PodSpec.Configuration = haProxyConfigDefault + } + + // Ensure there is an env vars secret for HAProxy + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: psHAProxyEnvSecretName, + Namespace: a.DB.GetNamespace(), + }, + } + + if _, err := controllerutil.CreateOrUpdate(a.ctx, a.C, secret, func() error { + if err := controllerutil.SetOwnerReference(a.DB, secret, a.C.Scheme()); err != nil { + return err + } + secret.Data = haProxyEnvVars + return nil + }); err != nil { + return fmt.Errorf("failed to create or update secret %w", err) + } + haProxy.PodSpec.EnvFrom = []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: psHAProxyEnvSecretName, + }, + }, + }, + } + + haProxyAvailVersions, ok := a.DBEngine.Status.AvailableVersions.Proxy[everestv1alpha1.ProxyTypeHAProxy] + if !ok { + return errors.New("haproxy version is not available") + } + bestHAProxyVersion := haProxyAvailVersions.BestVersion() + haProxyVersion, ok := haProxyAvailVersions[bestHAProxyVersion] + if !ok { + return fmt.Errorf("haproxy version %s is not available", bestHAProxyVersion) + } + + // We can update the HAProxy image name only in case the CRVersions match. + // Otherwise, we keep the image unchanged. 
+ image := haProxyVersion.ImagePath + if a.currentPSSpec.Proxy.HAProxy != nil && a.DBEngine.Status.OperatorVersion != a.DB.Status.CRVersion { + image = a.currentPSSpec.Proxy.HAProxy.PodSpec.Image + } + haProxy.PodSpec.Image = image + + shouldUpdateRequests := shouldUpdateResourceRequests(a.DB.Status.Status) + if !a.DB.Spec.Proxy.Resources.CPU.IsZero() { + // When the limits are changed, triggers a pod restart, hence ensuring the requests are applied automatically (next block), + // as it depends on the cluster being in the 'init' state (shouldUpdateRequests). + haProxy.PodSpec.Resources.Limits[corev1.ResourceCPU] = a.DB.Spec.Proxy.Resources.CPU + // We set the requests to the same value as the limits, however, we need to ensure that + // they're not automatically applied when Everest is upgraded, otherwise it leads to a proxy restart. + if shouldUpdateRequests || + a.currentPSSpec.Proxy.HAProxy.Resources.Requests.Cpu(). + Equal(a.DB.Spec.Proxy.Resources.CPU) { + haProxy.PodSpec.Resources.Requests[corev1.ResourceCPU] = a.DB.Spec.Proxy.Resources.CPU + } + } + if !a.DB.Spec.Proxy.Resources.Memory.IsZero() { + // When the limits are changed, triggers a pod restart, hence ensuring the requests are applied automatically (next block), + // as it depends on the cluster being in the 'init' state (shouldUpdateRequests). + haProxy.PodSpec.Resources.Limits[corev1.ResourceMemory] = a.DB.Spec.Proxy.Resources.Memory + // Prior to 1.3.0, we did not set the requests, and this led to some issues. + // We now set the requests to the same value as the limits, however, we need to ensure that + // they're not automatically applied when Everest is upgraded, otherwise it leads to a proxy restart. + if shouldUpdateRequests || + a.currentPSSpec.Proxy.HAProxy.Resources.Requests.Memory(). 
+ Equal(a.DB.Spec.Proxy.Resources.Memory) { + haProxy.PodSpec.Resources.Requests[corev1.ResourceMemory] = a.DB.Spec.Proxy.Resources.Memory + } + } + + return nil +} + +func (a *applier) applyRouterCfg() error { + // FIXME: Implement the Router configuration. + return nil +} + +func shouldUpdateResourceRequests(dbState everestv1alpha1.AppState) bool { + return dbState == everestv1alpha1.AppStateNew || dbState == everestv1alpha1.AppStateInit +} + +// func (a *applier) applyPMMCfg(monitoring *everestv1alpha1.MonitoringConfig) error { +func (a *applier) applyPMMCfg(_ *everestv1alpha1.MonitoringConfig) error { + // FIXME: PS supports PMM 3 only!!! + // https://docs.percona.com/percona-operator-for-mysql/ps/monitoring.html#considerations + + // ps := a.PerconaServerMySQL + // ps.Spec.PMM.Enabled = true + // ps.Spec.PMM.Resources = common.GetPMMResources(pointer.Get(a.DB.Spec.Monitoring), a.DB.Spec.Engine.Size()) + // + // if monitoring.Spec.PMM.Image != "" { + // ps.Spec.PMM.Image = monitoring.Spec.PMM.Image + // } else { + // ps.Spec.PMM.Image = common.DefaultPMMClientImage + // } + // + // pmmURL, err := url.Parse(monitoring.Spec.PMM.URL) + // if err != nil { + // return errors.Join(err, errors.New("invalid monitoring URL")) + // } + // ps.Spec.PMM.ServerHost = pmmURL.Hostname() + // + // apiKey, err := common.GetSecretFromMonitoringConfig(a.ctx, a.C, monitoring) + // if err != nil { + // return err + // } + // + // err = common.CreateOrUpdateSecretData(a.ctx, a.C, a.DB, ps.Spec.SecretsName, map[string][]byte{ + // "pmmserverkey": []byte(apiKey), + // }, false) + // if err != nil { + // return err + // } + return nil +} + +// func (a *applier) genPXCStorageSpec(name, namespace string) (*pxcv1.BackupStorageSpec, *everestv1alpha1.BackupStorage, error) { +// backupStorage := &everestv1alpha1.BackupStorage{} +// err := a.C.Get(a.ctx, types.NamespacedName{Name: name, Namespace: namespace}, backupStorage) +// if err != nil { +// return nil, nil, errors.Join(err, 
// genPSBackupSpec builds the PS BackupSpec for the cluster. Backups are
// enabled when at least one schedule exists or PITR is enabled. The backup
// image is pinned to the currently deployed one while the operator and CR
// versions differ, mirroring the HAProxy image handling.
// Storage/schedule wiring is not implemented yet (see commented code below).
func (a *applier) genPSBackupSpec() (*psv1.BackupSpec, error) {
	psBackupSpec := a.PerconaServerMySQL.Spec.Backup
	psBackupSpec.Enabled = len(a.DB.Spec.Backup.Schedules) > 0 ||
		a.DB.Spec.Backup.PITR.Enabled

	// Get the best backup version for the specified database engine.
	bestBackupVersion := a.DBEngine.BestBackupVersion(a.DB.Spec.Engine.Version)
	backupVersion, ok := a.DBEngine.Status.AvailableVersions.Backup[bestBackupVersion]
	if !ok {
		return nil, fmt.Errorf("backup version %s is not available", bestBackupVersion)
	}

	// We can update the image name only in case the CRVersions match.
	// Otherwise we keep the image unchanged.
	if a.currentPSSpec.Backup != nil && a.DBEngine.Status.OperatorVersion != a.DB.Status.CRVersion {
		psBackupSpec.Image = a.currentPSSpec.Backup.Image
	} else {
		psBackupSpec.Image = backupVersion.ImagePath
	}

	psBackupSpec.PiTR = psv1.PiTRSpec{
		Enabled: a.DB.Spec.Backup.PITR.Enabled,
		// FIXME: enrich with more fields, like storage name, etc.
	}

	// The storage/schedule configuration below is ported from the PXC
	// provider and kept commented out until the PS equivalents are wired up.
	//
	// Initialize map to store backup storages
	// storages := make(map[string]*pxcv1.BackupStorageSpec)
	//
	// // List DatabaseClusterBackup objects for this database
	// backupList, err := common.DatabaseClusterBackupsThatReferenceObject(a.ctx, a.C, consts.DBClusterBackupDBClusterNameField, database.GetNamespace(), database.GetName())
	// if err != nil {
	//	return nil, err
	// }
	//
	// // Add the storages used by the DatabaseClusterBackup objects
	// if err = a.addBackupStorages(backupList.Items, a.DB.Spec.DataSource, storages); err != nil {
	//	return nil, err
	// }
	//
	// // Add PITR configuration if enabled
	// if a.DB.Spec.Backup.PITR.Enabled {
	//	if err := a.addPITRConfiguration(storages, psBackupSpec); err != nil {
	//		return nil, err
	//	}
	// }
	//
	// // If there are no schedules, just return the storages used in DatabaseClusterBackup objects
	// if len(a.DB.Spec.Backup.Schedules) == 0 {
	//	psBackupSpec.Storages = storages
	//	return psBackupSpec, nil
	// }
	//
	// // Add scheduled backup configurations
	// if err := a.addScheduledBackupsConfiguration(storages, psBackupSpec); err != nil {
	//	return nil, err
	// }

	return psBackupSpec, nil
}
The restore works without listing the related storage in the pxc config, +// // however if the storage is insecure, we need to specify it explicitly to set the insecureTLS flag +// if dataSource != nil && (dataSource.DBClusterBackupName != "" || dataSource.BackupSource != nil) { +// storageName, err := a.getStorageNameFromDataSource(*dataSource) +// if err != nil { +// return err +// } +// if _, ok := storages[storageName]; ok { +// return nil +// } +// +// spec, err := a.getStoragesSpec(storageName) +// if err != nil { +// return err +// } +// +// storages[storageName] = spec +// } +// return nil +// } +// +// func (a *applier) getStorageNameFromDataSource( +// dataSource everestv1alpha1.DataSource, +// ) (string, error) { +// backup := &everestv1alpha1.DatabaseClusterBackup{} +// var storageName string +// if dataSource.DBClusterBackupName != "" { +// err := a.C.Get(context.Background(), types.NamespacedName{ +// Namespace: a.DB.GetNamespace(), +// Name: dataSource.DBClusterBackupName, +// }, backup) +// if err != nil { +// return "", err +// } +// storageName = backup.Spec.BackupStorageName +// } else if dataSource.BackupSource != nil { +// storageName = dataSource.BackupSource.BackupStorageName +// } +// if storageName == "" { +// return "", errInvalidDataSourceConfiguration +// } +// return storageName, nil +// } +// +// func (a *applier) getStoragesSpec(backupStorageName string) (*pxcv1.BackupStorageSpec, error) { +// spec, backupStorage, err := a.genPXCStorageSpec( +// backupStorageName, +// a.DB.GetNamespace(), +// ) +// if err != nil { +// return nil, errors.Join(err, fmt.Errorf("failed to generate PXC storage spec for %s", backupStorageName)) +// } +// +// switch backupStorage.Spec.Type { +// case everestv1alpha1.BackupStorageTypeS3: +// spec.S3 = &pxcv1.BackupStorageS3Spec{ +// Bucket: fmt.Sprintf( +// "%s/%s", +// backupStorage.Spec.Bucket, +// common.BackupStoragePrefix(a.DB), +// ), +// CredentialsSecret: backupStorage.Spec.CredentialsSecretName, +// 
Region: backupStorage.Spec.Region, +// EndpointURL: backupStorage.Spec.EndpointURL, +// } +// case everestv1alpha1.BackupStorageTypeAzure: +// spec.Azure = &pxcv1.BackupStorageAzureSpec{ +// ContainerPath: fmt.Sprintf( +// "%s/%s", +// backupStorage.Spec.Bucket, +// common.BackupStoragePrefix(a.DB), +// ), +// CredentialsSecret: backupStorage.Spec.CredentialsSecretName, +// } +// default: +// return nil, fmt.Errorf("unsupported backup storage type %s for %s", backupStorage.Spec.Type, backupStorage.Name) +// } +// return spec, nil +// } +// +// func (a *applier) addPITRConfiguration(storages map[string]*pxcv1.BackupStorageSpec, pxcBackupSpec *pxcv1.PXCScheduledBackup) error { +// database := a.DB +// storageName := *database.Spec.Backup.PITR.BackupStorageName +// +// spec, backupStorage, err := a.genPXCStorageSpec(storageName, database.GetNamespace()) +// if err != nil { +// return errors.Join(err, errors.New("failed to get pitr storage")) +// } +// pxcBackupSpec.PITR.StorageName = common.PITRStorageName(storageName) +// +// var timeBetweenUploads float64 +// if database.Spec.Backup.PITR.UploadIntervalSec != nil { +// timeBetweenUploads = float64(*database.Spec.Backup.PITR.UploadIntervalSec) +// } +// pxcBackupSpec.PITR.TimeBetweenUploads = timeBetweenUploads +// +// switch backupStorage.Spec.Type { +// case everestv1alpha1.BackupStorageTypeS3: +// spec.S3 = &pxcv1.BackupStorageS3Spec{ +// Bucket: common.PITRBucketName(database, backupStorage.Spec.Bucket), +// CredentialsSecret: backupStorage.Spec.CredentialsSecretName, +// Region: backupStorage.Spec.Region, +// EndpointURL: backupStorage.Spec.EndpointURL, +// } +// default: +// return fmt.Errorf("BackupStorage of type %s is not supported. 
PITR only works for s3 compatible storages", backupStorage.Spec.Type) +// } +// +// // create a separate storage for pxc pitr as the docs recommend +// // https://docs.percona.com/percona-operator-for-mysql/pxc/backups-pitr.html +// storages[common.PITRStorageName(backupStorage.Name)] = spec +// return nil +// } +// +// func (a *applier) addScheduledBackupsConfiguration( +// storages map[string]*pxcv1.BackupStorageSpec, +// pxcBackupSpec *pxcv1.PXCScheduledBackup, +// ) error { +// database := a.DB +// var pxcSchedules []pxcv1.PXCScheduledBackupSchedule //nolint:prealloc +// for _, schedule := range database.Spec.Backup.Schedules { +// if !schedule.Enabled { +// continue +// } +// +// // Add the storages used by the schedule backups +// if _, ok := storages[schedule.BackupStorageName]; !ok { +// backupStorage := &everestv1alpha1.BackupStorage{} +// err := a.C.Get(a.ctx, types.NamespacedName{ +// Name: schedule.BackupStorageName, +// Namespace: database.GetNamespace(), +// }, backupStorage) +// if err != nil { +// return errors.Join(err, fmt.Errorf("failed to get backup storage %s", schedule.BackupStorageName)) +// } +// +// storages[schedule.BackupStorageName] = &pxcv1.BackupStorageSpec{ +// Type: pxcv1.BackupStorageType(backupStorage.Spec.Type), +// VerifyTLS: backupStorage.Spec.VerifyTLS, +// } +// switch backupStorage.Spec.Type { +// case everestv1alpha1.BackupStorageTypeS3: +// storages[schedule.BackupStorageName].S3 = &pxcv1.BackupStorageS3Spec{ +// Bucket: fmt.Sprintf( +// "%s/%s", +// backupStorage.Spec.Bucket, +// common.BackupStoragePrefix(database), +// ), +// CredentialsSecret: backupStorage.Spec.CredentialsSecretName, +// Region: backupStorage.Spec.Region, +// EndpointURL: backupStorage.Spec.EndpointURL, +// } +// case everestv1alpha1.BackupStorageTypeAzure: +// storages[schedule.BackupStorageName].Azure = &pxcv1.BackupStorageAzureSpec{ +// ContainerPath: fmt.Sprintf( +// "%s/%s", +// backupStorage.Spec.Bucket, +// common.BackupStoragePrefix(database), 
//				),
//				CredentialsSecret: backupStorage.Spec.CredentialsSecretName,
//			}
//		default:
//			return fmt.Errorf("unsupported backup storage type %s for %s", backupStorage.Spec.Type, backupStorage.Name)
//		}
//	}
//
//	pxcSchedules = append(pxcSchedules, pxcv1.PXCScheduledBackupSchedule{
//		Name:        schedule.Name,
//		Schedule:    schedule.Schedule,
//		Keep:        int(schedule.RetentionCopies),
//		StorageName: schedule.BackupStorageName,
//	})
// }
//
// pxcBackupSpec.Storages = storages
// pxcBackupSpec.Schedule = pxcSchedules
// return nil
// }

// --- file: internal/controller/providers/ps/haproxy_configs.go ---

// everest-operator
// Copyright (C) 2022 Percona LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ps

import (
	"strconv"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

const (
	// haProxyConfigDefault is the default HAProxy configuration applied when
	// the DatabaseCluster does not supply its own proxy config. It exposes
	// the primary on 3306/3309 (proxy-protocol), replicas on 3307, and a
	// Prometheus metrics endpoint on 8404.
	//nolint:lll
	haProxyConfigDefault = `
    global
      maxconn 2048
      external-check
      insecure-fork-wanted
      stats socket /etc/haproxy/mysql/haproxy.sock mode 600 expose-fd listeners level admin

    defaults
      default-server init-addr last,libc,none
      log global
      mode tcp
      retries 10
      timeout client 28800s
      timeout connect 100500
      timeout server 28800s

    frontend mysql-primary-in
      bind *:3309 accept-proxy
      bind *:3306
      mode tcp
      option clitcpka
      default_backend mysql-primary

    frontend mysql-replicas-in
      bind *:3307
      mode tcp
      option clitcpka
      default_backend mysql-replicas

    frontend stats
      bind *:8404
      mode http
      http-request use-service prometheus-exporter if { path /metrics }
`

	// haConnectionTimeout is the HA_CONNECTION_TIMEOUT value (in seconds —
	// TODO confirm units) exported to the HAProxy container.
	haConnectionTimeout = 1000
)

// haProxyEnvVars contains the environment variables to be set in the HAProxy
// container (delivered via the env-vars Secret created by applyHAProxyCfg).
var haProxyEnvVars = map[string][]byte{
	"HA_CONNECTION_TIMEOUT": []byte(strconv.Itoa(haConnectionTimeout)),
}

var (
	// haProxyResourceRequirementsSmall is the resource requirements for HAProxy for small clusters.
	haProxyResourceRequirementsSmall = corev1.ResourceRequirements{
		Requests: corev1.ResourceList{
			corev1.ResourceMemory: resource.MustParse("195Mi"),
			corev1.ResourceCPU:    resource.MustParse("190m"),
		},
		Limits: corev1.ResourceList{
			corev1.ResourceMemory: resource.MustParse("204Mi"),
			corev1.ResourceCPU:    resource.MustParse("200m"),
		},
	}

	// haProxyResourceRequirementsMedium is the resource requirements for HAProxy for medium clusters.
	haProxyResourceRequirementsMedium = corev1.ResourceRequirements{
		Requests: corev1.ResourceList{
			corev1.ResourceMemory: resource.MustParse("778Mi"),
			corev1.ResourceCPU:    resource.MustParse("532m"),
		},
		Limits: corev1.ResourceList{
			corev1.ResourceMemory: resource.MustParse("820Mi"),
			corev1.ResourceCPU:    resource.MustParse("560m"),
		},
	}

	// haProxyResourceRequirementsLarge is the resource requirements for HAProxy for large clusters.
	haProxyResourceRequirementsLarge = corev1.ResourceRequirements{
		Requests: corev1.ResourceList{
			corev1.ResourceMemory: resource.MustParse("2.84Gi"),
			corev1.ResourceCPU:    resource.MustParse("818m"),
		},
		Limits: corev1.ResourceList{
			corev1.ResourceMemory: resource.MustParse("3Gi"),
			corev1.ResourceCPU:    resource.MustParse("861m"),
		},
	}
)

// --- file: internal/controller/providers/ps/provider.go ---

// everest-operator
// Copyright (C) 2022 Percona LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package ps contains the provider for Percona Server for MySQL Cluster.
package ps

import (
	"context"

	psv1 "github.com/percona/percona-server-mysql-operator/api/v1alpha1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	everestv1alpha1 "github.com/percona/everest-operator/api/v1alpha1"
	"github.com/percona/everest-operator/internal/consts"
	"github.com/percona/everest-operator/internal/controller/common"
	"github.com/percona/everest-operator/internal/controller/providers"
	"github.com/percona/everest-operator/internal/controller/version"
)

const (
	// psHAProxyEnvSecretName is the name of the Secret that carries HAProxy
	// environment variables (see applyHAProxyCfg).
	psHAProxyEnvSecretName = "ps-haproxy-env-secret" //nolint:gosec // This is not a credential, only a secret name.

	// Finalizers handled by the PS operator; added by applier.Metadata.
	finalizerDeletePSPodsInOrder = "delete-mysql-pods-in-order"
	finalizerDeletePSPVC         = "delete-mysql-pvc"
	finalizerDeletePSSSL         = "delete-ssl"
)

// Provider is a provider for Percona Server for MySQL Cluster.
type Provider struct {
	providers.ProviderOptions
	*psv1.PerconaServerMySQL

	// currentPSSpec holds the PS spec as currently stored in the API server,
	// captured before the in-memory spec is reset to defaultSpec(). The
	// applier consults it to avoid unnecessary restarts (images, resources).
	currentPSSpec psv1.PerconaServerMySQLSpec

	clusterType     consts.ClusterType
	operatorVersion *version.Version
}

// New returns a new provider for Percona Server for MySQL Cluster.
// It loads the existing PerconaServerMySQL object (tolerating NotFound for
// new clusters), resolves the database engine and operator version, snapshots
// the current spec, resets the working spec to defaults, detects the cluster
// type, and persists spec defaults on the DatabaseCluster.
func New(
	ctx context.Context,
	opts providers.ProviderOptions,
) (*Provider, error) {
	ps := &psv1.PerconaServerMySQL{}
	err := opts.C.Get(
		ctx,
		types.NamespacedName{Name: opts.DB.GetName(), Namespace: opts.DB.GetNamespace()},
		ps)
	if err != nil && !k8serrors.IsNotFound(err) {
		return nil, err
	}

	dbEngine, err := common.GetDatabaseEngine(ctx, opts.C, consts.PSDeploymentName, opts.DB.GetNamespace())
	if err != nil {
		return nil, err
	}
	opts.DBEngine = dbEngine

	// Get operator version.
	// v, err := common.GetOperatorVersion(ctx, opts.C, types.NamespacedName{
	//	Name:      consts.PSDeploymentName,
	//	Namespace: opts.DB.GetNamespace(),
	// })

	// TODO: Switch all providers to read dbEngine.Status.OperatorVersion.
	v, err := version.NewVersion(dbEngine.Status.OperatorVersion)
	if err != nil {
		return nil, err
	}

	// Snapshot the stored spec before resetting to the baseline; the applier
	// relies on this to preserve settings of already-running clusters.
	currentSpec := ps.Spec
	ps.Spec = defaultSpec()

	p := &Provider{
		PerconaServerMySQL: ps,
		ProviderOptions:    opts,
		operatorVersion:    v,
		currentPSSpec:      currentSpec,
	}

	// Get cluster type.
	ct, err := common.GetClusterType(ctx, p.C)
	if err != nil {
		return nil, err
	}
	p.clusterType = ct

	if err := p.ensureDefaults(ctx); err != nil {
		return nil, err
	}
	return p, nil
}

// Apply returns the applier for Percona Server for MySQL Cluster.
//
//nolint:ireturn
func (p *Provider) Apply(ctx context.Context) everestv1alpha1.Applier {
	return &applier{
		Provider: p,
		ctx:      ctx,
	}
}

// dbEngineVersionOrDefault returns the engine version requested by the
// DatabaseCluster, falling back to the engine's best available version.
func (p *Provider) dbEngineVersionOrDefault() string {
	engineVersion := p.DB.Spec.Engine.Version
	if engineVersion == "" {
		engineVersion = p.DBEngine.BestEngineVersion()
	}
	return engineVersion
}

// ensureDefaults fills unset DatabaseCluster spec fields with provider
// defaults and persists the DatabaseCluster only if something changed.
// Currently only the proxy type (HAProxy) is defaulted.
func (p *Provider) ensureDefaults(ctx context.Context) error {
	db := p.DB
	updated := false
	if db.Spec.Proxy.Type == "" {
		db.Spec.Proxy.Type = everestv1alpha1.ProxyTypeHAProxy
		updated = true
	}

	// FIXME: Determine PS config for each cluster size.
	// if db.Spec.Engine.Config == "" {
	//	switch db.Spec.Engine.Size() {
	//	case everestv1alpha1.EngineSizeSmall:
	//		db.Spec.Engine.Config = pxcConfigSizeSmall
	//	case everestv1alpha1.EngineSizeMedium:
	//		db.Spec.Engine.Config = pxcConfigSizeMedium
	//	case everestv1alpha1.EngineSizeLarge:
	//		db.Spec.Engine.Config = pxcConfigSizeLarge
	//	}
	//	updated = true
	// }

	if updated {
		return p.C.Update(ctx, db)
	}
	return nil
}

// Status builds the DatabaseCluster Status based on the current state of the Percona Server for MySQL Cluster.
+func (p *Provider) Status(ctx context.Context) (everestv1alpha1.DatabaseClusterStatus, error) {
+	status := p.DB.Status
+	ps := p.PerconaServerMySQL
+
+	status.Status = everestv1alpha1.AppState(ps.Status.State).WithCreatingState()
+	status.Hostname = ps.Status.Host
+	status.Ready = ps.Status.MySQL.Ready
+	// status.Size = ps.Status.Size
+	// status.Message = strings.Join(ps.Status.Messages, ";")
+	status.Port = 3306 // Default MySQL port.
+	status.CRVersion = ps.Spec.CRVersion
+	status.Details = common.StatusAsPlainTextOrEmptyString(ps.Status)
+
+	// If a restore is running for this database, set the database status to restoring.
+	if restoring, err := common.IsDatabaseClusterRestoreRunning(ctx, p.C, p.DB.GetName(), p.DB.GetNamespace()); err != nil {
+		return status, err
+	} else if restoring {
+		status.Status = everestv1alpha1.AppStateRestoring
+	}
+
+	// If the current version of the database is different from the version in
+	// the CR, an upgrade is pending or in progress.
+	if p.DB.Spec.Engine.Version != "" && ps.Status.MySQL.Version != "" && p.DB.Spec.Engine.Version != ps.Status.MySQL.Version {
+		status.Status = everestv1alpha1.AppStateUpgrading
+	}
+
+	recCRVer, err := common.GetRecommendedCRVersion(ctx, p.C, consts.PSDeploymentName, p.DB)
+	if err != nil && !k8serrors.IsNotFound(err) {
+		return status, err
+	}
+	status.RecommendedCRVersion = recCRVer
+
+	return status, nil
+}
+
+// when a PS restore is in progress, we will retry reconciliation
+// after the specified duration.
+// const defaultRestoreRequeueDuration = 15 * time.Second
+
+// RunPreReconcileHook runs the pre-reconcile hook for the PS provider.
+// Currently a no-op; the commented-out logic below mirrors the PXC provider.
+func (p *Provider) RunPreReconcileHook(_ context.Context) (providers.HookResult, error) {
+	// The pxc-operator does some funny things to the PXC spec during a restore.
+	// We must avoid interfering with that process, so we simply skip reconciliation.
+	// Replicating the same behavior here would be a nightmare, so it's simpler to just do this.
+
+	// TODO: Uncomment this if PS-operator does the same as PXC-operator.
+	// if ok, err := common.IsDatabaseClusterRestoreRunning(ctx, p.C, p.DB.GetName(), p.DB.GetNamespace()); err != nil {
+	// 	return providers.HookResult{}, err
+	// } else if ok {
+	// 	return providers.HookResult{
+	// 		RequeueAfter: defaultRestoreRequeueDuration,
+	// 		Message:      "Restore is in progress",
+	// 	}, nil
+	// }
+	return providers.HookResult{}, nil
+}
+
+// Cleanup runs the cleanup routines and returns true if the cleanup is done.
+func (p *Provider) Cleanup(ctx context.Context, database *everestv1alpha1.DatabaseCluster) (bool, error) {
+	return common.HandleUpstreamClusterCleanup(ctx, p.C, database, &psv1.PerconaServerMySQL{})
+}
+
+// DBObject returns the PerconaServerMySQL object.
+//
+//nolint:ireturn
+func (p *Provider) DBObject() client.Object {
+	p.PerconaServerMySQL.SetGroupVersionKind(schema.GroupVersionKind{
+		Group:   consts.PSAPIGroup,
+		Version: p.operatorVersion.ToK8sVersion(),
+		Kind:    consts.PerconaServerMySQLKind,
+	})
+	return p.PerconaServerMySQL
+}
diff --git a/internal/controller/users.go b/internal/controller/users.go
index be0f01058..8075cf8a6 100644
--- a/internal/controller/users.go
+++ b/internal/controller/users.go
@@ -19,6 +19,7 @@ import everestv1alpha1 "github.com/percona/everest-operator/api/v1alpha1"
 
 var userSecretKeys = map[everestv1alpha1.EngineType][]everestv1alpha1.SecretKey{
 	everestv1alpha1.DatabaseEnginePXC:   pxcUserKeys,
+	everestv1alpha1.DatabaseEnginePS:    psUserKeys,
 	everestv1alpha1.DatabaseEnginePSMDB: psmdbUserKeys,
 	// not supported until K8SPG-570 is fixed.
@@ -53,6 +54,40 @@ var (
 			Description: "Password for the replication user",
 		},
 	}
+	// psUserKeys lists the system-user secret keys managed for
+	// Percona Server for MySQL clusters.
+	psUserKeys = []everestv1alpha1.SecretKey{
+		{
+			Name:        "monitor",
+			Description: "Password for monitoring user",
+		},
+		{
+			Name:        "root",
+			Description: "Password for root user",
+		},
+		{
+			Name:        "orchestrator",
+			Description: "Password for Orchestrator admin user",
+		},
+		{
+			Name:        "xtrabackup",
+			Description: "Password for backup user",
+		},
+		{
+			Name:        "operator",
+			Description: "Password for the operator user",
+		},
+		{
+			Name:        "replication",
+			Description: "Password for the replication user",
+		},
+		{
+			Name:        "heartbeat",
+			Description: "Password for the heartbeat checks user",
+		},
+		{
+			Name:        "pmmserverkey",
+			Description: "PMM key",
+		},
+	}
 	psmdbUserKeys = []everestv1alpha1.SecretKey{
 		{
 			Name: "MONGODB_BACKUP_USER",
diff --git a/internal/controller/version/version_service.go b/internal/controller/version/version_service.go
index 0e513620d..29ca4bebb 100644
--- a/internal/controller/version/version_service.go
+++ b/internal/controller/version/version_service.go
@@ -35,12 +35,17 @@ type (
 		Backup       map[string]*everestv1alpha1.Component `json:"backup"`
 		Mongod       map[string]*everestv1alpha1.Component `json:"mongod"`
 		PXC          map[string]*everestv1alpha1.Component `json:"pxc"`
+		// NOTE(review): Go initialism convention would name this field MySQL,
+		// not MYSQL — confirm no other hunks reference MYSQL before renaming.
+		MYSQL        map[string]*everestv1alpha1.Component `json:"mysql"`
 		ProxySQL     map[string]*everestv1alpha1.Component `json:"proxysql"`
 		HAProxy      map[string]*everestv1alpha1.Component `json:"haproxy"`
 		LogCollector map[string]*everestv1alpha1.Component `json:"logCollector"`
 		Postgresql   map[string]*everestv1alpha1.Component `json:"postgresql"`
 		PGBackRest   map[string]*everestv1alpha1.Component `json:"pgbackrest"`
 		PGBouncer    map[string]*everestv1alpha1.Component `json:"pgbouncer"`
+		PMM          map[string]*everestv1alpha1.Component `json:"pmm"`
+		Router       map[string]*everestv1alpha1.Component `json:"router"`
+		Orchestrator map[string]*everestv1alpha1.Component `json:"orchestrator"`
+		Toolkit      map[string]*everestv1alpha1.Component `json:"toolkit"`
 	}
 
 	//
Response is a response model for version service response parsing.
 	Response struct {
@@ -57,6 +62,7 @@ const (
 
 var operatorNames = map[everestv1alpha1.EngineType]string{
 	everestv1alpha1.DatabaseEnginePXC:        "pxc-operator",
+	// "ps-operator" is presumably the product name the version service
+	// expects for Percona Server for MySQL — verify against the service API.
+	everestv1alpha1.DatabaseEnginePS:         "ps-operator",
 	everestv1alpha1.DatabaseEnginePSMDB:      "psmdb-operator",
 	everestv1alpha1.DatabaseEnginePostgresql: "pg-operator",
 }