diff --git a/api/everest/v1alpha1/databasecluster_types.go b/api/everest/v1alpha1/databasecluster_types.go index 05a0b456b..403af0293 100644 --- a/api/everest/v1alpha1/databasecluster_types.go +++ b/api/everest/v1alpha1/databasecluster_types.go @@ -289,6 +289,8 @@ type Proxy struct { Replicas *int32 `json:"replicas,omitempty"` // Config is the proxy configuration Config string `json:"config,omitempty"` + // Storage is the proxy storage configuration + Storage *Storage `json:"storage,omitempty"` // Expose is the proxy expose configuration // +kubebuilder:validation:XValidation:rule="self.type == 'internal' || !has(oldSelf.loadBalancerConfigName) || oldSelf.loadBalancerConfigName == '' || (has(self.loadBalancerConfigName) && self.loadBalancerConfigName != '')",message=".spec.proxy.expose.loadBalancerConfigName cannot be cleared once set" Expose Expose `json:"expose,omitempty"` diff --git a/api/everest/v1alpha1/zz_generated.deepcopy.go b/api/everest/v1alpha1/zz_generated.deepcopy.go index 0b1bef52d..9a19ce756 100644 --- a/api/everest/v1alpha1/zz_generated.deepcopy.go +++ b/api/everest/v1alpha1/zz_generated.deepcopy.go @@ -20,12 +20,11 @@ package v1alpha1 import ( + enginefeatures_everestv1alpha1 "github.com/percona/everest-operator/api/enginefeatures.everest/v1alpha1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - - enginefeatures_everestv1alpha1 "github.com/percona/everest-operator/api/enginefeatures.everest/v1alpha1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -1758,6 +1757,11 @@ func (in *Proxy) DeepCopyInto(out *Proxy) { *out = new(int32) **out = **in } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(Storage) + (*in).DeepCopyInto(*out) + } in.Expose.DeepCopyInto(&out.Expose) in.Resources.DeepCopyInto(&out.Resources) } diff --git a/config/crd/bases/everest.percona.com_databaseclusters.yaml b/config/crd/bases/everest.percona.com_databaseclusters.yaml index 20246df56..a6ccca819 100644 --- a/config/crd/bases/everest.percona.com_databaseclusters.yaml +++ b/config/crd/bases/everest.percona.com_databaseclusters.yaml @@ -494,6 +494,23 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object + storage: + description: Storage is the proxy storage configuration + properties: + class: + description: Class is the storage class to use for the persistent + volume claim + type: string + size: + anyOf: + - type: integer + - type: string + description: Size is the size of the persistent volume claim + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - size + type: object type: description: Type is the proxy type enum: 
diff --git a/internal/controller/everest/common/helper.go b/internal/controller/everest/common/helper.go index e3452a75b..b5feb48b8 100644 --- a/internal/controller/everest/common/helper.go +++ b/internal/controller/everest/common/helper.go @@ -996,13 +996,12 @@ func ConfigureStorage( c client.Client, db *everestv1alpha1.DatabaseCluster, currentSize resource.Quantity, - setStorageSizeFunc func(resource.Quantity), + desiredSize resource.Quantity, + storageClass *string, + setStorageSizeFunc func(resource.Quantity, *string), ) error { meta.RemoveStatusCondition(&db.Status.Conditions, everestv1alpha1.ConditionTypeCannotResizeVolume) - desiredSize := db.Spec.Engine.Storage.Size - storageClass := db.Spec.Engine.Storage.Class - // We cannot shrink the volume size. hasStorageShrunk := currentSize.Cmp(desiredSize) > 0 && !currentSize.IsZero() if hasStorageShrunk { @@ -1013,14 +1012,14 @@ func ConfigureStorage( LastTransitionTime: metav1.Now(), ObservedGeneration: db.GetGeneration(), }) - setStorageSizeFunc(currentSize) + setStorageSizeFunc(currentSize, storageClass) return nil } // Check if storage size is being expanded. If not, set the desired size and return early. 
hasStorageExpanded := currentSize.Cmp(desiredSize) < 0 && !currentSize.IsZero() if !hasStorageExpanded { - setStorageSizeFunc(desiredSize) + setStorageSizeFunc(desiredSize, storageClass) return nil } @@ -1037,11 +1036,11 @@ func ConfigureStorage( LastTransitionTime: metav1.Now(), ObservedGeneration: db.GetGeneration(), }) - setStorageSizeFunc(currentSize) + setStorageSizeFunc(currentSize, storageClass) return nil } - setStorageSizeFunc(desiredSize) + setStorageSizeFunc(desiredSize, storageClass) return nil } diff --git a/internal/controller/everest/common/helper_test.go b/internal/controller/everest/common/helper_test.go index 92b63ec45..d8c76dfe3 100644 --- a/internal/controller/everest/common/helper_test.go +++ b/internal/controller/everest/common/helper_test.go @@ -294,8 +294,12 @@ func TestConfigureStorage(t *testing.T) { // Setup test objects var actualSize resource.Quantity - setSize := func(size resource.Quantity) { + storageClassName := tt.db.Spec.Engine.Storage.Class + desiredSize := tt.db.Spec.Engine.Storage.Size + + setSize := func(size resource.Quantity, class *string) { actualSize = size + storageClassName = class } // Setup fake client with storage class if needed @@ -303,16 +307,17 @@ func TestConfigureStorage(t *testing.T) { if tt.storageClassExists && tt.db.Spec.Engine.Storage.Class != nil { sc := &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ - Name: *tt.db.Spec.Engine.Storage.Class, + Name: *storageClassName, }, AllowVolumeExpansion: &tt.storageClassAllowExpansion, } builder.WithObjects(sc) + } client := builder.Build() // Run the test - err := ConfigureStorage(t.Context(), client, tt.db, tt.currentSize, setSize) + err := ConfigureStorage(t.Context(), client, tt.db, tt.currentSize, desiredSize, storageClassName, setSize) // Verify results if tt.expectErr { diff --git a/internal/controller/everest/providers/pg/applier.go b/internal/controller/everest/providers/pg/applier.go index 3d94a6437..4d79735b9 100644 --- 
a/internal/controller/everest/providers/pg/applier.go +++ b/internal/controller/everest/providers/pg/applier.go @@ -1373,12 +1373,15 @@ func configureStorage( currentSize = current.DataVolumeClaimSpec.Resources.Requests[corev1.ResourceStorage] } - setStorageSize := func(size resource.Quantity) { + storageClass := db.Spec.Engine.Storage.Class + desiredSize := db.Spec.Engine.Storage.Size + + setStorageSize := func(size resource.Quantity, storageClass *string) { desired.DataVolumeClaimSpec = corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ corev1.ReadWriteOnce, }, - StorageClassName: db.Spec.Engine.Storage.Class, + StorageClassName: storageClass, Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: size, @@ -1387,5 +1390,5 @@ func configureStorage( } } - return common.ConfigureStorage(ctx, c, db, currentSize, setStorageSize) + return common.ConfigureStorage(ctx, c, db, currentSize, desiredSize, storageClass, setStorageSize) } diff --git a/internal/controller/everest/providers/psmdb/applier.go b/internal/controller/everest/providers/psmdb/applier.go index f8905eaeb..05d995686 100644 --- a/internal/controller/everest/providers/psmdb/applier.go +++ b/internal/controller/everest/providers/psmdb/applier.go @@ -940,11 +940,14 @@ func configureStorage( currentSize = current.VolumeSpec.PersistentVolumeClaim.PersistentVolumeClaimSpec.Resources.Requests[corev1.ResourceStorage] } - setStorageSize := func(size resource.Quantity) { + storageClass := db.Spec.Engine.Storage.Class + desiredSize := db.Spec.Engine.Storage.Size + + setStorageSize := func(size resource.Quantity, storageClass *string) { desired.VolumeSpec = &psmdbv1.VolumeSpec{ PersistentVolumeClaim: psmdbv1.PVCSpec{ PersistentVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: db.Spec.Engine.Storage.Class, + StorageClassName: storageClass, Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ 
corev1.ResourceStorage: size, @@ -955,5 +958,5 @@ func configureStorage( } } - return common.ConfigureStorage(ctx, c, db, currentSize, setStorageSize) + return common.ConfigureStorage(ctx, c, db, currentSize, desiredSize, storageClass, setStorageSize) } diff --git a/internal/controller/everest/providers/pxc/applier.go b/internal/controller/everest/providers/pxc/applier.go index 33d084509..d49d05556 100644 --- a/internal/controller/everest/providers/pxc/applier.go +++ b/internal/controller/everest/providers/pxc/applier.go @@ -119,10 +119,66 @@ func configureStorage( } currentSize := getCurrentStorageSize() - setStorageSize := func(size resource.Quantity) { + storageClass := db.Spec.Engine.Storage.Class + desiredSize := db.Spec.Engine.Storage.Size + + setStorageSize := func(size resource.Quantity, storageClass *string) { desired.PXC.PodSpec.VolumeSpec = &pxcv1.VolumeSpec{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: db.Spec.Engine.Storage.Class, + StorageClassName: storageClass, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: size, + }, + }, + }, + } + } + + return common.ConfigureStorage(ctx, c, db, currentSize, desiredSize, storageClass, setStorageSize) +} + +func configureProxySQLStorage( + ctx context.Context, + c client.Client, + desired *pxcv1.PerconaXtraDBClusterSpec, + current *pxcv1.PerconaXtraDBClusterSpec, + db *everestv1alpha1.DatabaseCluster, +) error { + + getCurrentProxySQLStorageSize := func() resource.Quantity { + if db.Status.Status == everestv1alpha1.AppStateNew || + current == nil || + current.ProxySQL == nil || + current.ProxySQL.PodSpec.VolumeSpec == nil || + current.ProxySQL.PodSpec.VolumeSpec.PersistentVolumeClaim == nil { + return resource.Quantity{} + } + return current.ProxySQL.PodSpec.VolumeSpec.PersistentVolumeClaim.Resources.Requests[corev1.ResourceStorage] + } + + getDesiredProxySQLStorageSize := func() resource.Quantity { + if 
db.Spec.Proxy.Storage == nil || db.Spec.Proxy.Storage.Size.IsZero() { + return resource.MustParse("2Gi") + } + return db.Spec.Proxy.Storage.Size + } + + getStorageClass := func() *string { + if db.Spec.Proxy.Storage == nil || db.Spec.Proxy.Storage.Class == nil { + return db.Spec.Engine.Storage.Class + } + return db.Spec.Proxy.Storage.Class + } + + currentSize := getCurrentProxySQLStorageSize() + desiredSize := getDesiredProxySQLStorageSize() + storageClass := getStorageClass() + + setStorageProxySQLStorageSize := func(size resource.Quantity, storageClass *string) { + desired.ProxySQL.PodSpec.VolumeSpec = &pxcv1.VolumeSpec{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: storageClass, Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: size, @@ -132,7 +188,7 @@ func configureStorage( } } - return common.ConfigureStorage(ctx, c, db, currentSize, setStorageSize) + return common.ConfigureStorage(ctx, c, db, currentSize, desiredSize, storageClass, setStorageProxySQLStorageSize) } // generatePass generates a random password. 
@@ -479,6 +535,7 @@ func defaultSpec() pxcv1.PerconaXtraDBClusterSpec { corev1.ResourceMemory: resource.MustParse("1G"), corev1.ResourceCPU: resource.MustParse("600m"), }, + Requests: corev1.ResourceList{}, }, ReadinessProbes: corev1.Probe{TimeoutSeconds: haProxyProbesTimeout}, LivenessProbes: corev1.Probe{TimeoutSeconds: haProxyProbesTimeout}, @@ -492,6 +549,7 @@ func defaultSpec() pxcv1.PerconaXtraDBClusterSpec { corev1.ResourceMemory: resource.MustParse("1G"), corev1.ResourceCPU: resource.MustParse("600m"), }, + Requests: corev1.ResourceList{}, }, }, }, @@ -692,6 +750,10 @@ func (p *applier) applyProxySQLCfg() error { } proxySQL.Image = image + if err := configureProxySQLStorage(p.ctx, p.C, &p.PerconaXtraDBCluster.Spec, &p.currentPerconaXtraDBClusterSpec, p.DB); err != nil { + return err + } + shouldUpdateRequests := common.IsNewDatabaseCluster(p.DB.Status.Status) if !p.DB.Spec.Proxy.Resources.CPU.IsZero() { // When the limits are changed, triggers a pod restart, hence ensuring the requests are applied automatically (next block), @@ -701,7 +763,7 @@ func (p *applier) applyProxySQLCfg() error { // We now set the requests to the same value as the limits, however, we need to ensure that // they're not automatically applied when Everest is upgraded, otherwise it leads to a proxy restart. if shouldUpdateRequests || - p.currentPerconaXtraDBClusterSpec.HAProxy.Resources.Requests.Cpu(). + p.currentPerconaXtraDBClusterSpec.ProxySQL.Resources.Requests.Cpu(). Equal(p.DB.Spec.Proxy.Resources.CPU) { proxySQL.Resources.Requests[corev1.ResourceCPU] = p.DB.Spec.Proxy.Resources.CPU } @@ -714,8 +776,8 @@ func (p *applier) applyProxySQLCfg() error { // We now set the requests to the same value as the limits, however, we need to ensure that // they're not automatically applied when Everest is upgraded, otherwise it leads to a proxy restart. if shouldUpdateRequests || - p.currentPerconaXtraDBClusterSpec.HAProxy.Resources.Requests.Cpu(). 
- Equal(p.DB.Spec.Proxy.Resources.CPU) { + p.currentPerconaXtraDBClusterSpec.ProxySQL.Resources.Requests.Memory(). + Equal(p.DB.Spec.Proxy.Resources.Memory) { proxySQL.Resources.Requests[corev1.ResourceMemory] = p.DB.Spec.Proxy.Resources.Memory } }