From ccb70a6577d7c79391e1d1a25335bb87ffff33b8 Mon Sep 17 00:00:00 2001 From: Francesco Romani Date: Tue, 20 Jan 2026 11:25:51 +0100 Subject: [PATCH 1/4] kubelet: deprecate DisableCPUQuotaWithExclusiveCPUs We missed the proper process initially, it should have been set to deprecated from the beginning, to convey this is a safety FG for a delicate, long needed fix rather than a FG for a proper feature Signed-off-by: Francesco Romani --- pkg/features/kube_features.go | 1 + test/compatibility_lifecycle/reference/feature_list.md | 2 +- .../reference/versioned_feature_list.yaml | 4 ++++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 77cf644b296..396a9ab2066 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -1246,6 +1246,7 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate DisableCPUQuotaWithExclusiveCPUs: { {Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.36"), Default: true, PreRelease: featuregate.Deprecated}, // LockToDefault(true) in 1.37, remove in 1.38 }, DisableNodeKubeProxyVersion: { diff --git a/test/compatibility_lifecycle/reference/feature_list.md b/test/compatibility_lifecycle/reference/feature_list.md index 28c34478885..6375d7b74e0 100644 --- a/test/compatibility_lifecycle/reference/feature_list.md +++ b/test/compatibility_lifecycle/reference/feature_list.md @@ -65,7 +65,7 @@ | DeploymentReplicaSetTerminatingReplicas | :ballot_box_with_check: 1.35+ | | 1.33–1.34 | 1.35– | | | | [code](https://cs.k8s.io/?q=%5CbDeploymentReplicaSetTerminatingReplicas%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDeploymentReplicaSetTerminatingReplicas%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | | DetectCacheInconsistency | :ballot_box_with_check: 1.34+ | | | 1.34– | | | | 
[code](https://cs.k8s.io/?q=%5CbDetectCacheInconsistency%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDetectCacheInconsistency%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | | DisableAllocatorDualWrite | :ballot_box_with_check: 1.34+ | :closed_lock_with_key: 1.35+ | 1.31–1.32 | 1.33 | 1.34– | | MultiCIDRServiceAllocator | [code](https://cs.k8s.io/?q=%5CbDisableAllocatorDualWrite%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDisableAllocatorDualWrite%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | -| DisableCPUQuotaWithExclusiveCPUs | :ballot_box_with_check: 1.33+ | | | 1.33– | | | | [code](https://cs.k8s.io/?q=%5CbDisableCPUQuotaWithExclusiveCPUs%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDisableCPUQuotaWithExclusiveCPUs%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | +| DisableCPUQuotaWithExclusiveCPUs | :ballot_box_with_check: 1.33+ | | | 1.33–1.35 | | 1.36– | | [code](https://cs.k8s.io/?q=%5CbDisableCPUQuotaWithExclusiveCPUs%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDisableCPUQuotaWithExclusiveCPUs%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | | DisableNodeKubeProxyVersion | :ballot_box_with_check: 1.33+ | :closed_lock_with_key: 1.36+ | 1.29–1.30 | | | 1.31– | | [code](https://cs.k8s.io/?q=%5CbDisableNodeKubeProxyVersion%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDisableNodeKubeProxyVersion%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | | DynamicResourceAllocation | :ballot_box_with_check: 1.34+ | :closed_lock_with_key: 1.35+ | 1.26–1.31 | 1.32–1.33 | 1.34– | | | 
[code](https://cs.k8s.io/?q=%5CbDynamicResourceAllocation%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDynamicResourceAllocation%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | | EnvFiles | :ballot_box_with_check: 1.35+ | | 1.34 | 1.35– | | | | [code](https://cs.k8s.io/?q=%5CbEnvFiles%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbEnvFiles%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | diff --git a/test/compatibility_lifecycle/reference/versioned_feature_list.yaml b/test/compatibility_lifecycle/reference/versioned_feature_list.yaml index 15756cc83ba..67ae369208d 100644 --- a/test/compatibility_lifecycle/reference/versioned_feature_list.yaml +++ b/test/compatibility_lifecycle/reference/versioned_feature_list.yaml @@ -459,6 +459,10 @@ lockToDefault: false preRelease: Beta version: "1.33" + - default: true + lockToDefault: false + preRelease: Deprecated + version: "1.36" - name: DisableNodeKubeProxyVersion versionedSpecs: - default: false From 5a2578ce0915a7973486c55dd5415026a418f918 Mon Sep 17 00:00:00 2001 From: Francesco Romani Date: Tue, 20 Jan 2026 10:44:28 +0100 Subject: [PATCH 2/4] kubelet: promote the CustomCPUCFSQuotaPeriod to GA MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Promote the CustomCPUCFSQuotaPeriod gate to GA, turning it enabled by default. Short history of this feature so far: As far as git and github history allows to reconstruct, this feature was introduced without KEP/design doc process (kubernetes 1.12). 
It was a double-opt-in feature: users had to first enable a FG to make the kubelet honor a setting, then change a specific tunable to change the behavior. This is more like a “safeguard” FG (Other noteworthy examples: `UnauthenticatedHTTP2DOSMitigation` or `DisableCPUQuotaWithExclusiveCPUs`) rather than a KEP-driven feature gate. Major past events: - introduced in: https://github.com/kubernetes/kubernetes/pull/63437 - Discussion about adding a FG: https://github.com/kubernetes/kubernetes/pull/63437#issuecomment-387828654 - The change was initially accepted as straightforward, but it was later discovered systemd had gaps, and the change was downgraded as experimental: https://github.com/kubernetes/kubernetes/pull/63437#issuecomment-391740566 - FG re-introduced because systemd acknowledged and fixed the gap, but the systemd rollout would take quite some time: https://github.com/kubernetes/kubernetes/pull/63437#issuecomment-407950436 There's a gap, still relevant in 2026: missing e2e tests to move past alpha: https://github.com/kubernetes/kubernetes/pull/63437#issuecomment-417674790 Other noteworthy work related to this feature: - https://github.com/kubernetes/kubernetes/pull/94687 - https://github.com/kubernetes/kubernetes/pull/111520 - https://github.com/kubernetes/kubernetes/pull/129726 - https://github.com/kubernetes/kubernetes/pull/133845 Fix and modernize the related unit tests. 
Signed-off-by: Francesco Romani fixup --- pkg/features/kube_features.go | 2 + .../apis/config/validation/validation_test.go | 4 +- pkg/kubelet/cm/helpers_linux_test.go | 324 ++++-------------- .../reference/feature_list.md | 2 +- .../reference/versioned_feature_list.yaml | 4 + 5 files changed, 74 insertions(+), 262 deletions(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 396a9ab2066..3159c6eff76 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -1126,6 +1126,8 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate CPUCFSQuotaPeriod: { {Version: version.MustParse("1.12"), Default: false, PreRelease: featuregate.Alpha}, + // see https://github.com/kubernetes/kubernetes/pull/136339 for full context + {Version: version.MustParse("1.36"), Default: true, PreRelease: featuregate.GA}, // LockToDefault in 1.37, remove in 1.40 }, CPUManagerPolicyAlphaOptions: { diff --git a/pkg/kubelet/apis/config/validation/validation_test.go b/pkg/kubelet/apis/config/validation/validation_test.go index 43f8fe28b49..7e684697004 100644 --- a/pkg/kubelet/apis/config/validation/validation_test.go +++ b/pkg/kubelet/apis/config/validation/validation_test.go @@ -719,7 +719,7 @@ func TestValidateKubeletConfiguration(t *testing.T) { conf.ImagePullCredentialsVerificationPolicy = "invalid" return conf }, - errMsg: `option "invalid" specified for imagePullCredentialsVerificationPolicy. Valid options are "NeverVerify", "NeverVerifyPreloadedImages", "NeverVerifyAllowlistedImages" or "AlwaysVerify"]`, + errMsg: `option "invalid" specified for imagePullCredentialsVerificationPolicy. 
Valid options are "NeverVerify", "NeverVerifyPreloadedImages", "NeverVerifyAllowlistedImages" or "AlwaysVerify"`, }, { name: "invalid PreloadedImagesVerificationAllowlist configuration - featuregate enabled", configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration { @@ -728,7 +728,7 @@ func TestValidateKubeletConfiguration(t *testing.T) { conf.PreloadedImagesVerificationAllowlist = []string{"test.test/repo"} return conf }, - errMsg: "can't set `preloadedImagesVerificationAllowlist` if `imagePullCredentialsVertificationPolicy` is not \"NeverVerifyAllowlistedImages\"]", + errMsg: "can't set `preloadedImagesVerificationAllowlist` if `imagePullCredentialsVertificationPolicy` is not \"NeverVerifyAllowlistedImages\"", }, { name: "invalid PreloadedImagesVerificationAllowlist configuration - featuregate disabled", configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration { diff --git a/pkg/kubelet/cm/helpers_linux_test.go b/pkg/kubelet/cm/helpers_linux_test.go index a4b5a5c16fa..7122b984cbf 100644 --- a/pkg/kubelet/cm/helpers_linux_test.go +++ b/pkg/kubelet/cm/helpers_linux_test.go @@ -55,6 +55,7 @@ func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequir func TestResourceConfigForPod(t *testing.T) { defaultQuotaPeriod := uint64(100 * time.Millisecond / time.Microsecond) // in microseconds tunedQuotaPeriod := uint64(5 * time.Millisecond / time.Microsecond) // in microseconds + tunedQuota := int64(1 * time.Millisecond / time.Microsecond) minShares := uint64(MinShares) burstableShares := MilliCPUToShares(100) @@ -68,14 +69,16 @@ func TestResourceConfigForPod(t *testing.T) { memoryQuantity = resource.MustParse("100Mi") cpuNoLimit := int64(-1) guaranteedMemory := memoryQuantity.Value() - testCases := map[string]struct { + testCases := []struct { + description string pod *v1.Pod expected *ResourceConfig enforceCPULimits bool quotaPeriod uint64 // in microseconds 
podLevelResourcesEnabled bool }{ - "besteffort": { + { + description: "besteffort", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -89,7 +92,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &minShares}, }, - "burstable-no-limits": { + { + description: "burstable-no-limits", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -103,7 +107,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &burstableShares}, }, - "burstable-with-limits": { + { + description: "burstable-with-limits", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -117,7 +122,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory}, }, - "burstable-with-limits-no-cpu-enforcement": { + { + description: "burstable-with-limits-no-cpu-enforcement", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -131,7 +137,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory}, }, - "burstable-partial-limits": { + { + description: "burstable-partial-limits", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -148,7 +155,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &burstablePartialShares}, }, - "burstable-with-limits-with-tuned-quota": { + { + description: "burstable-with-limits-with-tuned-quota", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -160,9 +168,10 @@ func TestResourceConfigForPod(t *testing.T) { }, enforceCPULimits: true, quotaPeriod: tunedQuotaPeriod, - expected: 
&ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory}, + expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &tunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory}, }, - "burstable-with-limits-no-cpu-enforcement-with-tuned-quota": { + { + description: "burstable-with-limits-no-cpu-enforcement-with-tuned-quota", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -176,7 +185,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: tunedQuotaPeriod, expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory}, }, - "burstable-partial-limits-with-tuned-quota": { + { + description: "burstable-partial-limits-with-tuned-quota", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -193,7 +203,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: tunedQuotaPeriod, expected: &ResourceConfig{CPUShares: &burstablePartialShares}, }, - "guaranteed": { + { + description: "guaranteed", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -207,7 +218,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory}, }, - "guaranteed-no-cpu-enforcement": { + { + description: "guaranteed-no-cpu-enforcement", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -221,7 +233,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory}, }, - "guaranteed-with-tuned-quota": { + { + description: "guaranteed-with-tuned-quota", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -235,7 +248,8 @@ func TestResourceConfigForPod(t 
*testing.T) { quotaPeriod: tunedQuotaPeriod, expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedTunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory}, }, - "guaranteed-no-cpu-enforcement-with-tuned-quota": { + { + description: "guaranteed-no-cpu-enforcement-with-tuned-quota", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -249,7 +263,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: tunedQuotaPeriod, expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory}, }, - "burstable-partial-limits-with-init-containers": { + { + description: "burstable-partial-limits-with-init-containers", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -274,7 +289,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: tunedQuotaPeriod, expected: &ResourceConfig{CPUShares: &burstablePartialShares}, }, - "besteffort-with-pod-level-resources-enabled": { + { + description: "besteffort-with-pod-level-resources-enabled", pod: &v1.Pod{ Spec: v1.PodSpec{ Resources: &v1.ResourceRequirements{ @@ -293,7 +309,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &minShares}, }, - "burstable-with-pod-level-requests": { + { + description: "burstable-with-pod-level-requests", pod: &v1.Pod{ Spec: v1.PodSpec{ Resources: &v1.ResourceRequirements{ @@ -311,7 +328,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &burstableShares}, }, - "burstable-with-pod-and-container-level-requests": { + { + description: "burstable-with-pod-and-container-level-requests", pod: &v1.Pod{ Spec: v1.PodSpec{ Resources: &v1.ResourceRequirements{ @@ -330,7 +348,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &burstableShares}, }, - 
"burstable-with-pod-level-resources": { + { + description: "burstable-with-pod-level-resources", pod: &v1.Pod{ Spec: v1.PodSpec{ Resources: &v1.ResourceRequirements{ @@ -349,7 +368,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory}, }, - "burstable-with-pod-and-container-level-resources": { + { + description: "burstable-with-pod-and-container-level-resources", pod: &v1.Pod{ Spec: v1.PodSpec{ Resources: &v1.ResourceRequirements{ @@ -369,7 +389,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory}, }, - "burstable-with-partial-pod-level-resources-limits": { + { + description: "burstable-with-partial-pod-level-resources-limits", pod: &v1.Pod{ Spec: v1.PodSpec{ Resources: &v1.ResourceRequirements{ @@ -388,7 +409,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &burstablePartialShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory}, }, - "guaranteed-with-pod-level-resources": { + { + description: "guaranteed-with-pod-level-resources", pod: &v1.Pod{ Spec: v1.PodSpec{ Resources: &v1.ResourceRequirements{ @@ -407,7 +429,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory}, }, - "guaranteed-with-pod-and-container-level-resources": { + { + description: "guaranteed-with-pod-and-container-level-resources", pod: &v1.Pod{ Spec: v1.PodSpec{ Resources: &v1.ResourceRequirements{ @@ -427,7 +450,8 @@ func TestResourceConfigForPod(t *testing.T) { quotaPeriod: 
defaultQuotaPeriod, expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory}, }, - "guaranteed-pod-level-resources-with-init-containers": { + { + description: "guaranteed-pod-level-resources-with-init-containers", pod: &v1.Pod{ Spec: v1.PodSpec{ Resources: &v1.ResourceRequirements{ @@ -455,241 +479,23 @@ func TestResourceConfigForPod(t *testing.T) { }, } - for testName, testCase := range testCases { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.PodLevelResources, testCase.podLevelResourcesEnabled) - actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod, false) - if !reflect.DeepEqual(actual.CPUPeriod, testCase.expected.CPUPeriod) { - t.Errorf("unexpected result, test: %v, cpu period not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUPeriod, *actual.CPUPeriod) - } - if !reflect.DeepEqual(actual.CPUQuota, testCase.expected.CPUQuota) { - t.Errorf("unexpected result, test: %v, cpu quota not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUQuota, *actual.CPUQuota) - } - if !reflect.DeepEqual(actual.CPUShares, testCase.expected.CPUShares) { - t.Errorf("unexpected result, test: %v, cpu shares not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUShares, *actual.CPUShares) - } - if !reflect.DeepEqual(actual.Memory, testCase.expected.Memory) { - t.Errorf("unexpected result, test: %v, memory not as expected. 
Expected: %v, Actual:%v", testName, *testCase.expected.Memory, *actual.Memory) - } - } -} - -func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) { - defaultQuotaPeriod := uint64(100 * time.Millisecond / time.Microsecond) // in microseconds - tunedQuotaPeriod := uint64(5 * time.Millisecond / time.Microsecond) // in microseconds - tunedQuota := int64(1 * time.Millisecond / time.Microsecond) - - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.CPUCFSQuotaPeriod, true) - - minShares := uint64(MinShares) - burstableShares := MilliCPUToShares(100) - memoryQuantity := resource.MustParse("200Mi") - burstableMemory := memoryQuantity.Value() - burstablePartialShares := MilliCPUToShares(200) - burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod)) - guaranteedShares := MilliCPUToShares(100) - guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod)) - guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod)) - memoryQuantity = resource.MustParse("100Mi") - cpuNoLimit := int64(-1) - guaranteedMemory := memoryQuantity.Value() - testCases := map[string]struct { - pod *v1.Pod - expected *ResourceConfig - enforceCPULimits bool - quotaPeriod uint64 // in microseconds - }{ - "besteffort": { - pod: &v1.Pod{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")), - }, - }, - }, - }, - enforceCPULimits: true, - quotaPeriod: defaultQuotaPeriod, - expected: &ResourceConfig{CPUShares: &minShares}, - }, - "burstable-no-limits": { - pod: &v1.Pod{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")), - }, - }, - }, - }, - enforceCPULimits: true, - quotaPeriod: defaultQuotaPeriod, - expected: &ResourceConfig{CPUShares: &burstableShares}, - }, - "burstable-with-limits": { - pod: &v1.Pod{ - Spec: v1.PodSpec{ - Containers: 
[]v1.Container{ - { - Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")), - }, - }, - }, - }, - enforceCPULimits: true, - quotaPeriod: defaultQuotaPeriod, - expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory}, - }, - "burstable-with-limits-no-cpu-enforcement": { - pod: &v1.Pod{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")), - }, - }, - }, - }, - enforceCPULimits: false, - quotaPeriod: defaultQuotaPeriod, - expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory}, - }, - "burstable-partial-limits": { - pod: &v1.Pod{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")), - }, - { - Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")), - }, - }, - }, - }, - enforceCPULimits: true, - quotaPeriod: defaultQuotaPeriod, - expected: &ResourceConfig{CPUShares: &burstablePartialShares}, - }, - "burstable-with-limits-with-tuned-quota": { - pod: &v1.Pod{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")), - }, - }, - }, - }, - enforceCPULimits: true, - quotaPeriod: tunedQuotaPeriod, - expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &tunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory}, - }, - "burstable-with-limits-no-cpu-enforcement-with-tuned-quota": { - pod: &v1.Pod{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")), - }, - }, - }, - }, - enforceCPULimits: 
false, - quotaPeriod: tunedQuotaPeriod, - expected: &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory}, - }, - "burstable-partial-limits-with-tuned-quota": { - pod: &v1.Pod{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")), - }, - { - Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")), - }, - }, - }, - }, - enforceCPULimits: true, - quotaPeriod: tunedQuotaPeriod, - expected: &ResourceConfig{CPUShares: &burstablePartialShares}, - }, - "guaranteed": { - pod: &v1.Pod{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), - }, - }, - }, - }, - enforceCPULimits: true, - quotaPeriod: defaultQuotaPeriod, - expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory}, - }, - "guaranteed-no-cpu-enforcement": { - pod: &v1.Pod{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), - }, - }, - }, - }, - enforceCPULimits: false, - quotaPeriod: defaultQuotaPeriod, - expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory}, - }, - "guaranteed-with-tuned-quota": { - pod: &v1.Pod{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), - }, - }, - }, - }, - enforceCPULimits: true, - quotaPeriod: tunedQuotaPeriod, - expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedTunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory}, - }, - 
"guaranteed-no-cpu-enforcement-with-tuned-quota": { - pod: &v1.Pod{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), - }, - }, - }, - }, - enforceCPULimits: false, - quotaPeriod: tunedQuotaPeriod, - expected: &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory}, - }, - } - - for testName, testCase := range testCases { - - actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod, false) - - if !reflect.DeepEqual(actual.CPUPeriod, testCase.expected.CPUPeriod) { - t.Errorf("unexpected result, test: %v, cpu period not as expected", testName) - } - if !reflect.DeepEqual(actual.CPUQuota, testCase.expected.CPUQuota) { - t.Errorf("unexpected result, test: %v, cpu quota not as expected", testName) - } - if !reflect.DeepEqual(actual.CPUShares, testCase.expected.CPUShares) { - t.Errorf("unexpected result, test: %v, cpu shares not as expected", testName) - } - if !reflect.DeepEqual(actual.Memory, testCase.expected.Memory) { - t.Errorf("unexpected result, test: %v, memory not as expected", testName) - } + for _, testCase := range testCases { + t.Run(testCase.description, func(t *testing.T) { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.PodLevelResources, testCase.podLevelResourcesEnabled) + actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod, false) + if !reflect.DeepEqual(actual.CPUPeriod, testCase.expected.CPUPeriod) { + t.Errorf("cpu period not as expected. Expected: %v, Actual:%v", *testCase.expected.CPUPeriod, *actual.CPUPeriod) + } + if !reflect.DeepEqual(actual.CPUQuota, testCase.expected.CPUQuota) { + t.Errorf("cpu quota not as expected. 
Expected: %v, Actual:%v", *testCase.expected.CPUQuota, *actual.CPUQuota) + } + if !reflect.DeepEqual(actual.CPUShares, testCase.expected.CPUShares) { + t.Errorf("cpu shares not as expected. Expected: %v, Actual:%v", *testCase.expected.CPUShares, *actual.CPUShares) + } + if !reflect.DeepEqual(actual.Memory, testCase.expected.Memory) { + t.Errorf("memory not as expected. Expected: %v, Actual:%v", *testCase.expected.Memory, *actual.Memory) + } + }) } } diff --git a/test/compatibility_lifecycle/reference/feature_list.md b/test/compatibility_lifecycle/reference/feature_list.md index 6375d7b74e0..2bf77fb7850 100644 --- a/test/compatibility_lifecycle/reference/feature_list.md +++ b/test/compatibility_lifecycle/reference/feature_list.md @@ -48,7 +48,7 @@ | ContainerStopSignals | | | 1.33– | | | | | [code](https://cs.k8s.io/?q=%5CbContainerStopSignals%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbContainerStopSignals%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | | CoordinatedLeaderElection | | | 1.31–1.32 | 1.33– | | | | [code](https://cs.k8s.io/?q=%5CbCoordinatedLeaderElection%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbCoordinatedLeaderElection%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | | CrossNamespaceVolumeDataSource | | | 1.26– | | | | | [code](https://cs.k8s.io/?q=%5CbCrossNamespaceVolumeDataSource%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbCrossNamespaceVolumeDataSource%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | -| CustomCPUCFSQuotaPeriod | | | 1.12– | | | | | [code](https://cs.k8s.io/?q=%5CbCustomCPUCFSQuotaPeriod%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbCustomCPUCFSQuotaPeriod%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) 
| +| CustomCPUCFSQuotaPeriod | :ballot_box_with_check: 1.36+ | | 1.12–1.35 | | 1.36– | | | [code](https://cs.k8s.io/?q=%5CbCustomCPUCFSQuotaPeriod%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbCustomCPUCFSQuotaPeriod%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | | CustomResourceFieldSelectors | :ballot_box_with_check: 1.31+ | :closed_lock_with_key: 1.32+ | 1.30 | 1.31 | 1.32– | | | [code](https://cs.k8s.io/?q=%5CbCustomResourceFieldSelectors%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbCustomResourceFieldSelectors%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | | DRAAdminAccess | :ballot_box_with_check: 1.34+ | | 1.32–1.33 | 1.34– | | | DynamicResourceAllocation | [code](https://cs.k8s.io/?q=%5CbDRAAdminAccess%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDRAAdminAccess%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | | DRAConsumableCapacity | | | 1.34– | | | | DynamicResourceAllocation | [code](https://cs.k8s.io/?q=%5CbDRAConsumableCapacity%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDRAConsumableCapacity%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) | diff --git a/test/compatibility_lifecycle/reference/versioned_feature_list.yaml b/test/compatibility_lifecycle/reference/versioned_feature_list.yaml index 67ae369208d..abffb3fe4d8 100644 --- a/test/compatibility_lifecycle/reference/versioned_feature_list.yaml +++ b/test/compatibility_lifecycle/reference/versioned_feature_list.yaml @@ -321,6 +321,10 @@ lockToDefault: false preRelease: Alpha version: "1.12" + - default: true + lockToDefault: false + preRelease: GA + version: "1.36" - name: CPUManagerPolicyAlphaOptions versionedSpecs: - default: false From 
9bc91a75d3ad074224a3b4aa05f8e93526d9c737 Mon Sep 17 00:00:00 2001 From: Francesco Romani Date: Tue, 20 Jan 2026 11:45:18 +0100 Subject: [PATCH 3/4] e2e: cpumanager: test for custom cpu cfs quota period Add basic e2e test coverage for the CFSCPUPeriod toggle Signed-off-by: Francesco Romani --- test/e2e_node/cpu_manager_test.go | 96 ++++++++++++++++++++++++++++--- 1 file changed, 87 insertions(+), 9 deletions(-) diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go index e0b41a47f47..eaa78ceebe2 100644 --- a/test/e2e_node/cpu_manager_test.go +++ b/test/e2e_node/cpu_manager_test.go @@ -1859,6 +1859,69 @@ var _ = SIGDescribe("CPU Manager", ginkgo.Ordered, ginkgo.ContinueOnFailure, fra }) }) + ginkgo.When("using non-default CPU CFS quota period", ginkgo.Label("cfs-period", "cfs-quota"), func() { + var cfsPeriod time.Duration + var testCFSPeriod string + + ginkgo.BeforeEach(func(ctx context.Context) { + requireCGroupV2() + // WARNING: this assumes 2-way SMT systems - we don't know how to access other SMT levels. + // this means on more-than-2-way SMT systems this test will prove nothing + reservedCPUs = cpuset.New(0) + + // the default QuotaPeriod is in milliseconds + cfsPeriod = time.Duration((cm.QuotaPeriod / 2) * time.Microsecond) + + updateKubeletConfigIfNeeded(ctx, f, configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{ + policyName: string(cpumanager.PolicyStatic), + reservedSystemCPUs: reservedCPUs, + customCPUCFSQuotaPeriod: cfsPeriod, // TODO: should we need to do this per-test? 
+ })) + + // cfsPeriod is a time.Duration (nanoseconds); the kernel exposes the CFS period in microseconds, hence the division by 1000 + testCFSPeriod = strconv.FormatInt(int64(cfsPeriod/1000), 10) + }) + + ginkgo.It("should enforce for guaranteed pod", func(ctx context.Context) { + cpuCount := 1 // overshoot, minimum request is 1 + skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), cpuCount) + + ctnName := "gu-container-cfsquota-enabled" + pod := makeCPUManagerPod("gu-pod-cfs-quota-on", []ctnAttribute{ + { + ctnName: ctnName, + cpuRequest: "500m", + cpuLimit: "500m", + }, + }) + ginkgo.By("creating the test pod") + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) + podMap[string(pod.UID)] = pod + + gomega.Expect(pod).To(HaveSandboxQuotaWithPeriod("25000", testCFSPeriod)) + gomega.Expect(pod).To(HaveContainerQuotaWithPeriod(ctnName, "25000", testCFSPeriod)) + }) + + ginkgo.It("should enforce for burstable pod", func(ctx context.Context) { + skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), 0) + + ctnName := "bu-container-cfsquota-enabled" + pod := makeCPUManagerPod("bu-pod-cfs-quota-on", []ctnAttribute{ + { + ctnName: ctnName, + cpuRequest: "100m", + cpuLimit: "500m", + }, + }) + ginkgo.By("creating the test pod") + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) + podMap[string(pod.UID)] = pod + + gomega.Expect(pod).To(HaveSandboxQuotaWithPeriod("25000", testCFSPeriod)) + gomega.Expect(pod).To(HaveContainerQuotaWithPeriod(ctnName, "25000", testCFSPeriod)) + }) + }) + f.Context("When checking the sidecar containers", framework.WithNodeConformance(), func() { ginkgo.BeforeEach(func(ctx context.Context) { reservedCPUs = cpuset.New(0) @@ -2156,10 +2219,8 @@ func HaveContainerCPUsEqualTo(ctnName string, expectedCPUs cpuset.CPUSet) types.
}).WithTemplate("Pod {{.Actual.Namespace}}/{{.Actual.Name}} UID {{.Actual.UID}} has allowed CPUs <{{.Data.CurrentCPUs}}> not matching the expected value <{{.Data.ExpectedCPUs}}> for container {{.Data.Name}}", md) } -func HaveSandboxQuota(expectedQuota string) types.GomegaMatcher { - md := &msgData{ - ExpectedQuota: expectedQuota, - } +func HaveSandboxQuotaWithPeriod(expectedQuota, cfsPeriod string) types.GomegaMatcher { + md := &msgData{} return gcustom.MakeMatcher(func(actual *v1.Pod) (bool, error) { md.Name = klog.KObj(actual).String() quota, err := getSandboxCFSQuota(actual) @@ -2168,7 +2229,8 @@ func HaveSandboxQuota(expectedQuota string) types.GomegaMatcher { framework.Logf("getSandboxCFSQuota() failed: %v", err) return false, err } - re, err := regexp.Compile(fmt.Sprintf("^%s %s$", expectedQuota, defaultCFSPeriod)) + md.ExpectedQuota = fmt.Sprintf("^%s %s$", expectedQuota, cfsPeriod) + re, err := regexp.Compile(md.ExpectedQuota) if err != nil { return false, err } @@ -2176,10 +2238,9 @@ func HaveSandboxQuota(expectedQuota string) types.GomegaMatcher { }).WithTemplate("Pod {{.Actual.Namespace}}/{{.Actual.Name}} UID {{.Actual.UID}} has quota <{{.Data.CurrentQuota}}> not matching expected value <{{.Data.ExpectedQuota}}>", md) } -func HaveContainerQuota(ctnName, expectedQuota string) types.GomegaMatcher { +func HaveContainerQuotaWithPeriod(ctnName, expectedQuota, cfsPeriod string) types.GomegaMatcher { md := &msgData{ - Name: ctnName, - ExpectedQuota: expectedQuota, + Name: ctnName, } return gcustom.MakeMatcher(func(actual *v1.Pod) (bool, error) { quota, err := getContainerCFSQuota(actual, ctnName, false) @@ -2188,7 +2249,8 @@ func HaveContainerQuota(ctnName, expectedQuota string) types.GomegaMatcher { framework.Logf("getContainerCFSQuota(%s) failed: %v", ctnName, err) return false, err } - re, err := regexp.Compile(fmt.Sprintf("^%s %s$", expectedQuota, defaultCFSPeriod)) + md.ExpectedQuota = fmt.Sprintf("^%s %s$", expectedQuota, cfsPeriod) + re, err := 
regexp.Compile(md.ExpectedQuota) if err != nil { return false, err } @@ -2196,6 +2258,14 @@ func HaveContainerQuota(ctnName, expectedQuota string) types.GomegaMatcher { }).WithTemplate("Pod {{.Actual.Namespace}}/{{.Actual.Name}} UID {{.Actual.UID}} has quota <{{.Data.CurrentQuota}}> not matching expected value <{{.Data.ExpectedQuota}}> for container {{.Data.Name}}", md) } +func HaveSandboxQuota(expectedQuota string) types.GomegaMatcher { + return HaveSandboxQuotaWithPeriod(expectedQuota, defaultCFSPeriod) +} + +func HaveContainerQuota(ctnName, expectedQuota string) types.GomegaMatcher { + return HaveContainerQuotaWithPeriod(ctnName, expectedQuota, defaultCFSPeriod) +} + func HaveContainerCPUsThreadSiblings(ctnName string) types.GomegaMatcher { md := &msgData{ Name: ctnName, @@ -2801,6 +2871,7 @@ type cpuManagerKubeletArguments struct { enableCPUManagerOptions bool disableCPUQuotaWithExclusiveCPUs bool enablePodLevelResources bool + customCPUCFSQuotaPeriod time.Duration reservedSystemCPUs cpuset.CPUSet options map[string]string } @@ -2816,6 +2887,13 @@ func configureCPUManagerInKubelet(oldCfg *kubeletconfig.KubeletConfiguration, ku newCfg.FeatureGates["DisableCPUQuotaWithExclusiveCPUs"] = kubeletArguments.disableCPUQuotaWithExclusiveCPUs newCfg.FeatureGates["PodLevelResources"] = kubeletArguments.enablePodLevelResources + if kubeletArguments.customCPUCFSQuotaPeriod != 0 { + newCfg.FeatureGates["CustomCPUCFSQuotaPeriod"] = true + newCfg.CPUCFSQuotaPeriod.Duration = kubeletArguments.customCPUCFSQuotaPeriod + } else { + newCfg.FeatureGates["CustomCPUCFSQuotaPeriod"] = false + } + newCfg.CPUManagerPolicy = kubeletArguments.policyName newCfg.CPUManagerReconcilePeriod = metav1.Duration{Duration: 1 * time.Second} From 9dc40c8545893f849a656192ee9b4b996122211a Mon Sep 17 00:00:00 2001 From: Francesco Romani Date: Mon, 9 Feb 2026 08:24:50 +0100 Subject: [PATCH 4/4] kubelet: api: validation: remove CustomCPUCFSQuotaPeriod setting we moved the FG to default on, so we don't need 
to enable it explicitly anymore. Signed-off-by: Francesco Romani --- pkg/kubelet/apis/config/validation/validation_test.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pkg/kubelet/apis/config/validation/validation_test.go b/pkg/kubelet/apis/config/validation/validation_test.go index 7e684697004..b6b2e3f947a 100644 --- a/pkg/kubelet/apis/config/validation/validation_test.go +++ b/pkg/kubelet/apis/config/validation/validation_test.go @@ -72,7 +72,6 @@ var ( ShutdownGracePeriodCriticalPods: metav1.Duration{Duration: 10 * time.Second}, MemoryThrottlingFactor: ptr.To(0.9), FeatureGates: map[string]bool{ - "CustomCPUCFSQuotaPeriod": true, "GracefulNodeShutdown": true, "MemoryQoS": true, "KubeletCrashLoopBackOffMax": true, @@ -165,7 +164,6 @@ func TestValidateKubeletConfiguration(t *testing.T) { }, { name: "invalid CPUCFSQuotaPeriod", configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration { - conf.FeatureGates = map[string]bool{"CustomCPUCFSQuotaPeriod": true} conf.CPUCFSQuotaPeriod = metav1.Duration{Duration: 2 * time.Second} return conf }, @@ -417,7 +415,7 @@ func TestValidateKubeletConfiguration(t *testing.T) { }, { name: "CrashLoopBackOff.MaxContainerRestartPeriod just a little too high", configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration { - conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true, "CustomCPUCFSQuotaPeriod": true} + conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true} conf.CrashLoopBackOff = kubeletconfig.CrashLoopBackOffConfig{ // 300.9 seconds MaxContainerRestartPeriod: &metav1.Duration{Duration: 300900 * time.Millisecond}, @@ -429,7 +427,7 @@ func TestValidateKubeletConfiguration(t *testing.T) { { name: "CrashLoopBackOff.MaxContainerRestartPeriod just a little too low", configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration { - conf.FeatureGates =
map[string]bool{"KubeletCrashLoopBackOffMax": true, "CustomCPUCFSQuotaPeriod": true} + conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true} conf.CrashLoopBackOff = kubeletconfig.CrashLoopBackOffConfig{ // 300.9 seconds MaxContainerRestartPeriod: &metav1.Duration{Duration: 999 * time.Millisecond}, @@ -441,13 +439,13 @@ func TestValidateKubeletConfiguration(t *testing.T) { { name: "KubeletCrashLoopBackOffMax feature gate on, no crashLoopBackOff config, ok", configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration { - conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true, "CustomCPUCFSQuotaPeriod": true} + conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true} return conf }, }, { name: "KubeletCrashLoopBackOffMax feature gate on, but no crashLoopBackOff.MaxContainerRestartPeriod config", configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration { - conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true, "CustomCPUCFSQuotaPeriod": true} + conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true} conf.CrashLoopBackOff = kubeletconfig.CrashLoopBackOffConfig{} return conf },