Mirror of https://github.com/kubernetes/kubernetes.git, synced 2026-02-18 18:28:18 -05:00
Merge pull request #136339 from ffromani/deprecate-customcpucfsquota-fg-not-feature
move to GA the `CustomCPUCFSQuotaPeriod` feature gate (was: deprecate the FG, not the feature)
Commit: e7f26c678a
6 changed files with 171 additions and 278 deletions
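For orientation before the hunks: the quota values exercised throughout this diff come from converting a milliCPU limit plus a CFS period into a CFS quota. Below is a minimal, standalone sketch of that arithmetic; the constant values and the helper name milliCPUToQuota are assumptions modeled on the MilliCPUToQuota calls visible in the test hunks, not the authoritative kubelet implementation.

package main

import "fmt"

const (
	milliCPUToCPU  = 1000 // milliCPU units per CPU
	minQuotaPeriod = 1000 // assumed kernel minimum quota, in microseconds
)

// milliCPUToQuota converts a CPU limit (milliCPU) and a CFS period (µs)
// into a CFS quota (µs), clamped to the assumed kernel minimum.
func milliCPUToQuota(milliCPU, period int64) int64 {
	if milliCPU <= 0 {
		return 0
	}
	quota := (milliCPU * period) / milliCPUToCPU
	if quota < minQuotaPeriod {
		quota = minQuotaPeriod
	}
	return quota
}

func main() {
	fmt.Println(milliCPUToQuota(200, 100000)) // 20000µs at the default 100ms period
	fmt.Println(milliCPUToQuota(200, 5000))   // 1000µs at the tuned 5ms period (the tests' tunedQuota)
}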
@@ -1126,6 +1126,8 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
     CPUCFSQuotaPeriod: {
         {Version: version.MustParse("1.12"), Default: false, PreRelease: featuregate.Alpha},
+        // see https://github.com/kubernetes/kubernetes/pull/136339 for full context
+        {Version: version.MustParse("1.36"), Default: true, PreRelease: featuregate.GA}, // LockToDefault in 1.37, remove in 1.40
     },

     CPUManagerPolicyAlphaOptions: {
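A GA entry with Default: true means the gate answers true unless explicitly disabled, until LockToDefault and removal. A minimal sketch of how a call site consults it; the alias names mirror the utilfeature/pkgfeatures imports used later in this diff, and the fallback logic here is illustrative, not the kubelet's actual code path.

package kubelet // illustrative package name

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	pkgfeatures "k8s.io/kubernetes/pkg/features"
)

// effectiveQuotaPeriod is a sketch: with CPUCFSQuotaPeriod enabled (the GA
// default), the configured period wins; disabled, fall back to the default.
func effectiveQuotaPeriod(configuredUS, defaultUS uint64) uint64 {
	if utilfeature.DefaultFeatureGate.Enabled(pkgfeatures.CPUCFSQuotaPeriod) {
		return configuredUS
	}
	return defaultUS
}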
@@ -1246,6 +1248,7 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
     DisableCPUQuotaWithExclusiveCPUs: {
         {Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
+        {Version: version.MustParse("1.36"), Default: true, PreRelease: featuregate.Deprecated}, // LockToDefault(true) in 1.37, remove in 1.38
     },

     DisableNodeKubeProxyVersion: {
@@ -72,7 +72,6 @@ var (
     ShutdownGracePeriodCriticalPods: metav1.Duration{Duration: 10 * time.Second},
     MemoryThrottlingFactor:          ptr.To(0.9),
     FeatureGates: map[string]bool{
-        "CustomCPUCFSQuotaPeriod":    true,
         "GracefulNodeShutdown":       true,
         "MemoryQoS":                  true,
         "KubeletCrashLoopBackOffMax": true,
@@ -165,7 +164,6 @@ func TestValidateKubeletConfiguration(t *testing.T) {
     }, {
         name: "invalid CPUCFSQuotaPeriod",
         configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
-            conf.FeatureGates = map[string]bool{"CustomCPUCFSQuotaPeriod": true}
             conf.CPUCFSQuotaPeriod = metav1.Duration{Duration: 2 * time.Second}
             return conf
         },
@@ -417,7 +415,7 @@ func TestValidateKubeletConfiguration(t *testing.T) {
     }, {
         name: "CrashLoopBackOff.MaxContainerRestartPeriod just a little too high",
         configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
-            conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true, "CustomCPUCFSQuotaPeriod": true}
+            conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true}
             conf.CrashLoopBackOff = kubeletconfig.CrashLoopBackOffConfig{
                 // 300.9 seconds
                 MaxContainerRestartPeriod: &metav1.Duration{Duration: 300900 * time.Millisecond},
@@ -429,7 +427,7 @@ func TestValidateKubeletConfiguration(t *testing.T) {
     {
         name: "CrashLoopBackOff.MaxContainerRestartPeriod just a little too low",
         configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
-            conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true, "CustomCPUCFSQuotaPeriod": true}
+            conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true}
             conf.CrashLoopBackOff = kubeletconfig.CrashLoopBackOffConfig{
                 // 0.999 seconds
                 MaxContainerRestartPeriod: &metav1.Duration{Duration: 999 * time.Millisecond},
@@ -441,13 +439,13 @@ func TestValidateKubeletConfiguration(t *testing.T) {
     {
         name: "KubeletCrashLoopBackOffMax feature gate on, no crashLoopBackOff config, ok",
         configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
-            conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true, "CustomCPUCFSQuotaPeriod": true}
+            conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true}
             return conf
         },
     }, {
         name: "KubeletCrashLoopBackOffMax feature gate on, but no crashLoopBackOff.MaxContainerRestartPeriod config",
         configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
-            conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true, "CustomCPUCFSQuotaPeriod": true}
+            conf.FeatureGates = map[string]bool{"KubeletCrashLoopBackOffMax": true}
             conf.CrashLoopBackOff = kubeletconfig.CrashLoopBackOffConfig{}
             return conf
         },
@@ -719,7 +717,7 @@ func TestValidateKubeletConfiguration(t *testing.T) {
             conf.ImagePullCredentialsVerificationPolicy = "invalid"
             return conf
         },
-        errMsg: `option "invalid" specified for imagePullCredentialsVerificationPolicy. Valid options are "NeverVerify", "NeverVerifyPreloadedImages", "NeverVerifyAllowlistedImages" or "AlwaysVerify"]`,
+        errMsg: `option "invalid" specified for imagePullCredentialsVerificationPolicy. Valid options are "NeverVerify", "NeverVerifyPreloadedImages", "NeverVerifyAllowlistedImages" or "AlwaysVerify"`,
     }, {
         name: "invalid PreloadedImagesVerificationAllowlist configuration - featuregate enabled",
         configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
@@ -728,7 +726,7 @@ func TestValidateKubeletConfiguration(t *testing.T) {
             conf.PreloadedImagesVerificationAllowlist = []string{"test.test/repo"}
             return conf
         },
-        errMsg: "can't set `preloadedImagesVerificationAllowlist` if `imagePullCredentialsVertificationPolicy` is not \"NeverVerifyAllowlistedImages\"]",
+        errMsg: "can't set `preloadedImagesVerificationAllowlist` if `imagePullCredentialsVertificationPolicy` is not \"NeverVerifyAllowlistedImages\"",
     }, {
         name: "invalid PreloadedImagesVerificationAllowlist configuration - featuregate disabled",
         configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
@@ -55,6 +55,7 @@ func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequir
 func TestResourceConfigForPod(t *testing.T) {
     defaultQuotaPeriod := uint64(100 * time.Millisecond / time.Microsecond) // in microseconds
     tunedQuotaPeriod := uint64(5 * time.Millisecond / time.Microsecond)     // in microseconds
+    tunedQuota := int64(1 * time.Millisecond / time.Microsecond)

     minShares := uint64(MinShares)
     burstableShares := MilliCPUToShares(100)
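The period constants above lean on Go's Duration division, which yields a dimensionless count: 100ms divided by 1µs is 100000, the default CFS period in microseconds. A one-screen check of the values used in these tests:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Duration / Duration division drops the unit and leaves a count.
	fmt.Println(uint64(100 * time.Millisecond / time.Microsecond)) // 100000 (defaultQuotaPeriod)
	fmt.Println(uint64(5 * time.Millisecond / time.Microsecond))   // 5000 (tunedQuotaPeriod)
	fmt.Println(int64(1 * time.Millisecond / time.Microsecond))    // 1000 (tunedQuota)
}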
@@ -68,14 +69,16 @@ func TestResourceConfigForPod(t *testing.T) {
     memoryQuantity = resource.MustParse("100Mi")
     cpuNoLimit := int64(-1)
     guaranteedMemory := memoryQuantity.Value()
-    testCases := map[string]struct {
+    testCases := []struct {
+        description              string
         pod                      *v1.Pod
         expected                 *ResourceConfig
         enforceCPULimits         bool
         quotaPeriod              uint64 // in microseconds
         podLevelResourcesEnabled bool
     }{
-        "besteffort": {
+        {
+            description: "besteffort",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{
@@ -89,7 +92,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &minShares},
         },
-        "burstable-no-limits": {
+        {
+            description: "burstable-no-limits",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{
@@ -103,7 +107,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &burstableShares},
         },
-        "burstable-with-limits": {
+        {
+            description: "burstable-with-limits",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{
@@ -117,7 +122,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
         },
-        "burstable-with-limits-no-cpu-enforcement": {
+        {
+            description: "burstable-with-limits-no-cpu-enforcement",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{
@@ -131,7 +137,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
         },
-        "burstable-partial-limits": {
+        {
+            description: "burstable-partial-limits",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{
@@ -148,7 +155,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &burstablePartialShares},
         },
-        "burstable-with-limits-with-tuned-quota": {
+        {
+            description: "burstable-with-limits-with-tuned-quota",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{
@@ -160,9 +168,10 @@ func TestResourceConfigForPod(t *testing.T) {
             },
             enforceCPULimits: true,
             quotaPeriod:      tunedQuotaPeriod,
-            expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
+            expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &tunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
         },
-        "burstable-with-limits-no-cpu-enforcement-with-tuned-quota": {
+        {
+            description: "burstable-with-limits-no-cpu-enforcement-with-tuned-quota",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{
@@ -176,7 +185,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      tunedQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
         },
-        "burstable-partial-limits-with-tuned-quota": {
+        {
+            description: "burstable-partial-limits-with-tuned-quota",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{
@@ -193,7 +203,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      tunedQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &burstablePartialShares},
         },
-        "guaranteed": {
+        {
+            description: "guaranteed",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{
@@ -207,7 +218,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
         },
-        "guaranteed-no-cpu-enforcement": {
+        {
+            description: "guaranteed-no-cpu-enforcement",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{
@@ -221,7 +233,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
         },
-        "guaranteed-with-tuned-quota": {
+        {
+            description: "guaranteed-with-tuned-quota",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{
@@ -235,7 +248,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      tunedQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedTunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
         },
-        "guaranteed-no-cpu-enforcement-with-tuned-quota": {
+        {
+            description: "guaranteed-no-cpu-enforcement-with-tuned-quota",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{
@@ -249,7 +263,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      tunedQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
         },
-        "burstable-partial-limits-with-init-containers": {
+        {
+            description: "burstable-partial-limits-with-init-containers",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{
@@ -274,7 +289,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      tunedQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &burstablePartialShares},
         },
-        "besteffort-with-pod-level-resources-enabled": {
+        {
+            description: "besteffort-with-pod-level-resources-enabled",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Resources: &v1.ResourceRequirements{
@@ -293,7 +309,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &minShares},
         },
-        "burstable-with-pod-level-requests": {
+        {
+            description: "burstable-with-pod-level-requests",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Resources: &v1.ResourceRequirements{
@@ -311,7 +328,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &burstableShares},
         },
-        "burstable-with-pod-and-container-level-requests": {
+        {
+            description: "burstable-with-pod-and-container-level-requests",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Resources: &v1.ResourceRequirements{
@@ -330,7 +348,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &burstableShares},
         },
-        "burstable-with-pod-level-resources": {
+        {
+            description: "burstable-with-pod-level-resources",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Resources: &v1.ResourceRequirements{
@@ -349,7 +368,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
         },
-        "burstable-with-pod-and-container-level-resources": {
+        {
+            description: "burstable-with-pod-and-container-level-resources",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Resources: &v1.ResourceRequirements{
@@ -369,7 +389,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
         },
-        "burstable-with-partial-pod-level-resources-limits": {
+        {
+            description: "burstable-with-partial-pod-level-resources-limits",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Resources: &v1.ResourceRequirements{
@@ -388,7 +409,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &burstablePartialShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
         },
-        "guaranteed-with-pod-level-resources": {
+        {
+            description: "guaranteed-with-pod-level-resources",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Resources: &v1.ResourceRequirements{
@@ -407,7 +429,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
         },
-        "guaranteed-with-pod-and-container-level-resources": {
+        {
+            description: "guaranteed-with-pod-and-container-level-resources",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Resources: &v1.ResourceRequirements{
@@ -427,7 +450,8 @@ func TestResourceConfigForPod(t *testing.T) {
             quotaPeriod:      defaultQuotaPeriod,
             expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
         },
-        "guaranteed-pod-level-resources-with-init-containers": {
+        {
+            description: "guaranteed-pod-level-resources-with-init-containers",
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
                     Resources: &v1.ResourceRequirements{
@@ -455,241 +479,23 @@ func TestResourceConfigForPod(t *testing.T) {
         },
     }

-    for testName, testCase := range testCases {
-        featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.PodLevelResources, testCase.podLevelResourcesEnabled)
-        actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod, false)
-        if !reflect.DeepEqual(actual.CPUPeriod, testCase.expected.CPUPeriod) {
-            t.Errorf("unexpected result, test: %v, cpu period not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUPeriod, *actual.CPUPeriod)
-        }
-        if !reflect.DeepEqual(actual.CPUQuota, testCase.expected.CPUQuota) {
-            t.Errorf("unexpected result, test: %v, cpu quota not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUQuota, *actual.CPUQuota)
-        }
-        if !reflect.DeepEqual(actual.CPUShares, testCase.expected.CPUShares) {
-            t.Errorf("unexpected result, test: %v, cpu shares not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUShares, *actual.CPUShares)
-        }
-        if !reflect.DeepEqual(actual.Memory, testCase.expected.Memory) {
-            t.Errorf("unexpected result, test: %v, memory not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.Memory, *actual.Memory)
-        }
-    }
-}
-
-func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
-    defaultQuotaPeriod := uint64(100 * time.Millisecond / time.Microsecond) // in microseconds
-    tunedQuotaPeriod := uint64(5 * time.Millisecond / time.Microsecond)     // in microseconds
-    tunedQuota := int64(1 * time.Millisecond / time.Microsecond)
-
-    featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.CPUCFSQuotaPeriod, true)
-
-    minShares := uint64(MinShares)
-    burstableShares := MilliCPUToShares(100)
-    memoryQuantity := resource.MustParse("200Mi")
-    burstableMemory := memoryQuantity.Value()
-    burstablePartialShares := MilliCPUToShares(200)
-    burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod))
-    guaranteedShares := MilliCPUToShares(100)
-    guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod))
-    guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod))
-    memoryQuantity = resource.MustParse("100Mi")
-    cpuNoLimit := int64(-1)
-    guaranteedMemory := memoryQuantity.Value()
-    testCases := map[string]struct {
-        pod              *v1.Pod
-        expected         *ResourceConfig
-        enforceCPULimits bool
-        quotaPeriod      uint64 // in microseconds
-    }{
-        "besteffort": {
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")),
-                        },
-                    },
-                },
-            },
-            enforceCPULimits: true,
-            quotaPeriod:      defaultQuotaPeriod,
-            expected:         &ResourceConfig{CPUShares: &minShares},
-        },
-        "burstable-no-limits": {
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
-                        },
-                    },
-                },
-            },
-            enforceCPULimits: true,
-            quotaPeriod:      defaultQuotaPeriod,
-            expected:         &ResourceConfig{CPUShares: &burstableShares},
-        },
-        "burstable-with-limits": {
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
-                        },
-                    },
-                },
-            },
-            enforceCPULimits: true,
-            quotaPeriod:      defaultQuotaPeriod,
-            expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
-        },
-        "burstable-with-limits-no-cpu-enforcement": {
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
-                        },
-                    },
-                },
-            },
-            enforceCPULimits: false,
-            quotaPeriod:      defaultQuotaPeriod,
-            expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
-        },
-        "burstable-partial-limits": {
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
-                        },
-                        {
-                            Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
-                        },
-                    },
-                },
-            },
-            enforceCPULimits: true,
-            quotaPeriod:      defaultQuotaPeriod,
-            expected:         &ResourceConfig{CPUShares: &burstablePartialShares},
-        },
-        "burstable-with-limits-with-tuned-quota": {
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
-                        },
-                    },
-                },
-            },
-            enforceCPULimits: true,
-            quotaPeriod:      tunedQuotaPeriod,
-            expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &tunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
-        },
-        "burstable-with-limits-no-cpu-enforcement-with-tuned-quota": {
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
-                        },
-                    },
-                },
-            },
-            enforceCPULimits: false,
-            quotaPeriod:      tunedQuotaPeriod,
-            expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
-        },
-        "burstable-partial-limits-with-tuned-quota": {
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
-                        },
-                        {
-                            Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
-                        },
-                    },
-                },
-            },
-            enforceCPULimits: true,
-            quotaPeriod:      tunedQuotaPeriod,
-            expected:         &ResourceConfig{CPUShares: &burstablePartialShares},
-        },
-        "guaranteed": {
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
-                        },
-                    },
-                },
-            },
-            enforceCPULimits: true,
-            quotaPeriod:      defaultQuotaPeriod,
-            expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
-        },
-        "guaranteed-no-cpu-enforcement": {
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
-                        },
-                    },
-                },
-            },
-            enforceCPULimits: false,
-            quotaPeriod:      defaultQuotaPeriod,
-            expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
-        },
-        "guaranteed-with-tuned-quota": {
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
-                        },
-                    },
-                },
-            },
-            enforceCPULimits: true,
-            quotaPeriod:      tunedQuotaPeriod,
-            expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedTunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
-        },
-        "guaranteed-no-cpu-enforcement-with-tuned-quota": {
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
-                        },
-                    },
-                },
-            },
-            enforceCPULimits: false,
-            quotaPeriod:      tunedQuotaPeriod,
-            expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
-        },
-    }
-
-    for testName, testCase := range testCases {
-
-        actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod, false)
-
-        if !reflect.DeepEqual(actual.CPUPeriod, testCase.expected.CPUPeriod) {
-            t.Errorf("unexpected result, test: %v, cpu period not as expected", testName)
-        }
-        if !reflect.DeepEqual(actual.CPUQuota, testCase.expected.CPUQuota) {
-            t.Errorf("unexpected result, test: %v, cpu quota not as expected", testName)
-        }
-        if !reflect.DeepEqual(actual.CPUShares, testCase.expected.CPUShares) {
-            t.Errorf("unexpected result, test: %v, cpu shares not as expected", testName)
-        }
-        if !reflect.DeepEqual(actual.Memory, testCase.expected.Memory) {
-            t.Errorf("unexpected result, test: %v, memory not as expected", testName)
-        }
-    }
+    for _, testCase := range testCases {
+        t.Run(testCase.description, func(t *testing.T) {
+            featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.PodLevelResources, testCase.podLevelResourcesEnabled)
+            actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod, false)
+            if !reflect.DeepEqual(actual.CPUPeriod, testCase.expected.CPUPeriod) {
+                t.Errorf("cpu period not as expected. Expected: %v, Actual:%v", *testCase.expected.CPUPeriod, *actual.CPUPeriod)
+            }
+            if !reflect.DeepEqual(actual.CPUQuota, testCase.expected.CPUQuota) {
+                t.Errorf("cpu quota not as expected. Expected: %v, Actual:%v", *testCase.expected.CPUQuota, *actual.CPUQuota)
+            }
+            if !reflect.DeepEqual(actual.CPUShares, testCase.expected.CPUShares) {
+                t.Errorf("cpu shares not as expected. Expected: %v, Actual:%v", *testCase.expected.CPUShares, *actual.CPUShares)
+            }
+            if !reflect.DeepEqual(actual.Memory, testCase.expected.Memory) {
+                t.Errorf("memory not as expected. Expected: %v, Actual:%v", *testCase.expected.Memory, *actual.Memory)
+            }
+        })
+    }
 }
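The hunk above folds the custom-period cases into the main table and replaces the name-keyed map with a slice of described cases run as named subtests. A self-contained sketch of that table-driven t.Run pattern, with illustrative fields:

package example

import "testing"

func TestTableDriven(t *testing.T) {
	testCases := []struct {
		description string
		input       int
		expected    int
	}{
		{description: "zero stays zero", input: 0, expected: 0},
		{description: "doubles positive input", input: 2, expected: 4},
	}
	for _, tc := range testCases {
		// Each case becomes a named subtest: failures report the description,
		// and per-case setup (e.g. feature gates) stays scoped to the subtest.
		t.Run(tc.description, func(t *testing.T) {
			if got := tc.input * 2; got != tc.expected {
				t.Errorf("expected %d, got %d", tc.expected, got)
			}
		})
	}
}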
@@ -48,7 +48,7 @@
 | ContainerStopSignals | | | 1.33– | | | | | [code](https://cs.k8s.io/?q=%5CbContainerStopSignals%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbContainerStopSignals%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
 | CoordinatedLeaderElection | | | 1.31–1.32 | 1.33– | | | | [code](https://cs.k8s.io/?q=%5CbCoordinatedLeaderElection%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbCoordinatedLeaderElection%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
 | CrossNamespaceVolumeDataSource | | | 1.26– | | | | | [code](https://cs.k8s.io/?q=%5CbCrossNamespaceVolumeDataSource%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbCrossNamespaceVolumeDataSource%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
-| CustomCPUCFSQuotaPeriod | | | 1.12– | | | | | [code](https://cs.k8s.io/?q=%5CbCustomCPUCFSQuotaPeriod%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbCustomCPUCFSQuotaPeriod%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
+| CustomCPUCFSQuotaPeriod | :ballot_box_with_check: 1.36+ | | 1.12–1.35 | | 1.36– | | | [code](https://cs.k8s.io/?q=%5CbCustomCPUCFSQuotaPeriod%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbCustomCPUCFSQuotaPeriod%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
 | CustomResourceFieldSelectors | :ballot_box_with_check: 1.31+ | :closed_lock_with_key: 1.32+ | 1.30 | 1.31 | 1.32– | | | [code](https://cs.k8s.io/?q=%5CbCustomResourceFieldSelectors%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbCustomResourceFieldSelectors%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
 | DRAAdminAccess | :ballot_box_with_check: 1.34+ | | 1.32–1.33 | 1.34– | | | DynamicResourceAllocation | [code](https://cs.k8s.io/?q=%5CbDRAAdminAccess%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDRAAdminAccess%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
 | DRAConsumableCapacity | | | 1.34– | | | | DynamicResourceAllocation | [code](https://cs.k8s.io/?q=%5CbDRAConsumableCapacity%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDRAConsumableCapacity%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
@@ -65,7 +65,7 @@
 | DeploymentReplicaSetTerminatingReplicas | :ballot_box_with_check: 1.35+ | | 1.33–1.34 | 1.35– | | | | [code](https://cs.k8s.io/?q=%5CbDeploymentReplicaSetTerminatingReplicas%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDeploymentReplicaSetTerminatingReplicas%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
 | DetectCacheInconsistency | :ballot_box_with_check: 1.34+ | | | 1.34– | | | | [code](https://cs.k8s.io/?q=%5CbDetectCacheInconsistency%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDetectCacheInconsistency%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
 | DisableAllocatorDualWrite | :ballot_box_with_check: 1.34+ | :closed_lock_with_key: 1.35+ | 1.31–1.32 | 1.33 | 1.34– | | MultiCIDRServiceAllocator | [code](https://cs.k8s.io/?q=%5CbDisableAllocatorDualWrite%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDisableAllocatorDualWrite%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
-| DisableCPUQuotaWithExclusiveCPUs | :ballot_box_with_check: 1.33+ | | | 1.33– | | | | [code](https://cs.k8s.io/?q=%5CbDisableCPUQuotaWithExclusiveCPUs%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDisableCPUQuotaWithExclusiveCPUs%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
+| DisableCPUQuotaWithExclusiveCPUs | :ballot_box_with_check: 1.33+ | | | 1.33–1.35 | | 1.36– | | [code](https://cs.k8s.io/?q=%5CbDisableCPUQuotaWithExclusiveCPUs%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDisableCPUQuotaWithExclusiveCPUs%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
 | DisableNodeKubeProxyVersion | :ballot_box_with_check: 1.33+ | :closed_lock_with_key: 1.36+ | 1.29–1.30 | | | 1.31– | | [code](https://cs.k8s.io/?q=%5CbDisableNodeKubeProxyVersion%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDisableNodeKubeProxyVersion%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
 | DynamicResourceAllocation | :ballot_box_with_check: 1.34+ | :closed_lock_with_key: 1.35+ | 1.26–1.31 | 1.32–1.33 | 1.34– | | | [code](https://cs.k8s.io/?q=%5CbDynamicResourceAllocation%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDynamicResourceAllocation%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
 | EnvFiles | :ballot_box_with_check: 1.35+ | | 1.34 | 1.35– | | | | [code](https://cs.k8s.io/?q=%5CbEnvFiles%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbEnvFiles%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
@@ -321,6 +321,10 @@
     lockToDefault: false
     preRelease: Alpha
     version: "1.12"
+  - default: true
+    lockToDefault: false
+    preRelease: GA
+    version: "1.36"
 - name: CPUManagerPolicyAlphaOptions
   versionedSpecs:
   - default: false
@@ -459,6 +463,10 @@
     lockToDefault: false
     preRelease: Beta
     version: "1.33"
+  - default: true
+    lockToDefault: false
+    preRelease: Deprecated
+    version: "1.36"
 - name: DisableNodeKubeProxyVersion
   versionedSpecs:
   - default: false
@@ -1859,6 +1859,69 @@ var _ = SIGDescribe("CPU Manager", ginkgo.Ordered, ginkgo.ContinueOnFailure, fra
         })
     })

+    ginkgo.When("using non-default CPU CFS quota period", ginkgo.Label("cfs-period", "cfs-quota"), func() {
+        var cfsPeriod time.Duration
+        var testCFSPeriod string
+
+        ginkgo.BeforeEach(func(ctx context.Context) {
+            requireCGroupV2()
+            // WARNING: this assumes 2-way SMT systems - we don't know how to access other SMT levels.
+            // this means on more-than-2-way SMT systems this test will prove nothing
+            reservedCPUs = cpuset.New(0)
+
+            // the default QuotaPeriod (cm.QuotaPeriod) is expressed in microseconds; halve it for the test
+            cfsPeriod = time.Duration((cm.QuotaPeriod / 2) * time.Microsecond)
+
+            updateKubeletConfigIfNeeded(ctx, f, configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{
+                policyName:              string(cpumanager.PolicyStatic),
+                reservedSystemCPUs:      reservedCPUs,
+                customCPUCFSQuotaPeriod: cfsPeriod, // TODO: should we need to do this per-test?
+            }))
+
+            // cfsPeriod is a time.Duration (nanoseconds); the kernel exposes the period in microseconds
+            testCFSPeriod = strconv.FormatInt(int64(cfsPeriod/1000), 10)
+        })
+
+        ginkgo.It("should enforce for guaranteed pod", func(ctx context.Context) {
+            cpuCount := 1 // overshoot, minimum request is 1
+            skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), cpuCount)
+
+            ctnName := "gu-container-cfsquota-enabled"
+            pod := makeCPUManagerPod("gu-pod-cfs-quota-on", []ctnAttribute{
+                {
+                    ctnName:    ctnName,
+                    cpuRequest: "500m",
+                    cpuLimit:   "500m",
+                },
+            })
+            ginkgo.By("creating the test pod")
+            pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
+            podMap[string(pod.UID)] = pod
+
+            gomega.Expect(pod).To(HaveSandboxQuotaWithPeriod("25000", testCFSPeriod))
+            gomega.Expect(pod).To(HaveContainerQuotaWithPeriod(ctnName, "25000", testCFSPeriod))
+        })
+
+        ginkgo.It("should enforce for burstable pod", func(ctx context.Context) {
+            skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), 0)
+
+            ctnName := "bu-container-cfsquota-enabled"
+            pod := makeCPUManagerPod("bu-pod-cfs-quota-on", []ctnAttribute{
+                {
+                    ctnName:    ctnName,
+                    cpuRequest: "100m",
+                    cpuLimit:   "500m",
+                },
+            })
+            ginkgo.By("creating the test pod")
+            pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
+            podMap[string(pod.UID)] = pod
+
+            gomega.Expect(pod).To(HaveSandboxQuotaWithPeriod("25000", testCFSPeriod))
+            gomega.Expect(pod).To(HaveContainerQuotaWithPeriod(ctnName, "25000", testCFSPeriod))
+        })
+    })
+
     f.Context("When checking the sidecar containers", framework.WithNodeConformance(), func() {
         ginkgo.BeforeEach(func(ctx context.Context) {
             reservedCPUs = cpuset.New(0)
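The "25000"/testCFSPeriod expectations in the specs above follow directly from halving the default period. A small sketch of the arithmetic, assuming cm.QuotaPeriod is 100000µs (100ms):

package main

import "fmt"

func main() {
	const assumedQuotaPeriodUS = 100000 // assumption: cm.QuotaPeriod, in microseconds
	periodUS := int64(assumedQuotaPeriodUS / 2)
	quotaUS := (500 * periodUS) / 1000 // a 500m CPU limit gets half a period of runtime
	fmt.Printf("expected cpu.max: %d %d\n", quotaUS, periodUS) // 25000 50000
}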
@@ -2156,10 +2219,8 @@ func HaveContainerCPUsEqualTo(ctnName string, expectedCPUs cpuset.CPUSet) types.
     }).WithTemplate("Pod {{.Actual.Namespace}}/{{.Actual.Name}} UID {{.Actual.UID}} has allowed CPUs <{{.Data.CurrentCPUs}}> not matching the expected value <{{.Data.ExpectedCPUs}}> for container {{.Data.Name}}", md)
 }

-func HaveSandboxQuota(expectedQuota string) types.GomegaMatcher {
-    md := &msgData{
-        ExpectedQuota: expectedQuota,
-    }
+func HaveSandboxQuotaWithPeriod(expectedQuota, cfsPeriod string) types.GomegaMatcher {
+    md := &msgData{}
     return gcustom.MakeMatcher(func(actual *v1.Pod) (bool, error) {
         md.Name = klog.KObj(actual).String()
         quota, err := getSandboxCFSQuota(actual)
@@ -2168,7 +2229,8 @@ func HaveSandboxQuotaWithPeriod(expectedQuota, cfsPeriod string) types.GomegaMatcher {
             framework.Logf("getSandboxCFSQuota() failed: %v", err)
             return false, err
         }
-        re, err := regexp.Compile(fmt.Sprintf("^%s %s$", expectedQuota, defaultCFSPeriod))
+        md.ExpectedQuota = fmt.Sprintf("^%s %s$", expectedQuota, cfsPeriod)
+        re, err := regexp.Compile(md.ExpectedQuota)
         if err != nil {
             return false, err
         }
@@ -2176,10 +2238,9 @@ func HaveSandboxQuotaWithPeriod(expectedQuota, cfsPeriod string) types.GomegaMatcher {
     }).WithTemplate("Pod {{.Actual.Namespace}}/{{.Actual.Name}} UID {{.Actual.UID}} has quota <{{.Data.CurrentQuota}}> not matching expected value <{{.Data.ExpectedQuota}}>", md)
 }

-func HaveContainerQuota(ctnName, expectedQuota string) types.GomegaMatcher {
+func HaveContainerQuotaWithPeriod(ctnName, expectedQuota, cfsPeriod string) types.GomegaMatcher {
     md := &msgData{
-        Name:          ctnName,
-        ExpectedQuota: expectedQuota,
+        Name: ctnName,
     }
     return gcustom.MakeMatcher(func(actual *v1.Pod) (bool, error) {
         quota, err := getContainerCFSQuota(actual, ctnName, false)
@@ -2188,7 +2249,8 @@ func HaveContainerQuotaWithPeriod(ctnName, expectedQuota, cfsPeriod string) types.GomegaMatcher {
             framework.Logf("getContainerCFSQuota(%s) failed: %v", ctnName, err)
             return false, err
         }
-        re, err := regexp.Compile(fmt.Sprintf("^%s %s$", expectedQuota, defaultCFSPeriod))
+        md.ExpectedQuota = fmt.Sprintf("^%s %s$", expectedQuota, cfsPeriod)
+        re, err := regexp.Compile(md.ExpectedQuota)
         if err != nil {
             return false, err
         }
@@ -2196,6 +2258,14 @@ func HaveContainerQuotaWithPeriod(ctnName, expectedQuota, cfsPeriod string) types.GomegaMatcher {
     }).WithTemplate("Pod {{.Actual.Namespace}}/{{.Actual.Name}} UID {{.Actual.UID}} has quota <{{.Data.CurrentQuota}}> not matching expected value <{{.Data.ExpectedQuota}}> for container {{.Data.Name}}", md)
 }

+func HaveSandboxQuota(expectedQuota string) types.GomegaMatcher {
+    return HaveSandboxQuotaWithPeriod(expectedQuota, defaultCFSPeriod)
+}
+
+func HaveContainerQuota(ctnName, expectedQuota string) types.GomegaMatcher {
+    return HaveContainerQuotaWithPeriod(ctnName, expectedQuota, defaultCFSPeriod)
+}
+
 func HaveContainerCPUsThreadSiblings(ctnName string) types.GomegaMatcher {
     md := &msgData{
         Name: ctnName,
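All four matchers above share one shape: a gcustom.MakeMatcher callback that anchors a "<quota> <period>" regexp against what the cgroup file reports. A stripped-down, self-contained sketch of that shape; names here are illustrative, not the e2e helpers themselves:

package example

import (
	"fmt"
	"regexp"
	"testing"

	"github.com/onsi/gomega"
	"github.com/onsi/gomega/gcustom"
	"github.com/onsi/gomega/types"
)

// haveQuotaWithPeriod matches a "<quota> <period>" string, anchored, the
// way the matchers above check a cgroup cpu.max value.
func haveQuotaWithPeriod(expectedQuota, period string) types.GomegaMatcher {
	return gcustom.MakeMatcher(func(actual string) (bool, error) {
		re, err := regexp.Compile(fmt.Sprintf("^%s %s$", expectedQuota, period))
		if err != nil {
			return false, err
		}
		return re.MatchString(actual), nil
	})
}

func TestSketch(t *testing.T) {
	g := gomega.NewWithT(t)
	g.Expect("25000 50000").To(haveQuotaWithPeriod("25000", "50000"))
}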
@@ -2801,6 +2871,7 @@ type cpuManagerKubeletArguments struct {
     enableCPUManagerOptions          bool
     disableCPUQuotaWithExclusiveCPUs bool
     enablePodLevelResources          bool
+    customCPUCFSQuotaPeriod          time.Duration
     reservedSystemCPUs               cpuset.CPUSet
     options                          map[string]string
 }
@@ -2816,6 +2887,13 @@ func configureCPUManagerInKubelet(oldCfg *kubeletconfig.KubeletConfiguration, ku
     newCfg.FeatureGates["DisableCPUQuotaWithExclusiveCPUs"] = kubeletArguments.disableCPUQuotaWithExclusiveCPUs
     newCfg.FeatureGates["PodLevelResources"] = kubeletArguments.enablePodLevelResources

+    if kubeletArguments.customCPUCFSQuotaPeriod != 0 {
+        newCfg.FeatureGates["CustomCPUCFSQuotaPeriod"] = true
+        newCfg.CPUCFSQuotaPeriod.Duration = kubeletArguments.customCPUCFSQuotaPeriod
+    } else {
+        newCfg.FeatureGates["CustomCPUCFSQuotaPeriod"] = false
+    }
+
     newCfg.CPUManagerPolicy = kubeletArguments.policyName
     newCfg.CPUManagerReconcilePeriod = metav1.Duration{Duration: 1 * time.Second}