DRA: e2e: move util_sampledevice.go to e2e/framework/node

- Moved sample device plugin constants and helper code to
test/e2e/framework/node, so that both the deviceplugin and DRA tests can
use it without creating an e2e -> e2e_node dependency.

- Moved the SampleDevsAmount constant out of
test/e2e_node/device_plugin_test.go into the same shared package.
This commit is contained in:
Ed Bartosh 2026-02-10 14:04:27 +02:00
parent 669b1de008
commit 02485d02ea
7 changed files with 70 additions and 70 deletions

View file

@ -23,6 +23,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -56,9 +57,9 @@ func deployDevicePlugin(ctx context.Context, f *framework.Framework, nodeNames [
}
}
return nil
}, "test/e2e/testing-manifests/sample-device-plugin/sample-device-plugin.yaml")
}, e2enode.SampleDevicePluginDSYAML)
framework.ExpectNoError(err, "deploy example device plugin DaemonSet")
// Hard-coded in https://github.com/kubernetes/kubernetes/blob/111a2a0d2dfe13639724506f674bc4f342ccfbab/test/images/sample-device-plugin/sampledeviceplugin.go#L34C17-L34C39.
return "example.com/resource"
return e2enode.SampleDeviceResourceName
}

View file

@ -44,6 +44,7 @@ import (
"k8s.io/klog/v2"
"k8s.io/kubernetes/test/e2e/dra/test-driver/app"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/utils/ktesting"
admissionapi "k8s.io/pod-security-admission/api"
@ -52,15 +53,12 @@ import (
// ExtendedResourceName returns hard coded extended resource name with a variable
// suffix from the input integer when it's greater than or equal to 0.
// "example.com/resource" is not special, any valid extended resource name can be used
// instead, except when using example device plugin in the test, which hard coded it,
// see test/e2e/dra/deploy_device_plugin.go.
// i == -1 == SingletonIndex is special, the extended resource name has no extra suffix
// and matches the one used by the example device plugin.
func (b *Builder) ExtendedResourceName(i int) string {
switch i {
case SingletonIndex:
return "example.com/resource"
return e2enode.SampleDeviceResourceName
default:
return b.driver.Name + "/resource" + fmt.Sprintf("-%d", i)
}

View file

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package e2enode
package node
import (
v1 "k8s.io/api/core/v1"
@ -32,6 +32,10 @@ const (
SampleDeviceResourceName = "example.com/resource"
SampleDeviceEnvVarNamePluginSockDir = "PLUGIN_SOCK_DIR"
// SampleDevsAmount is an amount of devices provided by the sample device plugin:
// https://github.com/kubernetes/kubernetes/blob/111a2a0d2dfe13639724506f674bc4f342ccfbab/test/images/sample-device-plugin/sampledeviceplugin.go#L100-L101
SampleDevsAmount int64 = 2
)
// CountSampleDeviceCapacity returns the number of devices of SampleDeviceResourceName advertised by a node capacity

View file

@ -132,7 +132,7 @@ var _ = SIGDescribe("Device Manager", framework.WithSerial(), feature.DeviceMana
}
ginkgo.By("Scheduling a sample device plugin pod")
data, err := e2etestfiles.Read(SampleDevicePluginControlRegistrationDSYAML)
data, err := e2etestfiles.Read(e2enode.SampleDevicePluginControlRegistrationDSYAML)
if err != nil {
framework.Fail(err.Error())
}
@ -140,7 +140,7 @@ var _ = SIGDescribe("Device Manager", framework.WithSerial(), feature.DeviceMana
dp := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: SampleDevicePluginName,
Name: e2enode.SampleDevicePluginName,
},
Spec: ds.Spec.Template.Spec,
}
@ -182,9 +182,9 @@ var _ = SIGDescribe("Device Manager", framework.WithSerial(), feature.DeviceMana
var err error
podCMD := "while true; do sleep 1000; done;"
ginkgo.By(fmt.Sprintf("creating a pods requiring %d %q", deviceCount, SampleDeviceResourceName))
ginkgo.By(fmt.Sprintf("creating a pods requiring %d %q", deviceCount, e2enode.SampleDeviceResourceName))
pod := makeBusyboxDeviceRequiringPod(SampleDeviceResourceName, podCMD)
pod := makeBusyboxDeviceRequiringPod(e2enode.SampleDeviceResourceName, podCMD)
testPod := e2epod.NewPodClient(f).CreateSync(ctx, pod)
ginkgo.By("making sure all the pods are ready")
@ -341,7 +341,7 @@ func isNodeReadyWithSampleResources(ctx context.Context, f *framework.Framework)
return false, fmt.Errorf("expected node to be ready=%t", ready)
}
if CountSampleDeviceCapacity(node) <= 0 {
if e2enode.CountSampleDeviceCapacity(node) <= 0 {
return false, fmt.Errorf("expected devices to be advertised")
}
return true, nil
@ -374,11 +374,11 @@ func isNodeReadyWithAllocatableSampleResources(ctx context.Context, f *framework
return false, fmt.Errorf("expected node to be ready=%t", ready)
}
if CountSampleDeviceCapacity(node) != devCount {
if e2enode.CountSampleDeviceCapacity(node) != devCount {
return false, fmt.Errorf("expected devices capacity to be: %d", devCount)
}
if CountSampleDeviceAllocatable(node) != devCount {
if e2enode.CountSampleDeviceAllocatable(node) != devCount {
return false, fmt.Errorf("expected devices allocatable to be: %d", devCount)
}
return true, nil
@ -390,7 +390,7 @@ func isNodeReadyWithoutSampleResources(ctx context.Context, f *framework.Framewo
return false, fmt.Errorf("expected node to be ready=%t", ready)
}
if CountSampleDeviceCapacity(node) > 0 {
if e2enode.CountSampleDeviceCapacity(node) > 0 {
return false, fmt.Errorf("expected devices to be not present")
}
return true, nil

View file

@ -80,10 +80,6 @@ func readDaemonSetV1OrDie(objBytes []byte) *appsv1.DaemonSet {
}
const (
// TODO(vikasc): Instead of hard-coding number of devices, provide number of devices in the sample-device-plugin using configmap
// and then use the same here
expectedSampleDevsAmount int64 = 2
// This is the sleep interval specified in the command executed in the pod to ensure container is running "forever" in the test timescale
sleepIntervalForever string = "24h"
@ -181,16 +177,16 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
ginkgo.By("Waiting for devices to become available on the local node")
gomega.Eventually(ctx, func(ctx context.Context) bool {
node, ready := getLocalTestNode(ctx, f)
return ready && CountSampleDeviceCapacity(node) > 0
return ready && e2enode.CountSampleDeviceCapacity(node) > 0
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected devices to be available on local node"))
framework.Logf("Successfully created device plugin pod")
ginkgo.By(fmt.Sprintf("Waiting for the resource exported by the sample device plugin to become available on the local node (instances: %d)", expectedSampleDevsAmount))
ginkgo.By(fmt.Sprintf("Waiting for the resource exported by the sample device plugin to become available on the local node (instances: %d)", e2enode.SampleDevsAmount))
gomega.Eventually(ctx, func(ctx context.Context) bool {
node, ready := getLocalTestNode(ctx, f)
return ready &&
CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
e2enode.CountSampleDeviceCapacity(node) == e2enode.SampleDevsAmount &&
e2enode.CountSampleDeviceAllocatable(node) == e2enode.SampleDevsAmount
}, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available on local node"))
})
@ -215,7 +211,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
ginkgo.By("Waiting for devices to become unavailable on the local node")
gomega.Eventually(ctx, func(ctx context.Context) bool {
node, ready := getLocalTestNode(ctx, f)
return ready && CountSampleDeviceCapacity(node) <= 0
return ready && e2enode.CountSampleDeviceCapacity(node) <= 0
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected devices to be unavailable on local node"))
ginkgo.By("devices now unavailable on the local node")
@ -223,7 +219,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
ginkgo.It("Can schedule a pod that requires a device", func(ctx context.Context) {
podRECMD := fmt.Sprintf("devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s", sleepIntervalWithRestart)
pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(SampleDeviceResourceName, podRECMD))
pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(e2enode.SampleDeviceResourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
devID1, err := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
framework.ExpectNoError(err, "getting logs for pod %q", pod1.Name)
@ -275,8 +271,8 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
gomega.Expect(v1alphaResourcesForOurPod.Containers[0].Devices).To(gomega.HaveLen(1))
gomega.Expect(v1ResourcesForOurPod.Containers[0].Devices).To(gomega.HaveLen(1))
gomega.Expect(v1alphaResourcesForOurPod.Containers[0].Devices[0].ResourceName).To(gomega.Equal(SampleDeviceResourceName))
gomega.Expect(v1ResourcesForOurPod.Containers[0].Devices[0].ResourceName).To(gomega.Equal(SampleDeviceResourceName))
gomega.Expect(v1alphaResourcesForOurPod.Containers[0].Devices[0].ResourceName).To(gomega.Equal(e2enode.SampleDeviceResourceName))
gomega.Expect(v1ResourcesForOurPod.Containers[0].Devices[0].ResourceName).To(gomega.Equal(e2enode.SampleDeviceResourceName))
gomega.Expect(v1alphaResourcesForOurPod.Containers[0].Devices[0].DeviceIds).To(gomega.HaveLen(1))
gomega.Expect(v1ResourcesForOurPod.Containers[0].Devices[0].DeviceIds).To(gomega.HaveLen(1))
@ -285,7 +281,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
f.It("can make a CDI device accessible in a container", func(ctx context.Context) {
// check if CDI_DEVICE env variable is set
// and only one correspondent device node /tmp/<CDI_DEVICE> is available inside a container
podObj := makeBusyboxPod(SampleDeviceResourceName, "[ $(ls /tmp/CDI-Dev-[1,2] | wc -l) -eq 1 -a -b /tmp/$CDI_DEVICE ]")
podObj := makeBusyboxPod(e2enode.SampleDeviceResourceName, "[ $(ls /tmp/CDI-Dev-[1,2] | wc -l) -eq 1 -a -b /tmp/$CDI_DEVICE ]")
podObj.Spec.RestartPolicy = v1.RestartPolicyNever
pod := e2epod.NewPodClient(f).Create(ctx, podObj)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace))
@ -296,7 +292,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
// restarts. For the sake of brevity we however check just the fist restart.
ginkgo.It("Keeps device plugin assignments across pod restarts (no kubelet restart, no device plugin restart)", func(ctx context.Context) {
podRECMD := fmt.Sprintf("devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s", sleepIntervalWithRestart)
pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(SampleDeviceResourceName, podRECMD))
pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(e2enode.SampleDeviceResourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
devID1, err := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
framework.ExpectNoError(err, "getting logs for pod %q", pod1.Name)
@ -321,11 +317,11 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
if err != nil {
framework.ExpectNoError(err, "getting pod resources assignment after pod restart")
}
err, _ = checkPodResourcesAssignment(v1PodResources, pod1.Namespace, pod1.Name, pod1.Spec.Containers[0].Name, SampleDeviceResourceName, []string{devID1})
err, _ = checkPodResourcesAssignment(v1PodResources, pod1.Namespace, pod1.Name, pod1.Spec.Containers[0].Name, e2enode.SampleDeviceResourceName, []string{devID1})
framework.ExpectNoError(err, "inconsistent device assignment after pod restart")
ginkgo.By("Creating another pod")
pod2 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(SampleDeviceResourceName, podRECMD))
pod2 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(e2enode.SampleDeviceResourceName, podRECMD))
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod2.Name, f.Namespace.Name, 1*time.Minute)
framework.ExpectNoError(err)
@ -340,9 +336,9 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
if err != nil {
framework.ExpectNoError(err, "getting pod resources assignment after pod restart")
}
err, _ = checkPodResourcesAssignment(v1PodResources, pod1.Namespace, pod1.Name, pod1.Spec.Containers[0].Name, SampleDeviceResourceName, []string{devID1})
err, _ = checkPodResourcesAssignment(v1PodResources, pod1.Namespace, pod1.Name, pod1.Spec.Containers[0].Name, e2enode.SampleDeviceResourceName, []string{devID1})
framework.ExpectNoError(err, "inconsistent device assignment after extra container restart - pod1")
err, _ = checkPodResourcesAssignment(v1PodResources, pod2.Namespace, pod2.Name, pod2.Spec.Containers[0].Name, SampleDeviceResourceName, []string{devID2})
err, _ = checkPodResourcesAssignment(v1PodResources, pod2.Namespace, pod2.Name, pod2.Spec.Containers[0].Name, e2enode.SampleDeviceResourceName, []string{devID2})
framework.ExpectNoError(err, "inconsistent device assignment after extra container restart - pod2")
})
@ -352,7 +348,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
// and both the device plugin and the actual consumer (container) are stable.
ginkgo.It("Keeps device plugin assignments across kubelet restarts (no pod restart, no device plugin restart)", func(ctx context.Context) {
podRECMD := fmt.Sprintf("devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s", sleepIntervalForever)
pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(SampleDeviceResourceName, podRECMD))
pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(e2enode.SampleDeviceResourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
devID1, err := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
framework.ExpectNoError(err, "getting logs for pod %q", pod1.Name)
@ -372,8 +368,8 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
gomega.Eventually(ctx, func() bool {
node, ready := getLocalTestNode(ctx, f)
return ready &&
CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
e2enode.CountSampleDeviceCapacity(node) == e2enode.SampleDevsAmount &&
e2enode.CountSampleDeviceAllocatable(node) == e2enode.SampleDevsAmount
}, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available after restart"))
ginkgo.By("Checking the same instance of the pod is still running")
@ -397,7 +393,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
return err
}, 30*time.Second, framework.Poll).ShouldNot(gomega.HaveOccurred(), "cannot fetch the compute resource assignment after kubelet restart")
err, _ = checkPodResourcesAssignment(v1PodResources, pod2.Namespace, pod2.Name, pod2.Spec.Containers[0].Name, SampleDeviceResourceName, []string{devID1})
err, _ = checkPodResourcesAssignment(v1PodResources, pod2.Namespace, pod2.Name, pod2.Spec.Containers[0].Name, e2enode.SampleDeviceResourceName, []string{devID1})
framework.ExpectNoError(err, "inconsistent device assignment after pod restart")
})
@ -406,7 +402,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
// performs the device allocation, and both the device plugin is stable.
ginkgo.It("Keeps device plugin assignments across pod and kubelet restarts (no device plugin restart)", func(ctx context.Context) {
podRECMD := fmt.Sprintf("devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s", sleepIntervalWithRestart)
pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(SampleDeviceResourceName, podRECMD))
pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(e2enode.SampleDeviceResourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
devID1, err := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
framework.ExpectNoError(err, "getting logs for pod %q", pod1.Name)
@ -462,13 +458,13 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
return err
}, 30*time.Second, framework.Poll).ShouldNot(gomega.HaveOccurred(), "cannot fetch the compute resource assignment after kubelet restart")
err, _ = checkPodResourcesAssignment(v1PodResources, pod1.Namespace, pod1.Name, pod1.Spec.Containers[0].Name, SampleDeviceResourceName, []string{devID1})
err, _ = checkPodResourcesAssignment(v1PodResources, pod1.Namespace, pod1.Name, pod1.Spec.Containers[0].Name, e2enode.SampleDeviceResourceName, []string{devID1})
framework.ExpectNoError(err, "inconsistent device assignment after pod restart")
})
ginkgo.It("will not attempt to admit the succeeded pod after the kubelet restart and device plugin removed", func(ctx context.Context) {
podRECMD := fmt.Sprintf("devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s", sleepIntervalToCompletion)
podSpec := makeBusyboxPod(SampleDeviceResourceName, podRECMD)
podSpec := makeBusyboxPod(e2enode.SampleDeviceResourceName, podRECMD)
podSpec.Spec.RestartPolicy = v1.RestartPolicyNever
// Making sure the pod will not be garbage collected and will stay thru the kubelet restart after
// it reached the terminated state. Using finalizers makes the test more reliable.
@ -494,7 +490,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
waitForContainerRemoval(ctx, devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
gomega.Eventually(getNodeResourceValues, devicePluginGracefulTimeout, f.Timeouts.Poll).WithContext(ctx).WithArguments(SampleDeviceResourceName).Should(gomega.Equal(ResourceValue{Allocatable: 0, Capacity: int(expectedSampleDevsAmount)}))
gomega.Eventually(getNodeResourceValues, devicePluginGracefulTimeout, f.Timeouts.Poll).WithContext(ctx).WithArguments(e2enode.SampleDeviceResourceName).Should(gomega.Equal(ResourceValue{Allocatable: 0, Capacity: int(e2enode.SampleDevsAmount)}))
ginkgo.By("Restarting Kubelet")
restartKubelet(ctx, true)
@ -521,7 +517,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
// test should pass.
ginkgo.It("Keeps device plugin assignments after the device plugin has restarted (no kubelet restart, pod restart)", func(ctx context.Context) {
podRECMD := fmt.Sprintf("devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s", sleepIntervalForever)
pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(SampleDeviceResourceName, podRECMD))
pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(e2enode.SampleDeviceResourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
devID1, err := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
framework.ExpectNoError(err, "getting logs for pod %q", pod1.Name)
@ -550,8 +546,8 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
gomega.Eventually(ctx, func() bool {
node, ready := getLocalTestNode(ctx, f)
return ready &&
CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
e2enode.CountSampleDeviceCapacity(node) == e2enode.SampleDevsAmount &&
e2enode.CountSampleDeviceAllocatable(node) == e2enode.SampleDevsAmount
}, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available after re-registration"))
// crosscheck that after device plugin restart the device assignment is preserved and
@ -565,7 +561,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
return err
}, 30*time.Second, framework.Poll).ShouldNot(gomega.HaveOccurred(), "cannot fetch the compute resource assignment after kubelet restart")
err, _ = checkPodResourcesAssignment(v1PodResources, pod1.Namespace, pod1.Name, pod1.Spec.Containers[0].Name, SampleDeviceResourceName, []string{devID1})
err, _ = checkPodResourcesAssignment(v1PodResources, pod1.Namespace, pod1.Name, pod1.Spec.Containers[0].Name, e2enode.SampleDeviceResourceName, []string{devID1})
framework.ExpectNoError(err, "inconsistent device assignment after pod restart")
})
@ -574,7 +570,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
// orchestrate the device allocation: the actual consumer (container) is stable.
ginkgo.It("Keeps device plugin assignments after kubelet restart and device plugin restart (no pod restart)", func(ctx context.Context) {
podRECMD := fmt.Sprintf("devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s", sleepIntervalForever) // the pod has to run "forever" in the timescale of this test
pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(SampleDeviceResourceName, podRECMD))
pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(e2enode.SampleDeviceResourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
devID1, err := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
framework.ExpectNoError(err, "getting logs for pod %q", pod1.Name)
@ -607,7 +603,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
return err
}, 30*time.Second, framework.Poll).ShouldNot(gomega.HaveOccurred(), "cannot fetch the compute resource assignment after kubelet restart")
err, _ = checkPodResourcesAssignment(v1PodResources, pod1.Namespace, pod1.Name, pod1.Spec.Containers[0].Name, SampleDeviceResourceName, []string{devID1})
err, _ = checkPodResourcesAssignment(v1PodResources, pod1.Namespace, pod1.Name, pod1.Spec.Containers[0].Name, e2enode.SampleDeviceResourceName, []string{devID1})
framework.ExpectNoError(err, "inconsistent device assignment after pod restart")
ginkgo.By("Re-Register resources by deleting the plugin pod")
@ -625,8 +621,8 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
gomega.Eventually(ctx, func() bool {
node, ready := getLocalTestNode(ctx, f)
return ready &&
CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
e2enode.CountSampleDeviceCapacity(node) == e2enode.SampleDevsAmount &&
e2enode.CountSampleDeviceAllocatable(node) == e2enode.SampleDevsAmount
}, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available after restart"))
ginkgo.By("Checking the same instance of the pod is still running after the device plugin restart")
@ -639,7 +635,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
ginkgo.It("[OrphanedPods] Ensures pods consuming devices deleted while kubelet is down are cleaned up correctly", func(ctx context.Context) {
podRECMD := fmt.Sprintf("devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s", sleepIntervalWithRestart)
pod := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(SampleDeviceResourceName, podRECMD))
pod := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(e2enode.SampleDeviceResourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
devID, err := parseLog(ctx, f, pod.Name, pod.Name, deviceIDRE)
@ -687,7 +683,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
v1PodResources, err = getV1NodeDevices(ctx)
return err
}, 30*time.Second, framework.Poll).ShouldNot(gomega.HaveOccurred(), "cannot fetch the compute resource assignment after kubelet restart")
err, allocated := checkPodResourcesAssignment(v1PodResources, pod.Namespace, pod.Name, pod.Spec.Containers[0].Name, SampleDeviceResourceName, []string{})
err, allocated := checkPodResourcesAssignment(v1PodResources, pod.Namespace, pod.Name, pod.Spec.Containers[0].Name, e2enode.SampleDeviceResourceName, []string{})
if err == nil || allocated {
framework.Fail(fmt.Sprintf("stale device assignment after pod deletion while kubelet was down allocated=%v error=%v", allocated, err))
}
@ -696,7 +692,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
f.It("Can schedule a pod with a restartable init container", framework.WithNodeConformance(), func(ctx context.Context) {
podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s"
sleepOneSecond := "1s"
rl := v1.ResourceList{v1.ResourceName(SampleDeviceResourceName): *resource.NewQuantity(1, resource.DecimalSI)}
rl := v1.ResourceList{v1.ResourceName(e2enode.SampleDeviceResourceName): *resource.NewQuantity(1, resource.DecimalSI)}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "device-plugin-test-" + string(uuid.NewUUID())},
Spec: v1.PodSpec{
@ -778,11 +774,11 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
for _, container := range resourcesForOurPod.Containers {
if container.Name == pod1.Spec.InitContainers[1].Name {
gomega.Expect(container.Devices).To(gomega.HaveLen(1))
gomega.Expect(container.Devices[0].ResourceName).To(gomega.Equal(SampleDeviceResourceName))
gomega.Expect(container.Devices[0].ResourceName).To(gomega.Equal(e2enode.SampleDeviceResourceName))
gomega.Expect(container.Devices[0].DeviceIds).To(gomega.HaveLen(1))
} else if container.Name == pod1.Spec.Containers[0].Name {
gomega.Expect(container.Devices).To(gomega.HaveLen(1))
gomega.Expect(container.Devices[0].ResourceName).To(gomega.Equal(SampleDeviceResourceName))
gomega.Expect(container.Devices[0].ResourceName).To(gomega.Equal(e2enode.SampleDeviceResourceName))
gomega.Expect(container.Devices[0].DeviceIds).To(gomega.HaveLen(1))
} else {
framework.Failf("unexpected container name: %s", container.Name)
@ -854,15 +850,15 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
}
ginkgo.By("Scheduling a sample device plugin pod")
data, err := e2etestfiles.Read(SampleDevicePluginControlRegistrationDSYAML)
data, err := e2etestfiles.Read(e2enode.SampleDevicePluginControlRegistrationDSYAML)
if err != nil {
framework.Fail(fmt.Sprintf("error reading test data %q: %v", SampleDevicePluginControlRegistrationDSYAML, err))
framework.Fail(fmt.Sprintf("error reading test data %q: %v", e2enode.SampleDevicePluginControlRegistrationDSYAML, err))
}
ds := readDaemonSetV1OrDie(data)
dp := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: SampleDevicePluginName,
Name: e2enode.SampleDevicePluginName,
},
Spec: ds.Spec.Template.Spec,
}
@ -892,16 +888,16 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
ginkgo.By("Waiting for devices to become available on the local node")
gomega.Eventually(ctx, func(ctx context.Context) bool {
node, ready := getLocalTestNode(ctx, f)
return ready && CountSampleDeviceCapacity(node) > 0
return ready && e2enode.CountSampleDeviceCapacity(node) > 0
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected devices to be available on the local node"))
framework.Logf("Successfully created device plugin pod")
ginkgo.By(fmt.Sprintf("Waiting for the resource exported by the sample device plugin to become available on the local node (instances: %d)", expectedSampleDevsAmount))
ginkgo.By(fmt.Sprintf("Waiting for the resource exported by the sample device plugin to become available on the local node (instances: %d)", e2enode.SampleDevsAmount))
gomega.Eventually(ctx, func(ctx context.Context) bool {
node, ready := getLocalTestNode(ctx, f)
return ready &&
CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
e2enode.CountSampleDeviceCapacity(node) == e2enode.SampleDevsAmount &&
e2enode.CountSampleDeviceAllocatable(node) == e2enode.SampleDevsAmount
}, 2*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected resource exported by the sample device plugin to be available on local node"))
})
@ -930,7 +926,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
ginkgo.By("Waiting for devices to become unavailable on the local node")
gomega.Eventually(ctx, func(ctx context.Context) bool {
node, ready := getLocalTestNode(ctx, f)
return ready && CountSampleDeviceCapacity(node) <= 0
return ready && e2enode.CountSampleDeviceCapacity(node) <= 0
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected devices to be unavailable on local node"))
ginkgo.By("devices now unavailable on the local node")
@ -941,7 +937,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
// exposing those devices as resource has restarted. The expected behavior is that the application pod fails at admission time.
framework.It("Does not keep device plugin assignments across node reboots if fails admission (no pod restart, no device plugin re-registration)", func(ctx context.Context) {
podRECMD := fmt.Sprintf("devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s", sleepIntervalForever)
testPod := makeBusyboxPod(SampleDeviceResourceName, podRECMD)
testPod := makeBusyboxPod(e2enode.SampleDeviceResourceName, podRECMD)
// Set a short termination grace period to speed up the test.
// After kubelet restart, this pod will fail admission and need to be terminated.
// The default 30s grace period causes the test to timeout waiting for the pod
@ -1005,7 +1001,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
framework.Logf("Failed to get node devices: %v, will retry", err)
return true // Return true to continue polling on error
}
_, found := checkPodResourcesAssignment(v1PodResources, pod1.Namespace, pod1.Name, pod1.Spec.Containers[0].Name, SampleDeviceResourceName, []string{})
_, found := checkPodResourcesAssignment(v1PodResources, pod1.Namespace, pod1.Name, pod1.Spec.Containers[0].Name, e2enode.SampleDeviceResourceName, []string{})
return found
}, 30*time.Second, framework.Poll).Should(gomega.BeFalseBecause("%s/%s/%s failed admission, so it must not appear in podresources list", pod1.Namespace, pod1.Name, pod1.Spec.Containers[0].Name))
})
@ -1116,7 +1112,7 @@ func matchContainerDevices(ident string, contDevs []*kubeletpodresourcesv1.Conta
// getSampleDevicePluginPod returns the Sample Device Plugin pod to be used e2e tests.
func getSampleDevicePluginPod(pluginSockDir string) *v1.Pod {
data, err := e2etestfiles.Read(SampleDevicePluginDSYAML)
data, err := e2etestfiles.Read(e2enode.SampleDevicePluginDSYAML)
if err != nil {
framework.Fail(err.Error())
}
@ -1124,12 +1120,12 @@ func getSampleDevicePluginPod(pluginSockDir string) *v1.Pod {
ds := readDaemonSetV1OrDie(data)
dp := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: SampleDevicePluginName,
Name: e2enode.SampleDevicePluginName,
},
Spec: ds.Spec.Template.Spec,
}
for i := range dp.Spec.Containers[0].Env {
if dp.Spec.Containers[0].Env[i].Name == SampleDeviceEnvVarNamePluginSockDir {
if dp.Spec.Containers[0].Env[i].Name == e2enode.SampleDeviceEnvVarNamePluginSockDir {
dp.Spec.Containers[0].Env[i].Value = pluginSockDir
}
}

View file

@ -32,6 +32,7 @@ import (
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
commontest "k8s.io/kubernetes/test/e2e/common"
e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -78,12 +79,12 @@ func updateImageAllowList(ctx context.Context) {
} else {
e2epod.ImagePrePullList.Insert(sriovDevicePluginImage)
}
if samplePluginImage, err := getContainerImageFromE2ETestDaemonset(SampleDevicePluginDSYAML); err != nil {
if samplePluginImage, err := getContainerImageFromE2ETestDaemonset(e2enode.SampleDevicePluginDSYAML); err != nil {
klog.Errorln(err)
} else {
e2epod.ImagePrePullList.Insert(samplePluginImage)
}
if samplePluginImageCtrlReg, err := getContainerImageFromE2ETestDaemonset(SampleDevicePluginControlRegistrationDSYAML); err != nil {
if samplePluginImageCtrlReg, err := getContainerImageFromE2ETestDaemonset(e2enode.SampleDevicePluginControlRegistrationDSYAML); err != nil {
klog.Errorln(err)
} else {
e2epod.ImagePrePullList.Insert(samplePluginImageCtrlReg)

View file

@ -2107,7 +2107,7 @@ func waitForTopologyUnawareResources(ctx context.Context, f *framework.Framework
gomega.Eventually(ctx, func(ctx context.Context) bool {
node := getLocalNode(ctx, f)
resourceAmount := CountSampleDeviceAllocatable(node)
resourceAmount := e2enode.CountSampleDeviceAllocatable(node)
return resourceAmount > 0
}, 2*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected %q resources to be available, got no resources", defaultTopologyUnawareResourceName))
}