From 47c3b10e8de073f8d683a4454bc11cf2bb06ada3 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Thu, 29 Jan 2026 14:13:26 +0100
Subject: [PATCH] DRA integration: increase timeout, II

When running with race detection enabled several tests have suffered
from timeouts recently, with no obvious commit which could be blamed
for this.

Let's use a common constant and make it larger.
---
 .../integration/dra/binding_conditions_test.go | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/test/integration/dra/binding_conditions_test.go b/test/integration/dra/binding_conditions_test.go
index ab5baef46e8..dd8c44d259c 100644
--- a/test/integration/dra/binding_conditions_test.go
+++ b/test/integration/dra/binding_conditions_test.go
@@ -121,7 +121,7 @@ func testDeviceBindingConditionsBasicFlow(tCtx ktesting.TContext, enabled bool)
 	start := time.Now()
 	claim1 := createClaim(tCtx, namespace, "-a", class, claim)
 	pod := createPod(tCtx, namespace, "-a", podWithClaimName, claim1)
-	claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, 10*time.Second)
+	claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, schedulingTimeout)
 	end := time.Now()
 	gomega.NewWithT(tCtx).Expect(claim1).To(gomega.HaveField("Status.Allocation", gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
 		"Devices": gomega.Equal(resourceapi.DeviceAllocationResult{
@@ -143,7 +143,7 @@ func testDeviceBindingConditionsBasicFlow(tCtx ktesting.TContext, enabled bool)
 	// Second pod should get the device with binding conditions.
 	claim2 := createClaim(tCtx, namespace, "-b", class, claim)
 	pod = createPod(tCtx, namespace, "-b", podWithClaimName, claim2)
-	claim2 = waitForClaimAllocatedToDevice(tCtx, namespace, claim2.Name, 10*time.Second)
+	claim2 = waitForClaimAllocatedToDevice(tCtx, namespace, claim2.Name, schedulingTimeout)
 	end = time.Now()
 	gomega.NewWithT(tCtx).Expect(claim2).To(gomega.HaveField("Status.Allocation", gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
 		"Devices": gomega.Equal(resourceapi.DeviceAllocationResult{
@@ -193,7 +193,7 @@ func testDeviceBindingConditionsBasicFlow(tCtx ktesting.TContext, enabled bool)
 	)
 
 	// allocation restored?
-	claim2 = waitForClaimAllocatedToDevice(tCtx, namespace, claim2.Name, 10*time.Second)
+	claim2 = waitForClaimAllocatedToDevice(tCtx, namespace, claim2.Name, schedulingTimeout)
 
 	// Now it's safe to set the final binding condition.
 	// Allow the scheduler to proceed.
@@ -262,7 +262,7 @@ func testDeviceBindingFailureConditionsReschedule(tCtx ktesting.TContext, useTai
 	// ensuring the initial allocation occurs on the intended node.
 	claim1 := createClaim(tCtx, namespace, "-a", class, claim)
 	pod := createPod(tCtx, namespace, "-a", podWithClaimName, claim1)
-	claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, 10*time.Second)
+	claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, schedulingTimeout)
 	gomega.NewWithT(tCtx).Expect(claim1).To(gomega.HaveField("Status.Allocation", gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
 		"Devices": gomega.Equal(resourceapi.DeviceAllocationResult{
 			Results: []resourceapi.DeviceRequestAllocationResult{{
@@ -364,7 +364,7 @@ func testDeviceBindingFailureConditionsReschedule(tCtx ktesting.TContext, useTai
 	)
 
 	// allocation restored?
-	claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, 10*time.Second)
+	claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, schedulingTimeout)
 
 	gomega.NewWithT(tCtx).Expect(claim1).To(gomega.HaveField("Status.Allocation", gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
 		"Devices": gomega.Equal(resourceapi.DeviceAllocationResult{
@@ -424,7 +424,7 @@ profiles:
 	pod := createPod(tCtx, namespace, "-timeout-enforced", podWithClaimName, claim1)
 
 	// Wait until the claim is allocated.
-	allocatedClaim := waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, 10*time.Second)
+	allocatedClaim := waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, schedulingTimeout)
 
 	gomega.NewWithT(tCtx).Expect(allocatedClaim).To(gomega.HaveField(
 		"Status.Allocation",
@@ -519,7 +519,7 @@ profiles:
 	claim1 := createClaim(tCtx, namespace, "-timeout", class, claim)
 	pod := createPod(tCtx, namespace, "-timeout", podWithClaimName, claim1)
 
-	claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, 10*time.Second)
+	claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, schedulingTimeout)
 	gomega.NewWithT(tCtx).Expect(claim1).To(gomega.HaveField(
 		"Status.Allocation",
 		gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
@@ -564,7 +564,7 @@ profiles:
 		}
 		_, err = tCtx.Client().ResourceV1().ResourceSlices().Get(tCtx, sliceWithoutBinding.Name, metav1.GetOptions{})
 		return err
-	}).WithTimeout(10*time.Second).WithPolling(300*time.Millisecond).Should(
+	}).WithTimeout(schedulingTimeout).WithPolling(300*time.Millisecond).Should(
 		gomega.Succeed(), "slice must be created before binding timeout")
 
 	// Wait until the binding timeout occurs.
@@ -584,7 +584,7 @@ profiles:
 	// Verify recovery to the newly added device without BindingConditions through rescheduling triggered by binding timeout.
 	tCtx.Eventually(func(tCtx ktesting.TContext) (*resourceapi.ResourceClaim, error) {
 		return tCtx.Client().ResourceV1().ResourceClaims(namespace).Get(tCtx, claim1.Name, metav1.GetOptions{})
-	}).WithTimeout(10*time.Second).WithPolling(1*time.Second).Should(gomega.HaveField(
+	}).WithTimeout(schedulingTimeout).WithPolling(1*time.Second).Should(gomega.HaveField(
 		"Status.Allocation",
 		gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
 			"Devices": gomega.Equal(resourceapi.DeviceAllocationResult{
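
Note: the patch only replaces the hard-coded 10*time.Second waits with schedulingTimeout; the definition of that constant is not part of this diff. As a minimal sketch of what such a shared constant could look like, assuming it lives in the same test/integration/dra package and using 30 seconds purely as an illustrative value (file name, package name, and duration are guesses, not taken from the patch):

	// timeouts.go (hypothetical file name, for illustration only)
	package dra

	import "time"

	// schedulingTimeout is the shared upper bound the DRA integration tests
	// use when waiting for the scheduler to allocate a ResourceClaim. It is
	// deliberately generous because race detection slows the test binaries
	// down considerably.
	const schedulingTimeout = 30 * time.Second

Centralizing the value in one constant means a future bump only touches a single line instead of every call site shown above.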