Mirror of https://github.com/kubernetes/kubernetes.git
Remove unneeded use of fmt.Sprintf in test/{integration,e2e}
parent 669b1de008
commit 7883039b31
13 changed files with 23 additions and 32 deletions
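
The change is mechanical: wherever fmt.Sprintf is called with a constant format string and no arguments, it only returns that string, so the plain literal is equivalent and avoids a pointless call (linters such as staticcheck flag this pattern). A minimal standalone sketch of the before/after, not taken from the changed files; the tlsHost value is made up for illustration:

package main

import "fmt"

func main() {
	// Before: Sprintf with a constant format and no arguments only copies the string.
	before := fmt.Sprintf("OK")

	// After: the plain literal is equivalent.
	after := "OK"

	fmt.Println(before == after) // true

	// Sprintf stays where it actually formats values, as in the ingress tests below.
	tlsHost := "foo.bar.com" // hypothetical host, for illustration only
	fmt.Println(fmt.Sprintf("should terminate TLS for host %v", tlsHost))
}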
@@ -163,7 +163,7 @@ var _ = SIGDescribe("Downward API", func() {
 		}
 
 		expectations := []string{
-			fmt.Sprintf("OK"),
+			"OK",
 		}
 
 		pod := &v1.Pod{
@@ -684,7 +684,7 @@ func runServiceAndWorkloadForResourceConsumer(ctx context.Context, c clientset.I
 		framework.Failf(invalidKind)
 	}
 
-	ginkgo.By(fmt.Sprintf("Running controller"))
+	ginkgo.By("Running controller")
 	controllerName := name + "-ctrl"
 	_, err = createService(ctx, c, controllerName, ns, map[string]string{}, map[string]string{"name": controllerName}, port, targetPort)
 	framework.ExpectNoError(err)
@@ -213,14 +213,14 @@ func CreateIngressComformanceTests(ctx context.Context, jig *TestJig, ns string,
 	// Platform agnostic list of tests that must be satisfied by all controllers
 	tests := []ConformanceTests{
 		{
-			fmt.Sprintf("should create a basic HTTP ingress"),
+			"should create a basic HTTP ingress",
 			func() { jig.CreateIngress(ctx, manifestPath, ns, annotations, annotations) },
-			fmt.Sprintf("waiting for urls on basic HTTP ingress"),
+			"waiting for urls on basic HTTP ingress",
 		},
 		{
 			fmt.Sprintf("should terminate TLS for host %v", tlsHost),
 			func() { jig.SetHTTPS(ctx, tlsSecretName, tlsHost) },
-			fmt.Sprintf("waiting for HTTPS updates to reflect in ingress"),
+			"waiting for HTTPS updates to reflect in ingress",
 		},
 		{
 			fmt.Sprintf("should update url map for host %v to expose a single url: %v", updateURLMapHost, updateURLMapPath),
@@ -256,7 +256,7 @@ func CreateIngressComformanceTests(ctx context.Context, jig *TestJig, ns string,
 				route := fmt.Sprintf("http://%v%v", jig.Address, pathToFail)
 				framework.ExpectNoError(PollURL(ctx, route, updateURLMapHost, e2eservice.LoadBalancerCleanupTimeout, jig.PollInterval, &http.Client{Timeout: IngressReqTimeout}, true))
 			},
-			fmt.Sprintf("Waiting for path updates to reflect in L7"),
+			"Waiting for path updates to reflect in L7",
 		},
 	}
 	// Skip the Update TLS cert test for kubemci: https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/141.
@@ -18,7 +18,6 @@ package service
 
 import (
 	"context"
-	"fmt"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -133,7 +132,7 @@ func CreateServiceForSimpleAppWithPods(ctx context.Context, c clientset.Interfac
 // CreateServiceForSimpleApp returns a service that selects/exposes pods (send -1 ports if no exposure needed) with an app label.
 func CreateServiceForSimpleApp(ctx context.Context, c clientset.Interface, contPort, svcPort int, namespace, appName string) *v1.Service {
 	if appName == "" {
-		panic(fmt.Sprintf("no app name provided"))
+		panic("no app name provided")
 	}
 
 	serviceSelector := map[string]string{
@@ -116,7 +116,7 @@ func DeleteAllStatefulSets(ctx context.Context, c clientset.Interface, ns string
 		return true, nil
 	})
 	if pvcPollErr != nil {
-		errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion."))
+		errList = append(errList, "Timeout waiting for pvc deletion.")
 	}
 
 	pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true, func(ctx context.Context) (bool, error) {
@@ -138,7 +138,7 @@ func DeleteAllStatefulSets(ctx context.Context, c clientset.Interface, ns string
 		return false, nil
 	})
 	if pollErr != nil {
-		errList = append(errList, fmt.Sprintf("Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs."))
+		errList = append(errList, "Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs.")
 	}
 	if len(errList) != 0 {
 		framework.ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
@@ -205,7 +205,7 @@ func checkAffinity(ctx context.Context, cs clientset.Interface, execPod *v1.Pod,
 		return false
 	}
 	if !trackerFulfilled {
-		checkAffinityFailed(tracker, fmt.Sprintf("Connection timed out or not enough responses."))
+		checkAffinityFailed(tracker, "Connection timed out or not enough responses.")
 	}
 	if shouldHold {
 		checkAffinityFailed(tracker, "Affinity should hold but didn't.")
@@ -18,7 +18,6 @@ package autoscaling
 
 import (
 	"context"
-	"fmt"
 	"time"
 
 	autoscalingv2 "k8s.io/api/autoscaling/v2"
@@ -69,7 +68,7 @@ func (t *HPAUpgradeTest) Setup(ctx context.Context, f *framework.Framework) {
 // Test waits for upgrade to complete and verifies if HPA works correctly.
 func (t *HPAUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
 	// Block until upgrade is done
-	ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking HPA"))
+	ginkgo.By("Waiting for upgrade to finish before checking HPA")
 	<-done
 	t.test(ctx)
 }
@@ -85,19 +84,19 @@ func (t *HPAUpgradeTest) test(ctx context.Context) {
 	const timeToWait = 15 * time.Minute
 	t.rc.Resume(ctx)
 
-	ginkgo.By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1."))
+	ginkgo.By("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1.")
 	t.rc.ConsumeCPU(10) /* millicores */
-	ginkgo.By(fmt.Sprintf("HPA waits for 1 replica"))
+	ginkgo.By("HPA waits for 1 replica")
 	t.rc.WaitForReplicas(ctx, 1, timeToWait)
 
-	ginkgo.By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores."))
+	ginkgo.By("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores.")
 	t.rc.ConsumeCPU(250) /* millicores */
-	ginkgo.By(fmt.Sprintf("HPA waits for 3 replicas"))
+	ginkgo.By("HPA waits for 3 replicas")
 	t.rc.WaitForReplicas(ctx, 3, timeToWait)
 
-	ginkgo.By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5."))
+	ginkgo.By("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5.")
 	t.rc.ConsumeCPU(700) /* millicores */
-	ginkgo.By(fmt.Sprintf("HPA waits for 5 replicas"))
+	ginkgo.By("HPA waits for 5 replicas")
 	t.rc.WaitForReplicas(ctx, 5, timeToWait)
 
 	// We need to pause background goroutines as during upgrade master is unavailable and requests issued by them fail.
@@ -1059,10 +1059,7 @@ func testPublishResourceSlices(tCtx ktesting.TContext, haveLatestAPI bool, disab
 	factor := time.Duration(10)
 	mutationCacheTTL := resourceslice.DefaultMutationCacheTTL / factor
 	syncDelay := resourceslice.DefaultSyncDelay / factor
-	quiesencePeriod := syncDelay
-	if mutationCacheTTL > quiesencePeriod {
-		quiesencePeriod = mutationCacheTTL
-	}
+	quiesencePeriod := max(mutationCacheTTL, syncDelay)
 	quiesencePeriod += 10 * time.Second
 
 	var gotDroppedFieldError atomic.Bool
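
Beyond the Sprintf cleanups, the hunk above folds a hand-written comparison into Go's built-in max (available since Go 1.21), which works on any ordered type, including time.Duration. A minimal standalone sketch of the equivalence; the duration values are arbitrary, only the variable names mirror the diff:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Arbitrary values for illustration; the real ones come from the resourceslice defaults.
	mutationCacheTTL := 30 * time.Second
	syncDelay := 3 * time.Second

	// Old form: pick the larger duration with an explicit branch.
	quiesencePeriod := syncDelay
	if mutationCacheTTL > quiesencePeriod {
		quiesencePeriod = mutationCacheTTL
	}

	// New form: the built-in max does the same in one expression.
	fmt.Println(quiesencePeriod == max(mutationCacheTTL, syncDelay)) // true
}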
@@ -79,7 +79,7 @@ func TestDualStackEndpoints(t *testing.T) {
 			{
 				Type:              v1.NodeReady,
 				Status:            v1.ConditionTrue,
-				Reason:            fmt.Sprintf("schedulable condition"),
+				Reason:            "schedulable condition",
 				LastHeartbeatTime: metav1.Time{Time: time.Now()},
 			},
 		},
@@ -368,7 +368,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
 	goodCondition := v1.NodeCondition{
 		Type:              v1.NodeReady,
 		Status:            v1.ConditionTrue,
-		Reason:            fmt.Sprintf("schedulable condition"),
+		Reason:            "schedulable condition",
 		LastHeartbeatTime: metav1.Time{Time: time.Now()},
 	}
 	node := &v1.Node{
@@ -20,7 +20,6 @@ package scheduler
 
 import (
 	"context"
-	"fmt"
 	"testing"
 	"time"
 
@@ -64,7 +63,7 @@ func TestUnschedulableNodes(t *testing.T) {
 	goodCondition := v1.NodeCondition{
 		Type:              v1.NodeReady,
 		Status:            v1.ConditionTrue,
-		Reason:            fmt.Sprintf("schedulable condition"),
+		Reason:            "schedulable condition",
 		LastHeartbeatTime: metav1.Time{Time: time.Now()},
 	}
 	// Create a new schedulable node, since we're first going to apply
@@ -118,10 +118,7 @@ func (op *createResourceClaimsOp) run(tCtx ktesting.TContext) {
 		}
 	}
 
-	workers := op.Count
-	if workers > 30 {
-		workers = 30
-	}
+	workers := min(op.Count, 30)
 	workqueue.ParallelizeUntil(tCtx, workers, op.Count, create)
 	if createErr != nil {
 		tCtx.Fatal(createErr.Error())
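
The worker-count change above is the mirror image: an explicit cap becomes the built-in min. A standalone sketch of the clamp; the cap of 30 comes from the diff, while the count is a made-up value:

package main

import "fmt"

func main() {
	count := 100 // hypothetical number of work items

	// Old form: cap parallelism at 30 with a branch.
	workers := count
	if workers > 30 {
		workers = 30
	}

	// New form: min expresses the same clamp directly.
	fmt.Println(workers == min(count, 30)) // true
}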
@@ -1276,7 +1276,7 @@ func makeNode(index int) *v1.Node {
 			{
 				Type:              v1.NodeReady,
 				Status:            v1.ConditionTrue,
-				Reason:            fmt.Sprintf("schedulable condition"),
+				Reason:            "schedulable condition",
 				LastHeartbeatTime: metav1.Time{Time: time.Now()},
 			},
 		},