Move interfaces Handle and Plugin, and related types, from kubernetes/kubernetes to the kube-scheduler staging repo

Ania Borowiec 2025-07-24 11:48:07 +00:00
parent cb163eb55b
commit fadb40199f
105 changed files with 2091 additions and 1959 deletions

View file

@@ -44,7 +44,6 @@ import (
"k8s.io/kubernetes/cmd/kube-scheduler/app/options"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/testing/defaults"
-"k8s.io/kubernetes/pkg/scheduler/framework"
)
func TestSetup(t *testing.T) {
@@ -510,22 +509,22 @@ leaderElection:
// Simulates an out-of-tree plugin.
type foo struct{}
-var _ framework.PreFilterPlugin = &foo{}
-var _ framework.FilterPlugin = &foo{}
+var _ fwk.PreFilterPlugin = &foo{}
+var _ fwk.FilterPlugin = &foo{}
func (*foo) Name() string {
return "Foo"
}
-func newFoo(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
+func newFoo(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &foo{}, nil
}
-func (*foo) PreFilter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
+func (*foo) PreFilter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
return nil, nil
}
-func (*foo) PreFilterExtensions() framework.PreFilterExtensions {
+func (*foo) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}

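For out-of-tree authors, the practical effect of this move is that a plugin like `foo` above can be written against the staging module alone. Below is a minimal, illustrative sketch in the same shape; the `bar` plugin, its package, and its factory are hypothetical, and only the interfaces visible in this diff (fwk.FilterPlugin, fwk.Handle, fwk.Plugin) are assumed.

```go
package barplugin

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	fwk "k8s.io/kube-scheduler/framework"
)

// bar is a hypothetical out-of-tree Filter plugin; every scheduler type it
// touches now comes from the staging import, not k8s.io/kubernetes.
type bar struct{}

var _ fwk.FilterPlugin = &bar{}

func (*bar) Name() string { return "Bar" }

// newBar mirrors newFoo above: the factory signature uses the relocated
// fwk.Handle and fwk.Plugin types.
func newBar(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
	return &bar{}, nil
}

// Filter returns a nil *fwk.Status, which the framework treats as Success.
func (*bar) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ fwk.NodeInfo) *fwk.Status {
	return nil
}
```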
View file

@@ -23,7 +23,6 @@ import (
fwk "k8s.io/kube-scheduler/framework"
internalcache "k8s.io/kubernetes/pkg/scheduler/backend/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/backend/queue"
-"k8s.io/kubernetes/pkg/scheduler/framework"
)
// APICache is responsible for sending API calls' requests through scheduling queue or cache.
@@ -42,7 +41,7 @@ func New(schedulingQueue internalqueue.SchedulingQueue, cache internalcache.Cach
// PatchPodStatus sends a patch request for a Pod's status through a scheduling queue.
// The patch could be first applied to the cached Pod object and then the API call is executed asynchronously.
// It returns a channel that can be used to wait for the call's completion.
-func (c *APICache) PatchPodStatus(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *framework.NominatingInfo) (<-chan error, error) {
+func (c *APICache) PatchPodStatus(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *fwk.NominatingInfo) (<-chan error, error) {
return c.schedulingQueue.PatchPodStatus(pod, condition, nominatingInfo)
}

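The comment above describes a two-step asynchronous contract. A hedged usage sketch (the package name and the `patchAndWait` helper are assumptions, not part of this diff):

```go
package apicache // assumed name of the package shown above

import (
	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
)

// patchAndWait is a hypothetical helper: issue the patch, then block on the
// returned channel for the API call's completion.
func patchAndWait(c *APICache, pod *v1.Pod, cond *v1.PodCondition) error {
	onFinish, err := c.PatchPodStatus(pod, cond, &fwk.NominatingInfo{NominatingMode: fwk.ModeNoop})
	if err != nil {
		return err
	}
	return <-onFinish // resolves once the asynchronous API call has executed
}
```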
View file

@@ -43,7 +43,7 @@ type Snapshot struct {
generation int64
}
-var _ framework.SharedLister = &Snapshot{}
+var _ fwk.SharedLister = &Snapshot{}
// NewEmptySnapshot initializes a Snapshot struct and returns it.
func NewEmptySnapshot() *Snapshot {
@@ -156,12 +156,12 @@ func createImageExistenceMap(nodes []*v1.Node) map[string]sets.Set[string] {
}
// NodeInfos returns a NodeInfoLister.
-func (s *Snapshot) NodeInfos() framework.NodeInfoLister {
+func (s *Snapshot) NodeInfos() fwk.NodeInfoLister {
return s
}
// StorageInfos returns a StorageInfoLister.
-func (s *Snapshot) StorageInfos() framework.StorageInfoLister {
+func (s *Snapshot) StorageInfos() fwk.StorageInfoLister {
return s
}

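Since Snapshot now satisfies the staging fwk.SharedLister, consumers no longer need any k8s.io/kubernetes import to read node information. A small sketch, assuming the usual Get accessor on fwk.NodeInfoLister; the package name and helper are hypothetical:

```go
package snapshot // assumed package name for the file above

import (
	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
)

// nodeOf consumes a Snapshot purely through the staging interfaces.
func nodeOf(lister fwk.SharedLister, name string) (*v1.Node, error) {
	ni, err := lister.NodeInfos().Get(name) // Get returning (fwk.NodeInfo, error) is assumed
	if err != nil {
		return nil, err
	}
	return ni.Node(), nil
}
```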
View file

@@ -23,6 +23,7 @@ import (
v1 "k8s.io/api/core/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/klog/v2"
+fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/backend/heap"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/metrics"
@@ -100,13 +101,13 @@ type backoffQueue struct {
podMaxBackoff time.Duration
// activeQLessFn is used as an eventual less function if two backoff times are equal,
// when the SchedulerPopFromBackoffQ feature is enabled.
-activeQLessFn framework.LessFunc
+activeQLessFn fwk.LessFunc
// isPopFromBackoffQEnabled indicates whether the feature gate SchedulerPopFromBackoffQ is enabled.
isPopFromBackoffQEnabled bool
}
-func newBackoffQueue(clock clock.WithTicker, podInitialBackoffDuration time.Duration, podMaxBackoffDuration time.Duration, activeQLessFn framework.LessFunc, popFromBackoffQEnabled bool) *backoffQueue {
+func newBackoffQueue(clock clock.WithTicker, podInitialBackoffDuration time.Duration, podMaxBackoffDuration time.Duration, activeQLessFn fwk.LessFunc, popFromBackoffQEnabled bool) *backoffQueue {
bq := &backoffQueue{
clock: clock,
podInitialBackoff: podInitialBackoffDuration,

View file

@@ -26,7 +26,6 @@ import (
listersv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
-"k8s.io/kubernetes/pkg/scheduler/framework"
)
// nominator is a structure that stores pods nominated to run on nodes.
@@ -65,21 +64,21 @@ func newPodNominator(podLister listersv1.PodLister) *nominator {
// This is called during the preemption process when a node is nominated to run
// the pod. We update the nominator's structure before sending an API request to update the pod
// object, to avoid races with the following scheduling cycles.
-func (npm *nominator) addNominatedPod(logger klog.Logger, pi fwk.PodInfo, nominatingInfo *framework.NominatingInfo) {
+func (npm *nominator) addNominatedPod(logger klog.Logger, pi fwk.PodInfo, nominatingInfo *fwk.NominatingInfo) {
npm.nLock.Lock()
npm.addNominatedPodUnlocked(logger, pi, nominatingInfo)
npm.nLock.Unlock()
}
-func (npm *nominator) addNominatedPodUnlocked(logger klog.Logger, pi fwk.PodInfo, nominatingInfo *framework.NominatingInfo) {
+func (npm *nominator) addNominatedPodUnlocked(logger klog.Logger, pi fwk.PodInfo, nominatingInfo *fwk.NominatingInfo) {
// Always delete the pod if it already exists, to ensure we never store more than
// one instance of the pod.
npm.deleteUnlocked(pi.GetPod())
var nodeName string
-if nominatingInfo.Mode() == framework.ModeOverride {
+if nominatingInfo.Mode() == fwk.ModeOverride {
nodeName = nominatingInfo.NominatedNodeName
-} else if nominatingInfo.Mode() == framework.ModeNoop {
+} else if nominatingInfo.Mode() == fwk.ModeNoop {
if pi.GetPod().Status.NominatedNodeName == "" {
return
}
@@ -116,7 +115,7 @@ func (npm *nominator) UpdateNominatedPod(logger klog.Logger, oldPod *v1.Pod, new
// In some cases, an Update event with no "NominatedNode" present is received right
// after a node("NominatedNode") is reserved for this pod in memory.
// In this case, we need to keep reserving the NominatedNode when updating the pod pointer.
-var nominatingInfo *framework.NominatingInfo
+var nominatingInfo *fwk.NominatingInfo
// We won't fall into below `if` block if the Update event represents:
// (1) NominatedNode info is added
// (2) NominatedNode info is updated
@@ -124,8 +123,8 @@ func (npm *nominator) UpdateNominatedPod(logger klog.Logger, oldPod *v1.Pod, new
if nominatedNodeName(oldPod) == "" && nominatedNodeName(newPodInfo.GetPod()) == "" {
if nnn, ok := npm.nominatedPodToNode[oldPod.UID]; ok {
// This is the only case we should continue reserving the NominatedNode
-nominatingInfo = &framework.NominatingInfo{
-NominatingMode: framework.ModeOverride,
+nominatingInfo = &fwk.NominatingInfo{
+NominatingMode: fwk.ModeOverride,
NominatedNodeName: nnn,
}
}

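The two nominating modes used throughout this file now live in the staging repo. A short hypothetical helper contrasting them, in the spirit of addNominatedPodUnlocked above (the package name and function are assumptions):

```go
package queue // assumed package name for the nominator file above

import (
	fwk "k8s.io/kube-scheduler/framework"
)

// nominationFor returns ModeOverride to record a nominated node, or
// ModeNoop to leave any existing nomination untouched.
func nominationFor(nodeName string) *fwk.NominatingInfo {
	if nodeName == "" {
		return &fwk.NominatingInfo{NominatingMode: fwk.ModeNoop}
	}
	return &fwk.NominatingInfo{NominatingMode: fwk.ModeOverride, NominatedNodeName: nodeName}
}
```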
View file

@@ -49,7 +49,7 @@ import (
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/backend/heap"
"k8s.io/kubernetes/pkg/scheduler/framework"
-"k8s.io/kubernetes/pkg/scheduler/framework/api_calls"
+apicalls "k8s.io/kubernetes/pkg/scheduler/framework/api_calls"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
"k8s.io/kubernetes/pkg/scheduler/metrics"
@@ -92,7 +92,7 @@ type PreEnqueueCheck func(pod *v1.Pod) bool
// The interface follows a pattern similar to cache.FIFO and cache.Heap and
// makes it easy to use those data structures as a SchedulingQueue.
type SchedulingQueue interface {
-framework.PodNominator
+fwk.PodNominator
Add(logger klog.Logger, pod *v1.Pod)
// Activate moves the given pods to activeQ.
// If a pod isn't found in unschedulablePods or backoffQ and it's in-flight,
@@ -131,7 +131,7 @@ type SchedulingQueue interface {
// PatchPodStatus handles the pod status update by sending an update API call through API dispatcher.
// This method should be used only if the SchedulerAsyncAPICalls feature gate is enabled.
-PatchPodStatus(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *framework.NominatingInfo) (<-chan error, error)
+PatchPodStatus(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *fwk.NominatingInfo) (<-chan error, error)
// The following functions are supposed to be used only for testing or debugging.
GetPod(name, namespace string) (*framework.QueuedPodInfo, bool)
@@ -145,7 +145,7 @@ type SchedulingQueue interface {
// NewSchedulingQueue initializes a priority queue as a new scheduling queue.
func NewSchedulingQueue(
-lessFn framework.LessFunc,
+lessFn fwk.LessFunc,
informerFactory informers.SharedInformerFactory,
opts ...Option) SchedulingQueue {
return NewPriorityQueue(lessFn, informerFactory, opts...)
@@ -186,7 +186,7 @@ type PriorityQueue struct {
moveRequestCycle int64
// preEnqueuePluginMap is keyed with profile and plugin name, valued with registered preEnqueue plugins.
-preEnqueuePluginMap map[string]map[string]framework.PreEnqueuePlugin
+preEnqueuePluginMap map[string]map[string]fwk.PreEnqueuePlugin
// queueingHintMap is keyed with profile name, valued with registered queueing hint functions.
queueingHintMap QueueingHintMapPerProfile
// pluginToEventsMap shows which plugin is interested in which events.
@@ -231,7 +231,7 @@ type priorityQueueOptions struct {
podLister listersv1.PodLister
metricsRecorder *metrics.MetricAsyncRecorder
pluginMetricsSamplePercent int
-preEnqueuePluginMap map[string]map[string]framework.PreEnqueuePlugin
+preEnqueuePluginMap map[string]map[string]fwk.PreEnqueuePlugin
queueingHintMap QueueingHintMapPerProfile
apiDispatcher fwk.APIDispatcher
}
@@ -288,7 +288,7 @@ func WithQueueingHintMapPerProfile(m QueueingHintMapPerProfile) Option {
}
// WithPreEnqueuePluginMap sets preEnqueuePluginMap for PriorityQueue.
-func WithPreEnqueuePluginMap(m map[string]map[string]framework.PreEnqueuePlugin) Option {
+func WithPreEnqueuePluginMap(m map[string]map[string]fwk.PreEnqueuePlugin) Option {
return func(o *priorityQueueOptions) {
o.preEnqueuePluginMap = m
}
@@ -337,7 +337,7 @@ func newQueuedPodInfoForLookup(pod *v1.Pod, plugins ...string) *framework.Queued
// NewPriorityQueue creates a PriorityQueue object.
func NewPriorityQueue(
-lessFn framework.LessFunc,
+lessFn fwk.LessFunc,
informerFactory informers.SharedInformerFactory,
opts ...Option,
) *PriorityQueue {
@@ -381,8 +381,8 @@ func NewPriorityQueue(
return pq
}
-// Helper function that wraps framework.LessFunc and converts it to take *framework.QueuedPodInfo as arguments.
-func convertLessFn(lessFn framework.LessFunc) func(podInfo1, podInfo2 *framework.QueuedPodInfo) bool {
+// Helper function that wraps fwk.LessFunc and converts it to take *framework.QueuedPodInfo as arguments.
+func convertLessFn(lessFn fwk.LessFunc) func(podInfo1, podInfo2 *framework.QueuedPodInfo) bool {
return func(podInfo1, podInfo2 *framework.QueuedPodInfo) bool {
return lessFn(podInfo1, podInfo2)
}
@@ -599,7 +599,7 @@ func (p *PriorityQueue) runPreEnqueuePlugins(ctx context.Context, pInfo *framewo
}
// runPreEnqueuePlugin runs the PreEnqueue plugin and update pInfo's fields accordingly if needed.
-func (p *PriorityQueue) runPreEnqueuePlugin(ctx context.Context, logger klog.Logger, pl framework.PreEnqueuePlugin, pInfo *framework.QueuedPodInfo, shouldRecordMetric bool) *fwk.Status {
+func (p *PriorityQueue) runPreEnqueuePlugin(ctx context.Context, logger klog.Logger, pl fwk.PreEnqueuePlugin, pInfo *framework.QueuedPodInfo, shouldRecordMetric bool) *fwk.Status {
pod := pInfo.Pod
startTime := p.clock.Now()
s := pl.PreEnqueue(ctx, pod)
@@ -625,7 +625,7 @@ func (p *PriorityQueue) runPreEnqueuePlugin(ctx context.Context, logger klog.Log
// AddNominatedPod adds the given pod to the nominator.
// It locks the PriorityQueue to make sure it won't race with any other method.
-func (p *PriorityQueue) AddNominatedPod(logger klog.Logger, pi fwk.PodInfo, nominatingInfo *framework.NominatingInfo) {
+func (p *PriorityQueue) AddNominatedPod(logger klog.Logger, pi fwk.PodInfo, nominatingInfo *fwk.NominatingInfo) {
p.lock.Lock()
p.nominator.addNominatedPod(logger, pi, nominatingInfo)
p.lock.Unlock()
@@ -1355,7 +1355,7 @@ func (p *PriorityQueue) PendingPods() ([]*v1.Pod, string) {
// PatchPodStatus handles the pod status update by sending an update API call through API dispatcher.
// This method should be used only if the SchedulerAsyncAPICalls feature gate is enabled.
-func (p *PriorityQueue) PatchPodStatus(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *framework.NominatingInfo) (<-chan error, error) {
+func (p *PriorityQueue) PatchPodStatus(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *fwk.NominatingInfo) (<-chan error, error) {
// Don't store anything in the cache. This might be extended in the next releases.
onFinish := make(chan error, 1)
err := p.apiDispatcher.Add(apicalls.Implementations.PodStatusPatch(pod, condition, nominatingInfo), fwk.APICallOptions{

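Constructing the queue after this change needs only the staging fwk.LessFunc. A sketch as if inside this queue package, assuming an informer factory; it pairs PrioritySort with NewSchedulingQueue exactly the way newDefaultQueueSort does in the tests below:

```go
package queue // the package shown above

import (
	"k8s.io/client-go/informers"
	fwk "k8s.io/kube-scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
)

// newDefaultQueue is a hypothetical helper: PrioritySort's Less method
// satisfies the relocated fwk.LessFunc directly.
func newDefaultQueue(informerFactory informers.SharedInformerFactory) SchedulingQueue {
	sort := &queuesort.PrioritySort{}
	var lessFn fwk.LessFunc = sort.Less
	return NewSchedulingQueue(lessFn, informerFactory)
}
```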
View file

@@ -166,7 +166,7 @@ func TestPriorityQueue_Add(t *testing.T) {
}
}
-func newDefaultQueueSort() framework.LessFunc {
+func newDefaultQueueSort() fwk.LessFunc {
sort := &queuesort.PrioritySort{}
return sort.Less
}
@@ -1547,7 +1547,7 @@ func (pl *preEnqueuePlugin) PreEnqueue(ctx context.Context, p *v1.Pod) *fwk.Stat
func TestPriorityQueue_moveToActiveQ(t *testing.T) {
tests := []struct {
name string
-plugins []framework.PreEnqueuePlugin
+plugins []fwk.PreEnqueuePlugin
pod *v1.Pod
event string
movesFromBackoffQ bool
@@ -1564,7 +1564,7 @@ func TestPriorityQueue_moveToActiveQ(t *testing.T) {
},
{
name: "preEnqueue plugin registered, pod name not in allowlists",
-plugins: []framework.PreEnqueuePlugin{&preEnqueuePlugin{}, &preEnqueuePlugin{}},
+plugins: []fwk.PreEnqueuePlugin{&preEnqueuePlugin{}, &preEnqueuePlugin{}},
pod: st.MakePod().Name("p").Label("p", "").Obj(),
event: framework.EventUnscheduledPodAdd.Label(),
wantUnschedulablePods: 1,
@@ -1572,7 +1572,7 @@ func TestPriorityQueue_moveToActiveQ(t *testing.T) {
},
{
name: "preEnqueue plugin registered, pod failed one preEnqueue plugin",
-plugins: []framework.PreEnqueuePlugin{
+plugins: []fwk.PreEnqueuePlugin{
&preEnqueuePlugin{allowlists: []string{"foo", "bar"}},
&preEnqueuePlugin{allowlists: []string{"foo"}},
},
@@ -1583,7 +1583,7 @@ func TestPriorityQueue_moveToActiveQ(t *testing.T) {
},
{
name: "preEnqueue plugin registered, preEnqueue rejects the pod, even if it is after backoff",
-plugins: []framework.PreEnqueuePlugin{
+plugins: []fwk.PreEnqueuePlugin{
&preEnqueuePlugin{allowlists: []string{"foo", "bar"}},
&preEnqueuePlugin{allowlists: []string{"foo"}},
},
@@ -1597,7 +1597,7 @@ func TestPriorityQueue_moveToActiveQ(t *testing.T) {
// With SchedulerPopFromBackoffQ enabled, the queue assumes the pod has already passed PreEnqueue,
// and it doesn't run PreEnqueue again, always puts the pod to activeQ.
name: "preEnqueue plugin registered, pod would fail one preEnqueue plugin, but it is moved from backoffQ after completing backoff, so preEnqueue is not executed",
-plugins: []framework.PreEnqueuePlugin{
+plugins: []fwk.PreEnqueuePlugin{
&preEnqueuePlugin{allowlists: []string{"foo", "bar"}},
&preEnqueuePlugin{allowlists: []string{"foo"}},
},
@@ -1610,7 +1610,7 @@ func TestPriorityQueue_moveToActiveQ(t *testing.T) {
},
{
name: "preEnqueue plugin registered, pod failed one preEnqueue plugin when activated from unschedulablePods",
-plugins: []framework.PreEnqueuePlugin{
+plugins: []fwk.PreEnqueuePlugin{
&preEnqueuePlugin{allowlists: []string{"foo", "bar"}},
&preEnqueuePlugin{allowlists: []string{"foo"}},
},
@@ -1623,7 +1623,7 @@ func TestPriorityQueue_moveToActiveQ(t *testing.T) {
},
{
name: "preEnqueue plugin registered, pod would fail one preEnqueue plugin, but was activated from backoffQ, so preEnqueue is not executed",
-plugins: []framework.PreEnqueuePlugin{
+plugins: []fwk.PreEnqueuePlugin{
&preEnqueuePlugin{allowlists: []string{"foo", "bar"}},
&preEnqueuePlugin{allowlists: []string{"foo"}},
},
@@ -1636,7 +1636,7 @@ func TestPriorityQueue_moveToActiveQ(t *testing.T) {
},
{
name: "preEnqueue plugin registered, pod passed all preEnqueue plugins",
-plugins: []framework.PreEnqueuePlugin{
+plugins: []fwk.PreEnqueuePlugin{
&preEnqueuePlugin{allowlists: []string{"foo", "bar"}},
&preEnqueuePlugin{allowlists: []string{"bar"}},
},
@@ -1658,7 +1658,7 @@ func TestPriorityQueue_moveToActiveQ(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
-m := map[string]map[string]framework.PreEnqueuePlugin{"": make(map[string]framework.PreEnqueuePlugin, len(tt.plugins))}
+m := map[string]map[string]fwk.PreEnqueuePlugin{"": make(map[string]fwk.PreEnqueuePlugin, len(tt.plugins))}
for _, plugin := range tt.plugins {
m[""][plugin.Name()] = plugin
}
@@ -1688,7 +1688,7 @@ func TestPriorityQueue_moveToActiveQ(t *testing.T) {
func TestPriorityQueue_moveToBackoffQ(t *testing.T) {
tests := []struct {
name string
-plugins []framework.PreEnqueuePlugin
+plugins []fwk.PreEnqueuePlugin
pod *v1.Pod
popFromBackoffQEnabled []bool
wantSuccess bool
@@ -1700,21 +1700,21 @@ func TestPriorityQueue_moveToBackoffQ(t *testing.T) {
},
{
name: "preEnqueue plugin registered, pod name would not be in allowlists",
-plugins: []framework.PreEnqueuePlugin{&preEnqueuePlugin{}, &preEnqueuePlugin{}},
+plugins: []fwk.PreEnqueuePlugin{&preEnqueuePlugin{}, &preEnqueuePlugin{}},
pod: st.MakePod().Name("p").Label("p", "").Obj(),
popFromBackoffQEnabled: []bool{false},
wantSuccess: true,
},
{
name: "preEnqueue plugin registered, pod name not in allowlists",
-plugins: []framework.PreEnqueuePlugin{&preEnqueuePlugin{}, &preEnqueuePlugin{}},
+plugins: []fwk.PreEnqueuePlugin{&preEnqueuePlugin{}, &preEnqueuePlugin{}},
pod: st.MakePod().Name("p").Label("p", "").Obj(),
popFromBackoffQEnabled: []bool{true},
wantSuccess: false,
},
{
name: "preEnqueue plugin registered, preEnqueue plugin would reject the pod, but isn't run",
-plugins: []framework.PreEnqueuePlugin{
+plugins: []fwk.PreEnqueuePlugin{
&preEnqueuePlugin{allowlists: []string{"foo", "bar"}},
&preEnqueuePlugin{allowlists: []string{"foo"}},
},
@@ -1724,7 +1724,7 @@ func TestPriorityQueue_moveToBackoffQ(t *testing.T) {
},
{
name: "preEnqueue plugin registered, pod failed one preEnqueue plugin",
-plugins: []framework.PreEnqueuePlugin{
+plugins: []fwk.PreEnqueuePlugin{
&preEnqueuePlugin{allowlists: []string{"foo", "bar"}},
&preEnqueuePlugin{allowlists: []string{"foo"}},
},
@@ -1734,7 +1734,7 @@ func TestPriorityQueue_moveToBackoffQ(t *testing.T) {
},
{
name: "preEnqueue plugin registered, pod passed all preEnqueue plugins",
-plugins: []framework.PreEnqueuePlugin{
+plugins: []fwk.PreEnqueuePlugin{
&preEnqueuePlugin{allowlists: []string{"foo", "bar"}},
&preEnqueuePlugin{allowlists: []string{"bar"}},
},
@@ -1754,7 +1754,7 @@ func TestPriorityQueue_moveToBackoffQ(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
-m := map[string]map[string]framework.PreEnqueuePlugin{"": make(map[string]framework.PreEnqueuePlugin, len(tt.plugins))}
+m := map[string]map[string]fwk.PreEnqueuePlugin{"": make(map[string]fwk.PreEnqueuePlugin, len(tt.plugins))}
for _, plugin := range tt.plugins {
m[""][plugin.Name()] = plugin
}
@@ -1988,8 +1988,8 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueueWithQueueingHint(t *testing.
}
cl := testingclock.NewFakeClock(now)
plugin, _ := schedulinggates.New(ctx, nil, nil, plfeature.Features{})
-preEnqM := map[string]map[string]framework.PreEnqueuePlugin{"": {
-names.SchedulingGates: plugin.(framework.PreEnqueuePlugin),
+preEnqM := map[string]map[string]fwk.PreEnqueuePlugin{"": {
+names.SchedulingGates: plugin.(fwk.PreEnqueuePlugin),
"foo": &preEnqueuePlugin{allowlists: []string{"foo"}},
}}
q := NewTestQueue(ctx, newDefaultQueueSort(), WithQueueingHintMapPerProfile(m), WithClock(cl), WithPreEnqueuePluginMap(preEnqM))
@@ -2661,11 +2661,11 @@ func TestPriorityQueue_UpdateNominatedPodForNode(t *testing.T) {
q.Add(logger, medPriorityPodInfo.Pod)
// Update unschedulablePodInfo on a different node than specified in the pod.
q.AddNominatedPod(logger, mustNewTestPodInfo(t, unschedulablePodInfo.Pod),
-&framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "node5"})
+&fwk.NominatingInfo{NominatingMode: fwk.ModeOverride, NominatedNodeName: "node5"})
// Update nominated node name of a pod on a node that is not specified in the pod object.
q.AddNominatedPod(logger, mustNewTestPodInfo(t, highPriorityPodInfo.Pod),
-&framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "node2"})
+&fwk.NominatingInfo{NominatingMode: fwk.ModeOverride, NominatedNodeName: "node2"})
expectedNominatedPods := &nominator{
nominatedPodToNode: map[types.UID]string{
medPriorityPodInfo.Pod.UID: "node1",
@@ -2690,7 +2690,7 @@ func TestPriorityQueue_UpdateNominatedPodForNode(t *testing.T) {
}
// Update one of the nominated pods that doesn't have nominatedNodeName in the
// pod object. It should be updated correctly.
-q.AddNominatedPod(logger, highPriorityPodInfo, &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "node4"})
+q.AddNominatedPod(logger, highPriorityPodInfo, &fwk.NominatingInfo{NominatingMode: fwk.ModeOverride, NominatedNodeName: "node4"})
expectedNominatedPods = &nominator{
nominatedPodToNode: map[types.UID]string{
medPriorityPodInfo.Pod.UID: "node1",
@@ -2709,7 +2709,7 @@ func TestPriorityQueue_UpdateNominatedPodForNode(t *testing.T) {
// Attempt to nominate a pod that was deleted from the informer cache.
// Nothing should change.
-q.AddNominatedPod(logger, nonExistentPodInfo, &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "node1"})
+q.AddNominatedPod(logger, nonExistentPodInfo, &fwk.NominatingInfo{NominatingMode: fwk.ModeOverride, NominatedNodeName: "node1"})
if diff := cmp.Diff(q.nominator, expectedNominatedPods, nominatorCmpOpts...); diff != "" {
t.Errorf("Unexpected diff after nominating a deleted pod (-want, +got):\n%s", diff)
}
@@ -2717,7 +2717,7 @@ func TestPriorityQueue_UpdateNominatedPodForNode(t *testing.T) {
// Nothing should change.
scheduledPodCopy := scheduledPodInfo.Pod.DeepCopy()
scheduledPodInfo.Pod.Spec.NodeName = ""
-q.AddNominatedPod(logger, mustNewTestPodInfo(t, scheduledPodCopy), &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "node1"})
+q.AddNominatedPod(logger, mustNewTestPodInfo(t, scheduledPodCopy), &fwk.NominatingInfo{NominatingMode: fwk.ModeOverride, NominatedNodeName: "node1"})
if diff := cmp.Diff(q.nominator, expectedNominatedPods, nominatorCmpOpts...); diff != "" {
t.Errorf("Unexpected diff after nominating a scheduled pod (-want, +got):\n%s", diff)
}
@@ -3568,7 +3568,7 @@ scheduler_plugin_execution_duration_seconds_count{extension_point="PreEnqueue",p
QueueingHintFn: queueHintReturnQueue,
},
}
-preenq := map[string]map[string]framework.PreEnqueuePlugin{"": {(&preEnqueuePlugin{}).Name(): &preEnqueuePlugin{allowlists: []string{queueable}}}}
+preenq := map[string]map[string]fwk.PreEnqueuePlugin{"": {(&preEnqueuePlugin{}).Name(): &preEnqueuePlugin{allowlists: []string{queueable}}}}
recorder := metrics.NewMetricsAsyncRecorder(3, 20*time.Microsecond, ctx.Done())
queue := NewTestQueue(ctx, newDefaultQueueSort(), WithClock(testingclock.NewFakeClock(timestamp)), WithPreEnqueuePluginMap(preenq), WithPluginMetricsSamplePercent(test.pluginMetricsSamplePercent), WithMetricsRecorder(recorder), WithQueueingHintMapPerProfile(m))
for i, op := range test.operations {
@@ -3659,7 +3659,7 @@ func TestPerPodSchedulingMetrics(t *testing.T) {
name: "A gated pod is created and scheduled after lifting gate",
perPodSchedulingMetricsScenario: func(c *testingclock.FakeClock, queue *PriorityQueue, pod *v1.Pod) {
// Create a queue with PreEnqueuePlugin
-queue.preEnqueuePluginMap = map[string]map[string]framework.PreEnqueuePlugin{"": {(&preEnqueuePlugin{}).Name(): &preEnqueuePlugin{allowlists: []string{"foo"}}}}
+queue.preEnqueuePluginMap = map[string]map[string]fwk.PreEnqueuePlugin{"": {(&preEnqueuePlugin{}).Name(): &preEnqueuePlugin{allowlists: []string{"foo"}}}}
queue.pluginMetricsSamplePercent = 0
queue.Add(logger, pod)
// Check pod is added to the unschedulablePods queue.
@@ -4309,7 +4309,7 @@ func Test_isPodWorthRequeuing(t *testing.T) {
func Test_queuedPodInfo_gatedSetUponCreationAndUnsetUponUpdate(t *testing.T) {
logger, ctx := ktesting.NewTestContext(t)
plugin, _ := schedulinggates.New(ctx, nil, nil, plfeature.Features{})
-m := map[string]map[string]framework.PreEnqueuePlugin{"": {names.SchedulingGates: plugin.(framework.PreEnqueuePlugin)}}
+m := map[string]map[string]fwk.PreEnqueuePlugin{"": {names.SchedulingGates: plugin.(fwk.PreEnqueuePlugin)}}
q := NewTestQueue(ctx, newDefaultQueueSort(), WithPreEnqueuePluginMap(m))
gatedPod := st.MakePod().SchedulingGates([]string{"hello world"}).Obj()

View file

@@ -23,12 +23,12 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
-"k8s.io/kubernetes/pkg/scheduler/framework"
+fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/metrics"
)
// NewTestQueue creates a priority queue with an empty informer factory.
-func NewTestQueue(ctx context.Context, lessFn framework.LessFunc, opts ...Option) *PriorityQueue {
+func NewTestQueue(ctx context.Context, lessFn fwk.LessFunc, opts ...Option) *PriorityQueue {
return NewTestQueueWithObjects(ctx, lessFn, nil, opts...)
}
@@ -36,7 +36,7 @@ func NewTestQueue(ctx context.Context, lessFn framework.LessFunc, opts ...Option
// populated with the provided objects.
func NewTestQueueWithObjects(
ctx context.Context,
-lessFn framework.LessFunc,
+lessFn fwk.LessFunc,
objs []runtime.Object,
opts ...Option,
) *PriorityQueue {
@@ -52,7 +52,7 @@ func NewTestQueueWithObjects(
func NewTestQueueWithInformerFactory(
ctx context.Context,
-lessFn framework.LessFunc,
+lessFn fwk.LessFunc,
informerFactory informers.SharedInformerFactory,
opts ...Option,
) *PriorityQueue {

View file

@@ -217,7 +217,7 @@ func TestEventHandlers_MoveToActiveOnNominatedNodeUpdate(t *testing.T) {
}
}
-func newDefaultQueueSort() framework.LessFunc {
+func newDefaultQueueSort() fwk.LessFunc {
sort := &queuesort.PrioritySort{}
return sort.Less
}

View file

@@ -85,7 +85,7 @@ func makeTransport(config *schedulerapi.Extender) (http.RoundTripper, error) {
}
// NewHTTPExtender creates an HTTPExtender object.
-func NewHTTPExtender(config *schedulerapi.Extender) (framework.Extender, error) {
+func NewHTTPExtender(config *schedulerapi.Extender) (fwk.Extender, error) {
if config.HTTPTimeout.Duration.Nanoseconds() == 0 {
config.HTTPTimeout.Duration = time.Duration(DefaultExtenderTimeout)
}
@@ -137,7 +137,7 @@ func (h *HTTPExtender) SupportsPreemption() bool {
func (h *HTTPExtender) ProcessPreemption(
pod *v1.Pod,
nodeNameToVictims map[string]*extenderv1.Victims,
-nodeInfos framework.NodeInfoLister,
+nodeInfos fwk.NodeInfoLister,
) (map[string]*extenderv1.Victims, error) {
var (
result extenderv1.ExtenderPreemptionResult
@@ -180,7 +180,7 @@ func (h *HTTPExtender) ProcessPreemption(
// such as UIDs and names, to object pointers.
func (h *HTTPExtender) convertToVictims(
nodeNameToMetaVictims map[string]*extenderv1.MetaVictims,
-nodeInfos framework.NodeInfoLister,
+nodeInfos fwk.NodeInfoLister,
) (map[string]*extenderv1.Victims, error) {
nodeNameToVictims := map[string]*extenderv1.Victims{}
for nodeName, metaVictims := range nodeNameToMetaVictims {

View file

@@ -325,7 +325,7 @@ func TestSchedulerWithExtenders(t *testing.T) {
client := clientsetfake.NewClientset()
informerFactory := informers.NewSharedInformerFactory(client, 0)
-var extenders []framework.Extender
+var extenders []fwk.Extender
for ii := range test.extenders {
extenders = append(extenders, &test.extenders[ii])
}
@@ -501,7 +501,7 @@ func TestConvertToVictims(t *testing.T) {
nodeNameToMetaVictims map[string]*extenderv1.MetaVictims
nodeNames []string
podsInNodeList []*v1.Pod
-nodeInfos framework.NodeInfoLister
+nodeInfos fwk.NodeInfoLister
want map[string]*extenderv1.Victims
wantErr bool
}{

View file

@@ -18,7 +18,6 @@ package apicalls
import (
fwk "k8s.io/kube-scheduler/framework"
-"k8s.io/kubernetes/pkg/scheduler/framework"
)
const (
@@ -39,7 +38,7 @@ var Relevances = fwk.APICallRelevances{
// Implementation is a built-in mapping types to calls' constructors.
// It's used to construct calls' objects in the scheduler framework and for easier replacement of those.
// This mapping can be replaced by the out-of-tree plugin in its init() function, if needed.
-var Implementations = framework.APICallImplementations[*PodStatusPatchCall, *PodBindingCall]{
+var Implementations = fwk.APICallImplementations[*PodStatusPatchCall, *PodBindingCall]{
PodStatusPatch: NewPodStatusPatchCall,
PodBinding: NewPodBindingCall,
}

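The comment on Implementations invites out-of-tree replacement at init() time. A heavily hedged sketch of what that could look like from an external package, assuming the field has the same signature as NewPodStatusPatchCall above; the logging wrapper is purely an illustrative assumption, not scheduler API:

```go
package custompatch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	fwk "k8s.io/kube-scheduler/framework"
	apicalls "k8s.io/kubernetes/pkg/scheduler/framework/api_calls"
)

// init reassigns one constructor field before the scheduler starts; the
// fields of Implementations are plain function values.
func init() {
	orig := apicalls.Implementations.PodStatusPatch
	apicalls.Implementations.PodStatusPatch = func(pod *v1.Pod, condition *v1.PodCondition, ni *fwk.NominatingInfo) *apicalls.PodStatusPatchCall {
		// Hypothetical customization: log, then delegate to the built-in constructor.
		klog.Background().V(4).Info("creating PodStatusPatchCall", "pod", klog.KObj(pod))
		return orig(pod, condition, ni)
	}
}
```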
View file

@@ -28,7 +28,6 @@ import (
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/util"
)
@@ -48,10 +47,10 @@ type PodStatusPatchCall struct {
// newCondition is a condition to update.
newCondition *v1.PodCondition
// nominatingInfo is a nominating info to update.
-nominatingInfo *framework.NominatingInfo
+nominatingInfo *fwk.NominatingInfo
}
-func NewPodStatusPatchCall(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *framework.NominatingInfo) *PodStatusPatchCall {
+func NewPodStatusPatchCall(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *fwk.NominatingInfo) *PodStatusPatchCall {
return &PodStatusPatchCall{
podUID: pod.UID,
podRef: klog.KObj(pod),
@@ -70,8 +69,8 @@ func (psuc *PodStatusPatchCall) UID() types.UID {
}
// syncStatus syncs the given status with condition and nominatingInfo. It returns true if anything was actually updated.
-func syncStatus(status *v1.PodStatus, condition *v1.PodCondition, nominatingInfo *framework.NominatingInfo) bool {
-nnnNeedsUpdate := nominatingInfo.Mode() == framework.ModeOverride && status.NominatedNodeName != nominatingInfo.NominatedNodeName
+func syncStatus(status *v1.PodStatus, condition *v1.PodCondition, nominatingInfo *fwk.NominatingInfo) bool {
+nnnNeedsUpdate := nominatingInfo.Mode() == fwk.ModeOverride && status.NominatedNodeName != nominatingInfo.NominatedNodeName
if condition != nil {
if !podutil.UpdatePodCondition(status, condition) && !nnnNeedsUpdate {
return false
@@ -144,7 +143,7 @@ func (psuc *PodStatusPatchCall) Merge(oldCall fwk.APICall) error {
if !ok {
return fmt.Errorf("unexpected error: call of type %T is not of type *PodStatusPatchCall", oldCall)
}
-if psuc.nominatingInfo.Mode() == framework.ModeNoop && oldPsuc.nominatingInfo.Mode() == framework.ModeOverride {
+if psuc.nominatingInfo.Mode() == fwk.ModeNoop && oldPsuc.nominatingInfo.Mode() == fwk.ModeOverride {
// Set a nominatingInfo from an old call if the new one is no-op.
psuc.nominatingInfo = oldPsuc.nominatingInfo
}
@@ -173,7 +172,7 @@ func conditionNeedsUpdate(status *v1.PodStatus, condition *v1.PodCondition) bool
}
func (psuc *PodStatusPatchCall) IsNoOp() bool {
-nnnNeedsUpdate := psuc.nominatingInfo.Mode() == framework.ModeOverride && psuc.podStatus.NominatedNodeName != psuc.nominatingInfo.NominatedNodeName
+nnnNeedsUpdate := psuc.nominatingInfo.Mode() == fwk.ModeOverride && psuc.podStatus.NominatedNodeName != psuc.nominatingInfo.NominatedNodeName
if nnnNeedsUpdate {
return false
}

View file

@@ -25,7 +25,7 @@ import (
"k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing"
"k8s.io/klog/v2/ktesting"
-"k8s.io/kubernetes/pkg/scheduler/framework"
+fwk "k8s.io/kube-scheduler/framework"
)
func TestPodStatusPatchCall_IsNoOp(t *testing.T) {
@@ -48,42 +48,42 @@ func TestPodStatusPatchCall_IsNoOp(t *testing.T) {
name string
pod *v1.Pod
condition *v1.PodCondition
-nominatingInfo *framework.NominatingInfo
+nominatingInfo *fwk.NominatingInfo
want bool
}{
{
name: "No-op when condition and node name match",
pod: podWithNode,
condition: &v1.PodCondition{Type: v1.PodScheduled, Status: v1.ConditionFalse},
-nominatingInfo: &framework.NominatingInfo{NominatedNodeName: "node-a", NominatingMode: framework.ModeOverride},
+nominatingInfo: &fwk.NominatingInfo{NominatedNodeName: "node-a", NominatingMode: fwk.ModeOverride},
want: true,
},
{
name: "Not no-op when condition is different",
pod: podWithNode,
condition: &v1.PodCondition{Type: v1.PodScheduled, Status: v1.ConditionTrue},
-nominatingInfo: &framework.NominatingInfo{NominatedNodeName: "node-a", NominatingMode: framework.ModeOverride},
+nominatingInfo: &fwk.NominatingInfo{NominatedNodeName: "node-a", NominatingMode: fwk.ModeOverride},
want: false,
},
{
name: "Not no-op when nominated node name is different",
pod: podWithNode,
condition: &v1.PodCondition{Type: v1.PodScheduled, Status: v1.ConditionFalse},
-nominatingInfo: &framework.NominatingInfo{NominatedNodeName: "node-b", NominatingMode: framework.ModeOverride},
+nominatingInfo: &fwk.NominatingInfo{NominatedNodeName: "node-b", NominatingMode: fwk.ModeOverride},
want: false,
},
{
name: "No-op when condition is nil and node name matches",
pod: podWithNode,
condition: nil,
-nominatingInfo: &framework.NominatingInfo{NominatedNodeName: "node-a", NominatingMode: framework.ModeOverride},
+nominatingInfo: &fwk.NominatingInfo{NominatedNodeName: "node-a", NominatingMode: fwk.ModeOverride},
want: true,
},
{
name: "Not no-op when condition is nil but node name differs",
pod: podWithNode,
condition: nil,
-nominatingInfo: &framework.NominatingInfo{NominatedNodeName: "node-b", NominatingMode: framework.ModeOverride},
+nominatingInfo: &fwk.NominatingInfo{NominatedNodeName: "node-b", NominatingMode: fwk.ModeOverride},
want: false,
},
}
@@ -106,9 +106,9 @@ func TestPodStatusPatchCall_Merge(t *testing.T) {
t.Run("Merges nominating info and condition from the old call", func(t *testing.T) {
oldCall := NewPodStatusPatchCall(pod, &v1.PodCondition{Type: v1.PodScheduled, Status: v1.ConditionFalse},
-&framework.NominatingInfo{NominatedNodeName: "node-a", NominatingMode: framework.ModeOverride},
+&fwk.NominatingInfo{NominatedNodeName: "node-a", NominatingMode: fwk.ModeOverride},
)
-newCall := NewPodStatusPatchCall(pod, nil, &framework.NominatingInfo{NominatingMode: framework.ModeNoop})
+newCall := NewPodStatusPatchCall(pod, nil, &fwk.NominatingInfo{NominatingMode: fwk.ModeNoop})
if err := newCall.Merge(oldCall); err != nil {
t.Fatalf("Unexpected error returned by Merge(): %v", err)
@@ -122,9 +122,9 @@ func TestPodStatusPatchCall_Merge(t *testing.T) {
})
t.Run("Doesn't overwrite nominating info and condition of a new call", func(t *testing.T) {
-oldCall := NewPodStatusPatchCall(pod, nil, &framework.NominatingInfo{NominatingMode: framework.ModeNoop})
+oldCall := NewPodStatusPatchCall(pod, nil, &fwk.NominatingInfo{NominatingMode: fwk.ModeNoop})
newCall := NewPodStatusPatchCall(pod, &v1.PodCondition{Type: v1.PodScheduled, Status: v1.ConditionFalse},
-&framework.NominatingInfo{NominatedNodeName: "node-b", NominatingMode: framework.ModeOverride})
+&fwk.NominatingInfo{NominatedNodeName: "node-b", NominatingMode: fwk.ModeOverride})
if err := newCall.Merge(oldCall); err != nil {
t.Fatalf("Unexpected error returned by Merge(): %v", err)
@@ -156,7 +156,7 @@ func TestPodStatusPatchCall_Sync(t *testing.T) {
t.Run("Syncs the status before execution and updates the pod", func(t *testing.T) {
call := NewPodStatusPatchCall(pod, nil,
-&framework.NominatingInfo{NominatedNodeName: "node-c", NominatingMode: framework.ModeOverride})
+&fwk.NominatingInfo{NominatedNodeName: "node-c", NominatingMode: fwk.ModeOverride})
updatedPod := pod.DeepCopy()
updatedPod.Status.NominatedNodeName = "node-b"
@@ -176,7 +176,7 @@ func TestPodStatusPatchCall_Sync(t *testing.T) {
t.Run("Doesn't sync internal status during or after execution, but updates the pod", func(t *testing.T) {
call := NewPodStatusPatchCall(pod, nil,
-&framework.NominatingInfo{NominatedNodeName: "node-c", NominatingMode: framework.ModeOverride})
+&fwk.NominatingInfo{NominatedNodeName: "node-c", NominatingMode: fwk.ModeOverride})
call.executed = true
updatedPod := pod.DeepCopy()
@@ -218,7 +218,7 @@ func TestPodStatusPatchCall_Execute(t *testing.T) {
})
call := NewPodStatusPatchCall(pod, &v1.PodCondition{Type: v1.PodScheduled, Status: v1.ConditionFalse},
-&framework.NominatingInfo{NominatingMode: framework.ModeNoop})
+&fwk.NominatingInfo{NominatingMode: fwk.ModeNoop})
if err := call.Execute(ctx, client); err != nil {
t.Fatalf("Unexpected error returned by Execute(): %v", err)
}
@@ -239,7 +239,7 @@ func TestPodStatusPatchCall_Execute(t *testing.T) {
})
noOpCall := NewPodStatusPatchCall(pod, nil,
-&framework.NominatingInfo{NominatedNodeName: "node-a", NominatingMode: framework.ModeOverride})
+&fwk.NominatingInfo{NominatedNodeName: "node-a", NominatingMode: fwk.ModeOverride})
if err := noOpCall.Execute(ctx, client); err != nil {
t.Fatalf("Unexpected error returned by Execute(): %v", err)
}

View file

@@ -35,7 +35,7 @@ import (
)
type frameworkContract interface {
-RunPreFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *fwk.Status, sets.Set[string])
+RunPreFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod) (*fwk.PreFilterResult, *fwk.Status, sets.Set[string])
RunFilterPlugins(context.Context, fwk.CycleState, *v1.Pod, fwk.NodeInfo) *fwk.Status
RunReservePluginsReserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status
}

View file

@@ -26,16 +26,15 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/dynamic-resource-allocation/structured"
fwk "k8s.io/kube-scheduler/framework"
-"k8s.io/kubernetes/pkg/scheduler/framework"
)
-var _ framework.NodeInfoLister = &nodeInfoListerContract{}
-var _ framework.StorageInfoLister = &storageInfoListerContract{}
-var _ framework.SharedLister = &shareListerContract{}
-var _ framework.ResourceSliceLister = &resourceSliceListerContract{}
-var _ framework.DeviceClassLister = &deviceClassListerContract{}
-var _ framework.ResourceClaimTracker = &resourceClaimTrackerContract{}
-var _ framework.SharedDRAManager = &sharedDRAManagerContract{}
+var _ fwk.NodeInfoLister = &nodeInfoListerContract{}
+var _ fwk.StorageInfoLister = &storageInfoListerContract{}
+var _ fwk.SharedLister = &shareListerContract{}
+var _ fwk.ResourceSliceLister = &resourceSliceListerContract{}
+var _ fwk.DeviceClassLister = &deviceClassListerContract{}
+var _ fwk.ResourceClaimTracker = &resourceClaimTrackerContract{}
+var _ fwk.SharedDRAManager = &sharedDRAManagerContract{}
type nodeInfoListerContract struct{}
@@ -63,11 +62,11 @@ func (c *storageInfoListerContract) IsPVCUsedByPods(_ string) bool {
type shareListerContract struct{}
-func (c *shareListerContract) NodeInfos() framework.NodeInfoLister {
+func (c *shareListerContract) NodeInfos() fwk.NodeInfoLister {
return nil
}
-func (c *shareListerContract) StorageInfos() framework.StorageInfoLister {
+func (c *shareListerContract) StorageInfos() fwk.StorageInfoLister {
return nil
}
@@ -126,14 +125,14 @@ func (r *resourceClaimTrackerContract) AssumedClaimRestore(_, _ string) {
type sharedDRAManagerContract struct{}
-func (s *sharedDRAManagerContract) ResourceClaims() framework.ResourceClaimTracker {
+func (s *sharedDRAManagerContract) ResourceClaims() fwk.ResourceClaimTracker {
return nil
}
-func (s *sharedDRAManagerContract) ResourceSlices() framework.ResourceSliceLister {
+func (s *sharedDRAManagerContract) ResourceSlices() fwk.ResourceSliceLister {
return nil
}
-func (s *sharedDRAManagerContract) DeviceClasses() framework.DeviceClassLister {
+func (s *sharedDRAManagerContract) DeviceClasses() fwk.DeviceClassLister {
return nil
}

View file

@@ -20,46 +20,14 @@ package framework
import (
"context"
"math"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/events"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework/parallelize"
)
-// NodeScoreList declares a list of nodes and their scores.
-type NodeScoreList []NodeScore
-// NodeScore is a struct with node name and score.
-type NodeScore struct {
-Name string
-Score int64
-}
-// NodeToStatusReader is a read-only interface of NodeToStatus passed to each PostFilter plugin.
-type NodeToStatusReader interface {
-// Get returns the status for given nodeName.
-// If the node is not in the map, the AbsentNodesStatus is returned.
-Get(nodeName string) *fwk.Status
-// NodesForStatusCode returns a list of NodeInfos for the nodes that have a given status code.
-// It returns the NodeInfos for all matching nodes denoted by AbsentNodesStatus as well.
-NodesForStatusCode(nodeLister NodeInfoLister, code fwk.Code) ([]fwk.NodeInfo, error)
-}
-// NodeToStatusMap is an alias for NodeToStatusReader to keep partial backwards compatibility.
-// NodeToStatusReader should be used if possible.
-type NodeToStatusMap = NodeToStatusReader
// NodeToStatus contains the statuses of the Nodes where the incoming Pod was not schedulable.
type NodeToStatus struct {
// nodeToStatus contains specific statuses of the nodes.
@@ -128,7 +96,7 @@ func (m *NodeToStatus) ForEachExplicitNode(fn func(nodeName string, status *fwk.
// and filtered using NodeToStatus.Get.
// If the absentNodesStatus doesn't match the code, nodeToStatus map is used to create a list of nodes
// and nodeLister.Get is used to obtain NodeInfo for each.
-func (m *NodeToStatus) NodesForStatusCode(nodeLister NodeInfoLister, code fwk.Code) ([]fwk.NodeInfo, error) {
+func (m *NodeToStatus) NodesForStatusCode(nodeLister fwk.NodeInfoLister, code fwk.Code) ([]fwk.NodeInfo, error) {
var resultNodes []fwk.NodeInfo
if m.AbsentNodesStatus().Code() == code {
@@ -161,34 +129,6 @@ func (m *NodeToStatus) NodesForStatusCode(nodeLister NodeInfoLister, code fwk.Co
return resultNodes, nil
}
-// NodePluginScores is a struct with node name and scores for that node.
-type NodePluginScores struct {
-// Name is node name.
-Name string
-// Scores is scores from plugins and extenders.
-Scores []PluginScore
-// TotalScore is the total score in Scores.
-TotalScore int64
-}
-// PluginScore is a struct with plugin/extender name and score.
-type PluginScore struct {
-// Name is the name of plugin or extender.
-Name string
-Score int64
-}
-const (
-// MaxNodeScore is the maximum score a Score plugin is expected to return.
-MaxNodeScore int64 = 100
-// MinNodeScore is the minimum score a Score plugin is expected to return.
-MinNodeScore int64 = 0
-// MaxTotalScore is the maximum total score.
-MaxTotalScore int64 = math.MaxInt64
-)
// PodsToActivateKey is a reserved state key for stashing pods.
// If the stashed pods are present in unschedulablePods or backoffQ, they will be
// activated (i.e., moved to activeQ) in two phases:
@ -213,294 +153,19 @@ func NewPodsToActivate() *PodsToActivate {
return &PodsToActivate{Map: make(map[string]*v1.Pod)}
}
// WaitingPod represents a pod currently waiting in the permit phase.
type WaitingPod interface {
// GetPod returns a reference to the waiting pod.
GetPod() *v1.Pod
// GetPendingPlugins returns a list of pending Permit plugin's name.
GetPendingPlugins() []string
// Allow declares the waiting pod is allowed to be scheduled by the plugin named as "pluginName".
// If this is the last remaining plugin to allow, then a success signal is delivered
// to unblock the pod.
Allow(pluginName string)
// Reject declares the waiting pod unschedulable.
Reject(pluginName, msg string)
}
// Plugin is the parent type for all the scheduling framework plugins.
type Plugin interface {
Name() string
}
// PreEnqueuePlugin is an interface that must be implemented by "PreEnqueue" plugins.
// These plugins are called prior to adding Pods to activeQ.
// Note: an preEnqueue plugin is expected to be lightweight and efficient, so it's not expected to
// involve expensive calls like accessing external endpoints; otherwise it'd block other
// Pods' enqueuing in event handlers.
type PreEnqueuePlugin interface {
Plugin
// PreEnqueue is called prior to adding Pods to activeQ.
PreEnqueue(ctx context.Context, p *v1.Pod) *fwk.Status
}
// LessFunc is the function to sort pod info
type LessFunc func(podInfo1, podInfo2 fwk.QueuedPodInfo) bool
// QueueSortPlugin is an interface that must be implemented by "QueueSort" plugins.
// These plugins are used to sort pods in the scheduling queue. Only one queue sort
// plugin may be enabled at a time.
type QueueSortPlugin interface {
Plugin
// Less are used to sort pods in the scheduling queue.
Less(fwk.QueuedPodInfo, fwk.QueuedPodInfo) bool
}
// EnqueueExtensions is an optional interface that plugins can implement to efficiently
// move unschedulable Pods in internal scheduling queues.
// In the scheduler, Pods can be unschedulable by PreEnqueue, PreFilter, Filter, Reserve, and Permit plugins,
// and Pods rejected by these plugins are requeued based on this extension point.
// Failures from other extension points are regarded as temporal errors (e.g., network failure),
// and the scheduler requeue Pods without this extension point - always requeue Pods to activeQ after backoff.
// This is because such temporal errors cannot be resolved by specific cluster events,
// and we have no choose but keep retrying scheduling until the failure is resolved.
//
// Plugins that make pod unschedulable (PreEnqueue, PreFilter, Filter, Reserve, and Permit plugins) must implement this interface,
// otherwise the default implementation will be used, which is less efficient in requeueing Pods rejected by the plugin.
//
// Also, if EventsToRegister returns an empty list, that means the Pods failed by the plugin are not requeued by any events,
// which doesn't make sense in most cases (very likely misuse)
// since the pods rejected by the plugin could be stuck in the unschedulable pod pool forever.
//
// If plugins other than above extension points support this interface, they are just ignored.
type EnqueueExtensions interface {
Plugin
// EventsToRegister returns a series of possible events that may cause a Pod
// failed by this plugin schedulable. Each event has a callback function that
// filters out events to reduce useless retry of Pod's scheduling.
// The events will be registered when instantiating the internal scheduling queue,
// and leveraged to build event handlers dynamically.
// When it returns an error, the scheduler fails to start.
// Note: the returned list needs to be determined at a startup,
// and the scheduler only evaluates it once during start up.
// Do not change the result during runtime, for example, based on the cluster's state etc.
//
// Appropriate implementation of this function will make Pod's re-scheduling accurate and performant.
EventsToRegister(context.Context) ([]fwk.ClusterEventWithHint, error)
}
// PreFilterExtensions is an interface that is included in plugins that allow specifying
// callbacks to make incremental updates to its supposedly pre-calculated
// state.
type PreFilterExtensions interface {
// AddPod is called by the framework while trying to evaluate the impact
// of adding podToAdd to the node while scheduling podToSchedule.
AddPod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status
// RemovePod is called by the framework while trying to evaluate the impact
// of removing podToRemove from the node while scheduling podToSchedule.
RemovePod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status
}
// PreFilterPlugin is an interface that must be implemented by "PreFilter" plugins.
// These plugins are called at the beginning of the scheduling cycle.
type PreFilterPlugin interface {
Plugin
// PreFilter is called at the beginning of the scheduling cycle. All PreFilter
// plugins must return success or the pod will be rejected. PreFilter could optionally
// return a PreFilterResult to influence which nodes to evaluate downstream. This is useful
// for cases where it is possible to determine the subset of nodes to process in O(1) time.
// When PreFilterResult filters out some Nodes, the framework considers Nodes that are filtered out as getting "UnschedulableAndUnresolvable".
// i.e., those Nodes will be out of the candidates of the preemption.
//
// When it returns Skip status, returned PreFilterResult and other fields in status are just ignored,
// and coupled Filter plugin/PreFilterExtensions() will be skipped in this scheduling cycle.
PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []fwk.NodeInfo) (*PreFilterResult, *fwk.Status)
// PreFilterExtensions returns a PreFilterExtensions interface if the plugin implements one,
// or nil if it does not. A Pre-filter plugin can provide extensions to incrementally
// modify its pre-processed info. The framework guarantees that the extensions
// AddPod/RemovePod will only be called after PreFilter, possibly on a cloned
// CycleState, and may call those functions more than once before calling
// Filter again on a specific node.
PreFilterExtensions() PreFilterExtensions
}
// FilterPlugin is an interface for Filter plugins. These plugins are called at the
// filter extension point for filtering out hosts that cannot run a pod.
// This concept used to be called 'predicate' in the original scheduler.
// These plugins should return "Success", "Unschedulable" or "Error" in Status.code.
// However, the scheduler accepts other valid codes as well.
// Anything other than "Success" will lead to exclusion of the given host from
// running the pod.
type FilterPlugin interface {
Plugin
// Filter is called by the scheduling framework.
// All FilterPlugins should return "Success" to declare that
// the given node fits the pod. If Filter doesn't return "Success",
// it will return "Unschedulable", "UnschedulableAndUnresolvable" or "Error".
//
// "Error" aborts pod scheduling and puts the pod into the backoff queue.
//
// For the node being evaluated, Filter plugins should look at the passed
// nodeInfo reference for this particular node's information (e.g., pods
// considered to be running on the node) instead of looking it up in the
// NodeInfoSnapshot because we don't guarantee that they will be the same.
// For example, during preemption, we may pass a copy of the original
// nodeInfo object that has some pods removed from it to evaluate the
// possibility of preempting them to schedule the target pod.
//
// Plugins are encouraged to check the context for cancellation.
// Once canceled, they should return as soon as possible with
// an UnschedulableAndUnresolvable status that includes the
// `context.Cause(ctx)` error explanation. For example, the
// context gets canceled when a sufficient number of suitable
// nodes have been found and searching for more isn't necessary
// anymore.
Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status
}
// PostFilterPlugin is an interface for "PostFilter" plugins. These plugins are called
// after a pod cannot be scheduled.
type PostFilterPlugin interface {
Plugin
// PostFilter is called by the scheduling framework
// when the scheduling cycle failed at PreFilter or Filter by Unschedulable or UnschedulableAndUnresolvable.
// NodeToStatusReader has statuses that each Node got in PreFilter or Filter phase.
//
// If you're implementing a custom preemption with PostFilter, ignoring Nodes with UnschedulableAndUnresolvable is the responsibility of your plugin,
// meaning NodeToStatusReader could have Nodes with UnschedulableAndUnresolvable
// and the scheduling framework does call PostFilter plugins even when all Nodes in NodeToStatusReader are UnschedulableAndUnresolvable.
//
// A PostFilter plugin should return one of the following statuses:
// - Unschedulable: the plugin gets executed successfully but the pod cannot be made schedulable.
// - Success: the plugin gets executed successfully and the pod can be made schedulable.
// - Error: the plugin aborts due to some internal error.
//
// Informational plugins should be configured ahead of other ones, and always return Unschedulable status.
// Optionally, a non-nil PostFilterResult may be returned along with a Success status. For example,
// a preemption plugin may choose to return nominatedNodeName, so that framework can reuse that to update the
// preemptor pod's .spec.status.nominatedNodeName field.
PostFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap NodeToStatusReader) (*PostFilterResult, *fwk.Status)
}
// PreScorePlugin is an interface for "PreScore" plugin. PreScore is an
// informational extension point. Plugins will be called with a list of nodes
// that passed the filtering phase. A plugin may use this data to update internal
// state or to generate logs/metrics.
type PreScorePlugin interface {
Plugin
// PreScore is called by the scheduling framework after a list of nodes
// passed the filtering phase. All prescore plugins must return success or
// the pod will be rejected
// When it returns Skip status, other fields in status are just ignored,
// and coupled Score plugin will be skipped in this scheduling cycle.
PreScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) *fwk.Status
}
// ScoreExtensions is an interface for Score extended functionality.
type ScoreExtensions interface {
// NormalizeScore is called for all node scores produced by the same plugin's "Score"
// method. A successful run of NormalizeScore will update the scores list and return
// a success status.
NormalizeScore(ctx context.Context, state fwk.CycleState, p *v1.Pod, scores NodeScoreList) *fwk.Status
}
// ScorePlugin is an interface that must be implemented by "Score" plugins to rank
// nodes that passed the filtering phase.
type ScorePlugin interface {
Plugin
// Score is called on each filtered node. It must return success and an integer
// indicating the rank of the node. All scoring plugins must return success or
// the pod will be rejected.
Score(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status)
// ScoreExtensions returns a ScoreExtensions interface if it implements one, or nil if does not.
ScoreExtensions() ScoreExtensions
}
// ReservePlugin is an interface for plugins with Reserve and Unreserve
// methods. These are meant to update the state of the plugin. This concept
// used to be called 'assume' in the original scheduler. These plugins should
// return only Success or Error in Status.code. However, the scheduler accepts
// other valid codes as well. Anything other than Success will lead to
// rejection of the pod.
type ReservePlugin interface {
Plugin
// Reserve is called by the scheduling framework when the scheduler cache is
// updated. If this method returns a failed Status, the scheduler will call
// the Unreserve method for all enabled ReservePlugins.
Reserve(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *fwk.Status
// Unreserve is called by the scheduling framework when a reserved pod was
// rejected, an error occurred during reservation of subsequent plugins, or
// in a later phase. The Unreserve method implementation must be idempotent
// and may be called by the scheduler even if the corresponding Reserve
// method for the same plugin was not called.
Unreserve(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string)
}
// PreBindPlugin is an interface that must be implemented by "PreBind" plugins.
// These plugins are called before a pod being scheduled.
type PreBindPlugin interface {
Plugin
// PreBindPreFlight is called before PreBind, and the plugin is supposed to return Success, Skip, or Error status
// to tell the scheduler whether the plugin will do something in PreBind or not.
// If it returns Success, it means this PreBind plugin will handle this pod.
// If it returns Skip, it means this PreBind plugin has nothing to do with the pod, and PreBind will be skipped.
// This function should be lightweight, and shouldn't do any actual operation, e.g., creating a volume etc.
PreBindPreFlight(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *fwk.Status
// PreBind is called before binding a pod. All prebind plugins must return
// success or the pod will be rejected and won't be sent for binding.
PreBind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *fwk.Status
}
// PostBindPlugin is an interface that must be implemented by "PostBind" plugins.
// These plugins are called after a pod is successfully bound to a node.
type PostBindPlugin interface {
Plugin
// PostBind is called after a pod is successfully bound. These plugins are
// informational. A common application of this extension point is for cleaning
	// up. If a plugin needs to clean up its state after a pod is scheduled and
	// bound, PostBind is the extension point that it should register for.
PostBind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string)
}
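Since PostBind is informational and returns nothing, a sketch can be as small as a log line (klog is already imported here; the plugin name is hypothetical):

// bindObserver is a hypothetical PostBind plugin used purely for telemetry.
type bindObserver struct{}

func (*bindObserver) Name() string { return "BindObserver" }

// PostBind cannot affect the pod; it only observes the successful binding.
func (*bindObserver) PostBind(ctx context.Context, _ fwk.CycleState, p *v1.Pod, nodeName string) {
	klog.FromContext(ctx).Info("pod bound", "pod", klog.KObj(p), "node", nodeName)
}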
// PermitPlugin is an interface that must be implemented by "Permit" plugins.
// These plugins are called before a pod is bound to a node.
type PermitPlugin interface {
Plugin
// Permit is called before binding a pod (and before prebind plugins). Permit
// plugins are used to prevent or delay the binding of a Pod. A permit plugin
// must return success or wait with timeout duration, or the pod will be rejected.
	// The pod will also be rejected if the wait times out, or if the pod is rejected while
	// waiting. Note that if the plugin returns "wait", the framework will wait only
	// after running the remaining plugins, provided that no other plugin rejects the pod.
Permit(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) (*fwk.Status, time.Duration)
}
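A sketch of the wait semantics: a hypothetical gang plugin that parks a pod at Permit until its group is complete, then releases waiting siblings (the group label and the groupComplete helper are made up for illustration):

// gangPermit is a hypothetical Permit plugin that holds a pod until all
// members of its group have reached Permit, up to a timeout.
type gangPermit struct {
	handle Handle
}

func (*gangPermit) Name() string { return "GangPermit" }

func (pl *gangPermit) Permit(_ context.Context, _ fwk.CycleState, p *v1.Pod, _ string) (*fwk.Status, time.Duration) {
	if groupComplete(p) { // hypothetical helper checking the pod's group
		// Release any siblings still waiting at Permit.
		pl.handle.IterateOverWaitingPods(func(wp WaitingPod) {
			if wp.GetPod().Labels["group"] == p.Labels["group"] {
				wp.Allow(pl.Name())
			}
		})
		return nil, 0
	}
	// Wait up to 10s; the pod is rejected if the timeout fires first.
	return fwk.NewStatus(fwk.Wait), 10 * time.Second
}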
// BindPlugin is an interface that must be implemented by "Bind" plugins. Bind
// plugins are used to bind a pod to a Node.
type BindPlugin interface {
Plugin
// Bind plugins will not be called until all pre-bind plugins have completed. Each
// bind plugin is called in the configured order. A bind plugin may choose whether
// or not to handle the given Pod. If a bind plugin chooses to handle a Pod, the
// remaining bind plugins are skipped. When a bind plugin does not handle a pod,
// it must return Skip in its Status code. If a bind plugin returns an Error, the
// pod is rejected and will not be bound.
Bind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *fwk.Status
}
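A sketch of the Skip contract: a bind plugin that declines pods it does not own, letting a later bind plugin (such as DefaultBinder) take over; the annotation key is hypothetical and the v1/metav1 imports are assumed:

// annotatedBinder is a hypothetical Bind plugin that binds only opted-in pods.
type annotatedBinder struct {
	handle Handle
}

var _ BindPlugin = &annotatedBinder{}

func (*annotatedBinder) Name() string { return "AnnotatedBinder" }

func (pl *annotatedBinder) Bind(ctx context.Context, _ fwk.CycleState, p *v1.Pod, nodeName string) *fwk.Status {
	if p.Annotations["example.com/custom-bind"] != "true" {
		return fwk.NewStatus(fwk.Skip) // not ours; the next bind plugin is tried
	}
	binding := &v1.Binding{
		ObjectMeta: metav1.ObjectMeta{Namespace: p.Namespace, Name: p.Name, UID: p.UID},
		Target:     v1.ObjectReference{Kind: "Node", Name: nodeName},
	}
	if err := pl.handle.ClientSet().CoreV1().Pods(binding.Namespace).Bind(ctx, binding, metav1.CreateOptions{}); err != nil {
		return fwk.AsStatus(err) // Error: the pod is rejected and not bound
	}
	return nil
}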
// Framework manages the set of plugins in use by the scheduling framework.
// Configured plugins are called at specified points in a scheduling context.
type Framework interface {
Handle
fwk.Handle
// PreEnqueuePlugins returns the registered preEnqueue plugins.
PreEnqueuePlugins() []PreEnqueuePlugin
PreEnqueuePlugins() []fwk.PreEnqueuePlugin
// EnqueueExtensions returns the registered Enqueue extensions.
EnqueueExtensions() []EnqueueExtensions
EnqueueExtensions() []fwk.EnqueueExtensions
	// QueueSortFunc returns the function to sort pods in the scheduling queue.
QueueSortFunc() LessFunc
QueueSortFunc() fwk.LessFunc
// RunPreFilterPlugins runs the set of configured PreFilter plugins. It returns
// *fwk.Status and its code is set to non-success if any of the plugins returns
@ -511,13 +176,13 @@ type Framework interface {
	// The third return value contains the PreFilter plugins that rejected some or all Nodes with PreFilterResult.
// But, note that it doesn't contain any plugin when a plugin rejects this Pod with non-success status,
// not with PreFilterResult.
RunPreFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod) (*PreFilterResult, *fwk.Status, sets.Set[string])
RunPreFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod) (*fwk.PreFilterResult, *fwk.Status, sets.Set[string])
// RunPostFilterPlugins runs the set of configured PostFilter plugins.
	// PostFilter plugins can either be informational, in which case they should be configured
// to execute first and return Unschedulable status, or ones that try to change the
// cluster state to make the pod potentially schedulable in a future scheduling cycle.
RunPostFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap NodeToStatusReader) (*PostFilterResult, *fwk.Status)
RunPostFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status)
// RunPreBindPlugins runs the set of configured PreBind plugins. It returns
// *fwk.Status and its code is set to non-success if any of the plugins returns
@ -583,231 +248,21 @@ type Framework interface {
PercentageOfNodesToScore() *int32
// SetPodNominator sets the PodNominator
SetPodNominator(nominator PodNominator)
SetPodNominator(nominator fwk.PodNominator)
// SetPodActivator sets the PodActivator
SetPodActivator(activator PodActivator)
SetPodActivator(activator fwk.PodActivator)
// SetAPICacher sets the APICacher
SetAPICacher(apiCacher APICacher)
SetAPICacher(apiCacher fwk.APICacher)
// Close calls Close method of each plugin.
Close() error
}
// Handle provides data and some tools that plugins can use. It is
// passed to the plugin factories at the time of plugin initialization. Plugins
// must store and use this handle to call framework functions.
type Handle interface {
// PodNominator abstracts operations to maintain nominated Pods.
PodNominator
// PluginsRunner abstracts operations to run some plugins.
PluginsRunner
// PodActivator abstracts operations in the scheduling queue.
PodActivator
// SnapshotSharedLister returns listers from the latest NodeInfo Snapshot. The snapshot
// is taken at the beginning of a scheduling cycle and remains unchanged until
	// the pod finishes the "Permit" point.
//
// It should be used only during scheduling cycle:
// - There is no guarantee that the information remains unchanged in the binding phase of scheduling.
// So, plugins shouldn't use it in the binding cycle (pre-bind/bind/post-bind/un-reserve plugin)
// otherwise, a concurrent read/write error might occur.
// - There is no guarantee that the information is always up-to-date.
// So, plugins shouldn't use it in QueueingHint and PreEnqueue
// otherwise, they might make a decision based on stale information.
//
	// Instead, they should use the resources obtained from Informers created via SharedInformerFactory().
SnapshotSharedLister() SharedLister
// IterateOverWaitingPods acquires a read lock and iterates over the WaitingPods map.
IterateOverWaitingPods(callback func(WaitingPod))
// GetWaitingPod returns a waiting pod given its UID.
GetWaitingPod(uid types.UID) WaitingPod
// RejectWaitingPod rejects a waiting pod given its UID.
// The return value indicates if the pod is waiting or not.
RejectWaitingPod(uid types.UID) bool
// ClientSet returns a kubernetes clientSet.
ClientSet() clientset.Interface
// KubeConfig returns the raw kube config.
KubeConfig() *restclient.Config
// EventRecorder returns an event recorder.
EventRecorder() events.EventRecorder
SharedInformerFactory() informers.SharedInformerFactory
	// SharedDRAManager can be used to obtain DRA objects, and to track modifications to them in memory, mainly by the DRA plugin.
// A non-default implementation can be plugged into the framework to simulate the state of DRA objects.
SharedDRAManager() SharedDRAManager
// RunFilterPluginsWithNominatedPods runs the set of configured filter plugins for nominated pod on the given node.
RunFilterPluginsWithNominatedPods(ctx context.Context, state fwk.CycleState, pod *v1.Pod, info fwk.NodeInfo) *fwk.Status
// Extenders returns registered scheduler extenders.
Extenders() []Extender
// Parallelizer returns a parallelizer holding parallelism for scheduler.
Parallelizer() parallelize.Parallelizer
// APIDispatcher returns a fwk.APIDispatcher that can be used to dispatch API calls directly.
// This is non-nil if the SchedulerAsyncAPICalls feature gate is enabled.
APIDispatcher() fwk.APIDispatcher
// APICacher returns an APICacher that coordinates API calls with the scheduler's internal cache.
// Use this to ensure the scheduler's view of the cluster remains consistent.
// This is non-nil if the SchedulerAsyncAPICalls feature gate is enabled.
APICacher() APICacher
}
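As a sketch of the snapshot guidance above, a helper that is safe to call from Filter or Score, but not from PreEnqueue or the binding cycle, where informer listers should be used instead:

// nodeFromSnapshot reads a node through the cycle-scoped snapshot.
func nodeFromSnapshot(h Handle, nodeName string) (*v1.Node, error) {
	ni, err := h.SnapshotSharedLister().NodeInfos().Get(nodeName)
	if err != nil {
		return nil, err
	}
	return ni.Node(), nil
}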
// APICacher defines methods that send API calls through the scheduler's cache
// before they are executed asynchronously by the APIDispatcher.
// This ensures the scheduler's internal state is updated optimistically,
// reflecting the intended outcome of the call.
// These methods should be used only if the SchedulerAsyncAPICalls feature gate is enabled.
type APICacher interface {
// PatchPodStatus sends a patch request for a Pod's status.
// The patch could be first applied to the cached Pod object and then the API call is executed asynchronously.
// It returns a channel that can be used to wait for the call's completion.
PatchPodStatus(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *NominatingInfo) (<-chan error, error)
// BindPod sends a binding request. The binding could be first applied to the cached Pod object
// and then the API call is executed asynchronously.
// It returns a channel that can be used to wait for the call's completion.
BindPod(binding *v1.Binding) (<-chan error, error)
// WaitOnFinish blocks until the result of an API call is sent to the given onFinish channel
// (returned by methods BindPod or PreemptPod).
//
// It returns the error received from the channel.
// It also returns nil if the call was skipped or overwritten,
// as these are considered successful lifecycle outcomes.
// Direct onFinish channel read can be used to access these results.
WaitOnFinish(ctx context.Context, onFinish <-chan error) error
}
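A sketch of how a caller might route a binding through the APICacher when the feature gate is on, falling back to a direct client call otherwise (the fallback pattern is an illustration, not scheduler code):

func bindPod(ctx context.Context, h Handle, binding *v1.Binding) error {
	cacher := h.APICacher()
	if cacher == nil {
		// SchedulerAsyncAPICalls disabled: bind synchronously via the client.
		return h.ClientSet().CoreV1().Pods(binding.Namespace).Bind(ctx, binding, metav1.CreateOptions{})
	}
	onFinish, err := cacher.BindPod(binding)
	if err != nil {
		return err
	}
	// WaitOnFinish also returns nil for skipped or overwritten calls.
	return cacher.WaitOnFinish(ctx, onFinish)
}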
// APICallImplementations defines constructors for each fwk.APICall that is used by the scheduler internally.
type APICallImplementations[T, K fwk.APICall] struct {
// PodStatusPatch is a constructor used to create fwk.APICall object for pod status patch.
PodStatusPatch func(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *NominatingInfo) T
// PodBinding is a constructor used to create fwk.APICall object for pod binding.
PodBinding func(binding *v1.Binding) K
}
// PreFilterResult wraps needed info for scheduler framework to act upon PreFilter phase.
type PreFilterResult struct {
// The set of nodes that should be considered downstream; if nil then
// all nodes are eligible.
NodeNames sets.Set[string]
}
func (p *PreFilterResult) AllNodes() bool {
return p == nil || p.NodeNames == nil
}
func (p *PreFilterResult) Merge(in *PreFilterResult) *PreFilterResult {
if p.AllNodes() && in.AllNodes() {
return nil
}
r := PreFilterResult{}
if p.AllNodes() {
r.NodeNames = in.NodeNames.Clone()
return &r
}
if in.AllNodes() {
r.NodeNames = p.NodeNames.Clone()
return &r
}
r.NodeNames = p.NodeNames.Intersection(in.NodeNames)
return &r
}
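The merge semantics in short: nil means "all nodes", and two concrete sets intersect. A small usage sketch:

func examplePreFilterResultMerge() {
	a := &PreFilterResult{NodeNames: sets.New("node1", "node2")}
	b := &PreFilterResult{NodeNames: sets.New("node2", "node3")}
	_ = a.Merge(b) // NodeNames = {"node2"}, the intersection

	var all *PreFilterResult // nil receiver: all nodes are eligible
	_ = all.Merge(b)         // clone of b's set: {"node2", "node3"}
}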
type NominatingMode int
const (
ModeNoop NominatingMode = iota
ModeOverride
)
type NominatingInfo struct {
NominatedNodeName string
NominatingMode NominatingMode
}
// PostFilterResult wraps needed info for scheduler framework to act upon PostFilter phase.
type PostFilterResult struct {
*NominatingInfo
}
func NewPostFilterResultWithNominatedNode(name string) *PostFilterResult {
return &PostFilterResult{
NominatingInfo: &NominatingInfo{
func NewPostFilterResultWithNominatedNode(name string) *fwk.PostFilterResult {
return &fwk.PostFilterResult{
NominatingInfo: &fwk.NominatingInfo{
NominatedNodeName: name,
NominatingMode: ModeOverride,
NominatingMode: fwk.ModeOverride,
},
}
}
func (ni *NominatingInfo) Mode() NominatingMode {
if ni == nil {
return ModeNoop
}
return ni.NominatingMode
}
// PodActivator abstracts operations in the scheduling queue.
type PodActivator interface {
// Activate moves the given pods to activeQ.
// If a pod isn't found in unschedulablePods or backoffQ and it's in-flight,
// the wildcard event is registered so that the pod will be requeued when it comes back.
// But, if a pod isn't found in unschedulablePods or backoffQ and it's not in-flight (i.e., completely unknown pod),
// Activate would ignore the pod.
Activate(logger klog.Logger, pods map[string]*v1.Pod)
}
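A sketch of a plugin reacting to an external event by re-activating pods it previously left unschedulable; the namespaced-name key convention for the map is an assumption:

func activateGroup(logger klog.Logger, activator PodActivator, pods []*v1.Pod) {
	m := make(map[string]*v1.Pod, len(pods))
	for _, p := range pods {
		m[p.Namespace+"/"+p.Name] = p // assumed key convention
	}
	activator.Activate(logger, m)
}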
// PodNominator abstracts operations to maintain nominated Pods.
type PodNominator interface {
// AddNominatedPod adds the given pod to the nominator or
// updates it if it already exists.
AddNominatedPod(logger klog.Logger, pod fwk.PodInfo, nominatingInfo *NominatingInfo)
// DeleteNominatedPodIfExists deletes nominatedPod from internal cache. It's a no-op if it doesn't exist.
DeleteNominatedPodIfExists(pod *v1.Pod)
// UpdateNominatedPod updates the <oldPod> with <newPod>.
UpdateNominatedPod(logger klog.Logger, oldPod *v1.Pod, newPodInfo fwk.PodInfo)
// NominatedPodsForNode returns nominatedPods on the given node.
NominatedPodsForNode(nodeName string) []fwk.PodInfo
}
// PluginsRunner abstracts operations to run some plugins.
// This is used by preemption PostFilter plugins when evaluating the feasibility of
// scheduling the pod on nodes if certain running pods were evicted.
type PluginsRunner interface {
// RunPreScorePlugins runs the set of configured PreScore plugins. If any
// of these plugins returns any status other than "Success", the given pod is rejected.
RunPreScorePlugins(context.Context, fwk.CycleState, *v1.Pod, []fwk.NodeInfo) *fwk.Status
// RunScorePlugins runs the set of configured scoring plugins.
// It returns a list that stores scores from each plugin and total score for each Node.
// It also returns *fwk.Status, which is set to non-success if any of the plugins returns
// a non-success status.
RunScorePlugins(context.Context, fwk.CycleState, *v1.Pod, []fwk.NodeInfo) ([]NodePluginScores, *fwk.Status)
// RunFilterPlugins runs the set of configured Filter plugins for pod on
// the given node. Note that for the node being evaluated, the passed nodeInfo
// reference could be different from the one in NodeInfoSnapshot map (e.g., pods
// considered to be running on the node could be different). For example, during
// preemption, we may pass a copy of the original nodeInfo object that has some pods
// removed from it to evaluate the possibility of preempting them to
// schedule the target pod.
RunFilterPlugins(context.Context, fwk.CycleState, *v1.Pod, fwk.NodeInfo) *fwk.Status
// RunPreFilterExtensionAddPod calls the AddPod interface for the set of configured
// PreFilter plugins. It returns directly if any of the plugins return any
// status other than Success.
RunPreFilterExtensionAddPod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status
// RunPreFilterExtensionRemovePod calls the RemovePod interface for the set of configured
// PreFilter plugins. It returns directly if any of the plugins return any
// status other than Success.
RunPreFilterExtensionRemovePod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status
}
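A sketch of the preemption pattern these methods exist for: the caller prepares a nodeInfo copy with a victim's pod already removed, keeps PreFilter state in sync, and re-runs the Filter plugins:

func feasibleWithoutVictim(ctx context.Context, pr PluginsRunner, state fwk.CycleState, pod *v1.Pod, victim fwk.PodInfo, nodeInfoWithoutVictim fwk.NodeInfo) *fwk.Status {
	// Let PreFilter plugins forget the victim before re-evaluating.
	if status := pr.RunPreFilterExtensionRemovePod(ctx, state, pod, victim, nodeInfoWithoutVictim); !status.IsSuccess() {
		return status
	}
	// Re-run Filter against the reduced node state.
	return pr.RunFilterPlugins(ctx, state, pod, nodeInfoWithoutVictim)
}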

View file

@ -17,252 +17,14 @@ limitations under the License.
package framework
import (
"errors"
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/util/sets"
fwk "k8s.io/kube-scheduler/framework"
st "k8s.io/kubernetes/pkg/scheduler/testing"
)
var errorStatus = fwk.NewStatus(fwk.Error, "internal error")
var statusWithErr = fwk.AsStatus(errors.New("internal error"))
func TestStatus(t *testing.T) {
tests := []struct {
name string
status *fwk.Status
expectedCode fwk.Code
expectedMessage string
expectedIsSuccess bool
expectedIsWait bool
expectedIsSkip bool
expectedAsError error
}{
{
name: "success status",
status: fwk.NewStatus(fwk.Success, ""),
expectedCode: fwk.Success,
expectedMessage: "",
expectedIsSuccess: true,
expectedIsWait: false,
expectedIsSkip: false,
expectedAsError: nil,
},
{
name: "wait status",
status: fwk.NewStatus(fwk.Wait, ""),
expectedCode: fwk.Wait,
expectedMessage: "",
expectedIsSuccess: false,
expectedIsWait: true,
expectedIsSkip: false,
expectedAsError: nil,
},
{
name: "error status",
status: fwk.NewStatus(fwk.Error, "unknown error"),
expectedCode: fwk.Error,
expectedMessage: "unknown error",
expectedIsSuccess: false,
expectedIsWait: false,
expectedIsSkip: false,
expectedAsError: errors.New("unknown error"),
},
{
name: "skip status",
status: fwk.NewStatus(fwk.Skip, ""),
expectedCode: fwk.Skip,
expectedMessage: "",
expectedIsSuccess: false,
expectedIsWait: false,
expectedIsSkip: true,
expectedAsError: nil,
},
{
name: "nil status",
status: nil,
expectedCode: fwk.Success,
expectedMessage: "",
expectedIsSuccess: true,
expectedIsSkip: false,
expectedAsError: nil,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if test.status.Code() != test.expectedCode {
t.Errorf("expect status.Code() returns %v, but %v", test.expectedCode, test.status.Code())
}
if test.status.Message() != test.expectedMessage {
t.Errorf("expect status.Message() returns %v, but %v", test.expectedMessage, test.status.Message())
}
if test.status.IsSuccess() != test.expectedIsSuccess {
t.Errorf("expect status.IsSuccess() returns %v, but %v", test.expectedIsSuccess, test.status.IsSuccess())
}
if test.status.IsWait() != test.expectedIsWait {
t.Errorf("status.IsWait() returns %v, but want %v", test.status.IsWait(), test.expectedIsWait)
}
if test.status.IsSkip() != test.expectedIsSkip {
t.Errorf("status.IsSkip() returns %v, but want %v", test.status.IsSkip(), test.expectedIsSkip)
}
if test.status.AsError() == test.expectedAsError {
return
}
if test.status.AsError().Error() != test.expectedAsError.Error() {
t.Errorf("expect status.AsError() returns %v, but %v", test.expectedAsError, test.status.AsError())
}
})
}
}
func TestPreFilterResultMerge(t *testing.T) {
tests := map[string]struct {
receiver *PreFilterResult
in *PreFilterResult
want *PreFilterResult
}{
"all nil": {},
"nil receiver empty input": {
in: &PreFilterResult{NodeNames: sets.New[string]()},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"empty receiver nil input": {
receiver: &PreFilterResult{NodeNames: sets.New[string]()},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"empty receiver empty input": {
receiver: &PreFilterResult{NodeNames: sets.New[string]()},
in: &PreFilterResult{NodeNames: sets.New[string]()},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"nil receiver populated input": {
in: &PreFilterResult{NodeNames: sets.New("node1")},
want: &PreFilterResult{NodeNames: sets.New("node1")},
},
"empty receiver populated input": {
receiver: &PreFilterResult{NodeNames: sets.New[string]()},
in: &PreFilterResult{NodeNames: sets.New("node1")},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"populated receiver nil input": {
receiver: &PreFilterResult{NodeNames: sets.New("node1")},
want: &PreFilterResult{NodeNames: sets.New("node1")},
},
"populated receiver empty input": {
receiver: &PreFilterResult{NodeNames: sets.New("node1")},
in: &PreFilterResult{NodeNames: sets.New[string]()},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"populated receiver and input": {
receiver: &PreFilterResult{NodeNames: sets.New("node1", "node2")},
in: &PreFilterResult{NodeNames: sets.New("node2", "node3")},
want: &PreFilterResult{NodeNames: sets.New("node2")},
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
got := test.receiver.Merge(test.in)
if diff := cmp.Diff(test.want, got); diff != "" {
t.Errorf("unexpected diff (-want, +got):\n%s", diff)
}
})
}
}
func TestIsStatusEqual(t *testing.T) {
tests := []struct {
name string
x, y *fwk.Status
want bool
}{
{
name: "two nil should be equal",
x: nil,
y: nil,
want: true,
},
{
name: "nil should be equal to success status",
x: nil,
y: fwk.NewStatus(fwk.Success),
want: true,
},
{
name: "nil should not be equal with status except success",
x: nil,
y: fwk.NewStatus(fwk.Error, "internal error"),
want: false,
},
{
name: "one status should be equal to itself",
x: errorStatus,
y: errorStatus,
want: true,
},
{
name: "same type statuses without reasons should be equal",
x: fwk.NewStatus(fwk.Success),
y: fwk.NewStatus(fwk.Success),
want: true,
},
{
name: "statuses with same message should be equal",
x: fwk.NewStatus(fwk.Unschedulable, "unschedulable"),
y: fwk.NewStatus(fwk.Unschedulable, "unschedulable"),
want: true,
},
{
name: "error statuses with same message should be equal",
x: fwk.NewStatus(fwk.Error, "error"),
y: fwk.NewStatus(fwk.Error, "error"),
want: true,
},
{
name: "statuses with different reasons should not be equal",
x: fwk.NewStatus(fwk.Unschedulable, "unschedulable"),
y: fwk.NewStatus(fwk.Unschedulable, "unschedulable", "injected filter status"),
want: false,
},
{
name: "statuses with different codes should not be equal",
x: fwk.NewStatus(fwk.Error, "internal error"),
y: fwk.NewStatus(fwk.Unschedulable, "internal error"),
want: false,
},
{
name: "wrap error status should be equal with original one",
x: statusWithErr,
y: fwk.AsStatus(fmt.Errorf("error: %w", statusWithErr.AsError())),
want: true,
},
{
name: "statues with different errors that have the same message shouldn't be equal",
x: fwk.AsStatus(errors.New("error")),
y: fwk.AsStatus(errors.New("error")),
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.x.Equal(tt.y); got != tt.want {
t.Errorf("cmp.Equal() = %v, want %v", got, tt.want)
}
})
}
}
type nodeInfoLister []fwk.NodeInfo
func (nodes nodeInfoLister) Get(nodeName string) (fwk.NodeInfo, error) {

View file

@ -27,12 +27,13 @@ import (
// DefaultParallelism is the default parallelism used in scheduler.
const DefaultParallelism int = 16
// Parallelizer holds the parallelism for scheduler.
// Parallelizer implements k8s.io/kube-scheduler/framework.Parallelizer. It helps run scheduling operations in parallel chunks where possible, to improve performance and CPU utilization.
// It wraps logic of k8s.io/client-go/util/workqueue to run operations on multiple workers.
type Parallelizer struct {
parallelism int
}
// NewParallelizer returns an object holding the parallelism.
// NewParallelizer returns an object holding the parallelism (number of workers).
func NewParallelizer(p int) Parallelizer {
return Parallelizer{parallelism: p}
}
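A usage sketch of chunked parallel work with this type, using the workqueue-style Until helper this package provides (the per-node scoring body and the v1 import are stand-ins for the example):

func exampleParallelScore(ctx context.Context, nodes []*v1.Node) []int64 {
	p := NewParallelizer(DefaultParallelism)
	scores := make([]int64, len(nodes))
	// Each index is handled by one of up to DefaultParallelism workers.
	p.Until(ctx, len(nodes), func(i int) {
		scores[i] = int64(len(nodes[i].Status.Images)) // stand-in per-node work
	}, "exampleScoring")
	return scores
}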

View file

@ -24,7 +24,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
@ -33,13 +32,13 @@ const Name = names.DefaultBinder
// DefaultBinder binds pods to nodes using a k8s client.
type DefaultBinder struct {
handle framework.Handle
handle fwk.Handle
}
var _ framework.BindPlugin = &DefaultBinder{}
var _ fwk.BindPlugin = &DefaultBinder{}
// New creates a DefaultBinder.
func New(_ context.Context, _ runtime.Object, handle framework.Handle) (framework.Plugin, error) {
func New(_ context.Context, _ runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
return &DefaultBinder{handle: handle}, nil
}

View file

@ -36,7 +36,6 @@ import (
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/framework/preemption"
@ -66,7 +65,7 @@ type MoreImportantPodFunc func(pod1, pod2 *v1.Pod) bool
// DefaultPreemption is a PostFilter plugin implements the preemption logic.
type DefaultPreemption struct {
fh framework.Handle
fh fwk.Handle
fts feature.Features
args config.DefaultPreemptionArgs
podLister corelisters.PodLister
@ -86,8 +85,8 @@ type DefaultPreemption struct {
MoreImportantPod MoreImportantPodFunc
}
var _ framework.PostFilterPlugin = &DefaultPreemption{}
var _ framework.PreEnqueuePlugin = &DefaultPreemption{}
var _ fwk.PostFilterPlugin = &DefaultPreemption{}
var _ fwk.PreEnqueuePlugin = &DefaultPreemption{}
// Name returns name of the plugin. It is used in logs, etc.
func (pl *DefaultPreemption) Name() string {
@ -95,7 +94,7 @@ func (pl *DefaultPreemption) Name() string {
}
// New initializes a new plugin and returns it. The plugin type is retained to allow modification.
func New(_ context.Context, dpArgs runtime.Object, fh framework.Handle, fts feature.Features) (*DefaultPreemption, error) {
func New(_ context.Context, dpArgs runtime.Object, fh fwk.Handle, fts feature.Features) (*DefaultPreemption, error) {
args, ok := dpArgs.(*config.DefaultPreemptionArgs)
if !ok {
return nil, fmt.Errorf("got args of type %T, want *DefaultPreemptionArgs", dpArgs)
@ -129,7 +128,7 @@ func New(_ context.Context, dpArgs runtime.Object, fh framework.Handle, fts feat
}
// PostFilter invoked at the postFilter extension point.
func (pl *DefaultPreemption) PostFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, m framework.NodeToStatusReader) (*framework.PostFilterResult, *fwk.Status) {
func (pl *DefaultPreemption) PostFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, m fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status) {
defer func() {
metrics.PreemptionAttempts.Inc()
}()

View file

@ -123,7 +123,7 @@ type TestPlugin struct {
name string
}
func newTestPlugin(_ context.Context, injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) {
func newTestPlugin(_ context.Context, injArgs runtime.Object, f fwk.Handle) (fwk.Plugin, error) {
return &TestPlugin{name: "test-plugin"}, nil
}
@ -145,11 +145,11 @@ func (pl *TestPlugin) Name() string {
return pl.name
}
func (pl *TestPlugin) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *TestPlugin) PreFilterExtensions() fwk.PreFilterExtensions {
return pl
}
func (pl *TestPlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *TestPlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
return nil, nil
}
@ -173,8 +173,8 @@ func TestPostFilter(t *testing.T) {
pdbs []*policy.PodDisruptionBudget
nodes []*v1.Node
filteredNodesStatuses *framework.NodeToStatus
extender framework.Extender
wantResult *framework.PostFilterResult
extender fwk.Extender
wantResult *fwk.PostFilterResult
wantStatus *fwk.Status
}{
{
@ -223,7 +223,7 @@ func TestPostFilter(t *testing.T) {
wantStatus: fwk.NewStatus(fwk.Unschedulable, "preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling."),
},
{
name: "preemption should respect absent NodeToStatusMap entry meaning UnschedulableAndUnresolvable",
name: "preemption should respect absent NodeToStatusReader entry meaning UnschedulableAndUnresolvable",
pod: st.MakePod().Name("p").UID("p").Namespace(v1.NamespaceDefault).Priority(highPriority).Obj(),
pods: []*v1.Pod{
st.MakePod().Name("p1").UID("p1").Namespace(v1.NamespaceDefault).Node("node1").Obj(),
@ -426,7 +426,7 @@ func TestPostFilter(t *testing.T) {
tf.RegisterPluginAsExtensions("test-plugin", newTestPlugin, "PreFilter"),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
}
var extenders []framework.Extender
var extenders []fwk.Extender
if tt.extender != nil {
extenders = append(extenders, tt.extender)
}
@ -1156,7 +1156,7 @@ func TestDryRunPreemption(t *testing.T) {
registeredPlugins := append([]tf.RegisterPluginFunc{
tf.RegisterFilterPlugin(
"FakeFilter",
func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return &fakePlugin, nil
},
)},
@ -1939,7 +1939,7 @@ func TestPreempt(t *testing.T) {
extenders []*tf.FakeExtender
nodeNames []string
registerPlugin tf.RegisterPluginFunc
want *framework.PostFilterResult
want *fwk.PostFilterResult
expectedPods []string // list of preempted pods
}{
{
@ -2196,7 +2196,7 @@ func TestPreempt(t *testing.T) {
cachedNodeInfo.SetNode(node)
cachedNodeInfoMap[node.Name] = cachedNodeInfo
}
var extenders []framework.Extender
var extenders []fwk.Extender
for _, extender := range test.extenders {
// Set nodeInfoMap as extenders cached node information.
extender.CachedNodeNameToInfo = cachedNodeInfoMap

View file

@ -31,12 +31,12 @@ import (
resourceslicetracker "k8s.io/dynamic-resource-allocation/resourceslice/tracker"
"k8s.io/dynamic-resource-allocation/structured"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
)
var _ framework.SharedDRAManager = &DefaultDRAManager{}
var _ fwk.SharedDRAManager = &DefaultDRAManager{}
// DefaultDRAManager is the default implementation of SharedDRAManager. It obtains the DRA objects
// from API informers, and uses an AssumeCache and a map of in-flight allocations in order
@ -68,19 +68,19 @@ func NewDRAManager(ctx context.Context, claimsCache *assumecache.AssumeCache, re
return manager
}
func (s *DefaultDRAManager) ResourceClaims() framework.ResourceClaimTracker {
func (s *DefaultDRAManager) ResourceClaims() fwk.ResourceClaimTracker {
return s.resourceClaimTracker
}
func (s *DefaultDRAManager) ResourceSlices() framework.ResourceSliceLister {
func (s *DefaultDRAManager) ResourceSlices() fwk.ResourceSliceLister {
return s.resourceSliceLister
}
func (s *DefaultDRAManager) DeviceClasses() framework.DeviceClassLister {
func (s *DefaultDRAManager) DeviceClasses() fwk.DeviceClassLister {
return s.deviceClassLister
}
var _ framework.ResourceSliceLister = &resourceSliceLister{}
var _ fwk.ResourceSliceLister = &resourceSliceLister{}
type resourceSliceLister struct {
tracker *resourceslicetracker.Tracker
@ -90,7 +90,7 @@ func (l *resourceSliceLister) ListWithDeviceTaintRules() ([]*resourceapi.Resourc
return l.tracker.ListPatchedResourceSlices()
}
var _ framework.DeviceClassLister = &deviceClassLister{}
var _ fwk.DeviceClassLister = &deviceClassLister{}
type deviceClassLister struct {
classLister resourcelisters.DeviceClassLister
@ -104,7 +104,7 @@ func (l *deviceClassLister) List() ([]*resourceapi.DeviceClass, error) {
return l.classLister.List(labels.Everything())
}
var _ framework.ResourceClaimTracker = &claimTracker{}
var _ fwk.ResourceClaimTracker = &claimTracker{}
type claimTracker struct {
// cache enables temporarily storing a newer claim object

View file

@ -177,14 +177,14 @@ type DynamicResources struct {
filterTimeout time.Duration
enableConsumableCapacity bool
fh framework.Handle
fh fwk.Handle
clientset kubernetes.Interface
celCache *cel.Cache
draManager framework.SharedDRAManager
draManager fwk.SharedDRAManager
}
// New initializes a new plugin and returns it.
func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) {
func New(ctx context.Context, plArgs runtime.Object, fh fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
if !fts.EnableDynamicResourceAllocation {
// Disabled, won't do anything.
return &DynamicResources{}, nil
@ -224,13 +224,13 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe
return pl, nil
}
var _ framework.PreEnqueuePlugin = &DynamicResources{}
var _ framework.PreFilterPlugin = &DynamicResources{}
var _ framework.FilterPlugin = &DynamicResources{}
var _ framework.PostFilterPlugin = &DynamicResources{}
var _ framework.ReservePlugin = &DynamicResources{}
var _ framework.EnqueueExtensions = &DynamicResources{}
var _ framework.PreBindPlugin = &DynamicResources{}
var _ fwk.PreEnqueuePlugin = &DynamicResources{}
var _ fwk.PreFilterPlugin = &DynamicResources{}
var _ fwk.FilterPlugin = &DynamicResources{}
var _ fwk.PostFilterPlugin = &DynamicResources{}
var _ fwk.ReservePlugin = &DynamicResources{}
var _ fwk.EnqueueExtensions = &DynamicResources{}
var _ fwk.PreBindPlugin = &DynamicResources{}
// Name returns name of the plugin. It is used in logs, etc.
func (pl *DynamicResources) Name() string {
@ -546,7 +546,7 @@ func (pl *DynamicResources) preFilterExtendedResources(pod *v1.Pod, logger klog.
// PreFilter invoked at the prefilter extension point to check if pod has all
// immediate claims bound. UnschedulableAndUnresolvable is returned if
// the pod cannot be scheduled at the moment on any node.
func (pl *DynamicResources) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *DynamicResources) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
if !pl.enabled {
return nil, fwk.NewStatus(fwk.Skip)
}
@ -727,7 +727,7 @@ func (pl *DynamicResources) validateDeviceClass(logger klog.Logger, deviceClassN
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *DynamicResources) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *DynamicResources) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
@ -1041,7 +1041,7 @@ func isSpecialClaimName(name string) bool {
// deallocated to help get the Pod schedulable. If yes, it picks one and
// requests its deallocation. This only gets called when filtering found no
// suitable node.
func (pl *DynamicResources) PostFilter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusReader) (*framework.PostFilterResult, *fwk.Status) {
func (pl *DynamicResources) PostFilter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status) {
if !pl.enabled {
return nil, fwk.NewStatus(fwk.Unschedulable, "plugin disabled")
}

View file

@ -566,7 +566,7 @@ func (p perNodeResult) forNode(nodeName string) result {
type want struct {
preenqueue result
preFilterResult *framework.PreFilterResult
preFilterResult *fwk.PreFilterResult
prefilter result
filter perNodeResult
prescore result
@ -575,7 +575,7 @@ type want struct {
prebindPreFlight *fwk.Status
prebind result
postbind result
postFilterResult *framework.PostFilterResult
postFilterResult *fwk.PostFilterResult
postfilter result
// unreserveAfterBindFailure, if set, triggers a call to Unreserve

View file

@ -19,10 +19,10 @@ package extended
import (
v1 "k8s.io/api/core/v1"
"k8s.io/api/resource/v1beta1"
"k8s.io/kubernetes/pkg/scheduler/framework"
fwk "k8s.io/kube-scheduler/framework"
)
func DeviceClassMapping(draManager framework.SharedDRAManager) (map[v1.ResourceName]string, error) {
func DeviceClassMapping(draManager fwk.SharedDRAManager) (map[v1.ResourceName]string, error) {
classes, err := draManager.DeviceClasses().List()
extendedResources := make(map[v1.ResourceName]string, len(classes))
if err != nil {

View file

@ -26,7 +26,7 @@ import (
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
resourcelisters "k8s.io/client-go/listers/resource/v1"
"k8s.io/kubernetes/pkg/scheduler/framework"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/test/utils/ktesting"
)
@ -34,17 +34,17 @@ type fakeDRAManager struct {
deviceClassLister *deviceClassLister
}
var _ framework.DeviceClassLister = &deviceClassLister{}
var _ fwk.DeviceClassLister = &deviceClassLister{}
func (f *fakeDRAManager) ResourceClaims() framework.ResourceClaimTracker {
func (f *fakeDRAManager) ResourceClaims() fwk.ResourceClaimTracker {
return nil
}
func (f *fakeDRAManager) ResourceSlices() framework.ResourceSliceLister {
func (f *fakeDRAManager) ResourceSlices() fwk.ResourceSliceLister {
return nil
}
func (f *fakeDRAManager) DeviceClasses() framework.DeviceClassLister {
func (f *fakeDRAManager) DeviceClasses() fwk.DeviceClassLister {
return f.deviceClassLister
}

View file

@ -18,14 +18,13 @@ package helper
import (
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
// DefaultNormalizeScore generates a Normalize Score function that can normalize the
// scores from [0, max(scores)] to [0, maxPriority]. If reverse is set to true, it
// reverses the scores by subtracting it from maxPriority.
// Note: The input scores are always assumed to be non-negative integers.
func DefaultNormalizeScore(maxPriority int64, reverse bool, scores framework.NodeScoreList) *fwk.Status {
func DefaultNormalizeScore(maxPriority int64, reverse bool, scores fwk.NodeScoreList) *fwk.Status {
var maxCount int64
for i := range scores {
if scores[i].Score > maxCount {

View file

@ -21,8 +21,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
"k8s.io/kubernetes/pkg/scheduler/framework"
fwk "k8s.io/kube-scheduler/framework"
)
func TestDefaultNormalizeScore(t *testing.T) {
@ -75,17 +74,17 @@ func TestDefaultNormalizeScore(t *testing.T) {
for i, test := range tests {
t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
scores := framework.NodeScoreList{}
scores := fwk.NodeScoreList{}
for _, score := range test.scores {
scores = append(scores, framework.NodeScore{Score: score})
scores = append(scores, fwk.NodeScore{Score: score})
}
expectedScores := framework.NodeScoreList{}
expectedScores := fwk.NodeScoreList{}
for _, score := range test.expectedScores {
expectedScores = append(expectedScores, framework.NodeScore{Score: score})
expectedScores = append(expectedScores, fwk.NodeScore{Score: score})
}
DefaultNormalizeScore(framework.MaxNodeScore, test.reverse, scores)
DefaultNormalizeScore(fwk.MaxNodeScore, test.reverse, scores)
if diff := cmp.Diff(expectedScores, scores); diff != "" {
t.Errorf("Unexpected scores (-want, +got):\n%s", diff)
}

View file

@ -23,7 +23,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
@ -37,10 +36,10 @@ const (
// ImageLocality is a score plugin that favors nodes that already have requested pod container's images.
type ImageLocality struct {
handle framework.Handle
handle fwk.Handle
}
var _ framework.ScorePlugin = &ImageLocality{}
var _ fwk.ScorePlugin = &ImageLocality{}
// Name is the name of the plugin used in the plugin registry and configurations.
const Name = names.ImageLocality
@ -65,12 +64,12 @@ func (pl *ImageLocality) Score(ctx context.Context, state fwk.CycleState, pod *v
}
// ScoreExtensions of the Score plugin.
func (pl *ImageLocality) ScoreExtensions() framework.ScoreExtensions {
func (pl *ImageLocality) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, h framework.Handle) (framework.Plugin, error) {
func New(_ context.Context, _ runtime.Object, h fwk.Handle) (fwk.Plugin, error) {
return &ImageLocality{handle: h}, nil
}
@ -84,7 +83,7 @@ func calculatePriority(sumScores int64, numContainers int) int64 {
sumScores = maxThreshold
}
return framework.MaxNodeScore * (sumScores - minThreshold) / (maxThreshold - minThreshold)
return fwk.MaxNodeScore * (sumScores - minThreshold) / (maxThreshold - minThreshold)
}
// sumImageScores returns the sum of image scores of all the containers that are already on the node.

View file

@ -27,6 +27,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2/ktesting"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/backend/cache"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
@ -239,7 +240,7 @@ func TestImageLocalityPriority(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
expectedList fwk.NodeScoreList
name string
}{
{
@ -254,7 +255,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 100 * (250M/2 - 23M)/(1000M * 2 - 23M) = 5
pod: &v1.Pod{Spec: test40250},
nodes: []*v1.Node{makeImageNode("node1", node403002000), makeImageNode("node2", node25010)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 5}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 5}},
name: "two images spread on two nodes, prefer the larger image one",
},
{
@ -269,7 +270,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 0
pod: &v1.Pod{Spec: test40300},
nodes: []*v1.Node{makeImageNode("node1", node403002000), makeImageNode("node2", node25010)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 7}, {Name: "node2", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 7}, {Name: "node2", Score: 0}},
name: "two images on one node, prefer this node",
},
{
@ -284,7 +285,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 0 (10M/2 < 23M, min-threshold)
pod: &v1.Pod{Spec: testMinMax},
nodes: []*v1.Node{makeImageNode("node1", node400030), makeImageNode("node2", node25010)},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: 0}},
name: "if exceed limit, use limit",
},
{
@ -303,7 +304,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 0
pod: &v1.Pod{Spec: testMinMax},
nodes: []*v1.Node{makeImageNode("node1", node400030), makeImageNode("node2", node25010), makeImageNode("node3", nodeWithNoImages)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 66}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 66}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
name: "if exceed limit, use limit (with node which has no images present)",
},
{
@ -322,7 +323,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 0
pod: &v1.Pod{Spec: test300600900},
nodes: []*v1.Node{makeImageNode("node1", node60040900), makeImageNode("node2", node300600900), makeImageNode("node3", nodeWithNoImages)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 36}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 36}, {Name: "node3", Score: 0}},
name: "pod with multiple large images, node2 is preferred",
},
{
@ -337,7 +338,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 0
pod: &v1.Pod{Spec: test3040},
nodes: []*v1.Node{makeImageNode("node1", node203040), makeImageNode("node2", node400030)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 1}, {Name: "node2", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 1}, {Name: "node2", Score: 0}},
name: "pod with multiple small images",
},
{
@ -352,7 +353,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 100 * (30M * 1/2 - 23M) / (1000M * 2 - 23M) = 0
pod: &v1.Pod{Spec: test30Init300},
nodes: []*v1.Node{makeImageNode("node1", node403002000), makeImageNode("node2", node203040)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 6}, {Name: "node2", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 6}, {Name: "node2", Score: 0}},
name: "include InitContainers: two images spread on two nodes, prefer the larger image one",
},
}
@ -371,7 +372,7 @@ func TestImageLocalityPriority(t *testing.T) {
if err != nil {
t.Fatalf("creating plugin: %v", err)
}
var gotList framework.NodeScoreList
var gotList fwk.NodeScoreList
for _, n := range test.nodes {
nodeName := n.ObjectMeta.Name
// Currently, we use the snapshot instead of the tf.BuildNodeInfos to build the nodeInfo since some
@ -381,11 +382,11 @@ func TestImageLocalityPriority(t *testing.T) {
if err != nil {
t.Errorf("failed to get node %q from snapshot: %v", nodeName, err)
}
score, status := p.(framework.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
score, status := p.(fwk.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
gotList = append(gotList, fwk.NodeScore{Name: nodeName, Score: score})
}
if diff := cmp.Diff(test.expectedList, gotList); diff != "" {

View file

@ -271,7 +271,7 @@ func (pl *InterPodAffinity) getIncomingAffinityAntiAffinityCounts(ctx context.Co
}
// PreFilter invoked at the prefilter extension point.
func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, allNodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, allNodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
var nodesWithRequiredAntiAffinityPods []fwk.NodeInfo
var err error
if nodesWithRequiredAntiAffinityPods, err = pl.sharedLister.NodeInfos().HavePodsWithRequiredAntiAffinityList(); err != nil {
@ -309,7 +309,7 @@ func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState fwk.CycleS
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *InterPodAffinity) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *InterPodAffinity) PreFilterExtensions() fwk.PreFilterExtensions {
return pl
}

View file

@ -558,7 +558,7 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
}
p := plugintesting.SetupPluginWithInformers(ctx, t, schedruntime.FactoryAdapter(feature.Features{}, New), &config.InterPodAffinityArgs{}, snapshot, namespaces)
state := framework.NewCycleState()
_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, state, test.pod, nodeInfos)
_, preFilterStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, state, test.pod, nodeInfos)
if diff := cmp.Diff(test.wantPreFilterStatus, preFilterStatus); diff != "" {
t.Errorf("PreFilter: status does not match (-want,+got):\n%s", diff)
}
@ -567,7 +567,7 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
}
nodeInfo := mustGetNodeInfo(t, snapshot, test.node.Name)
gotStatus := p.(framework.FilterPlugin).Filter(ctx, state, test.pod, nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, state, test.pod, nodeInfo)
if diff := cmp.Diff(test.wantFilterStatus, gotStatus); diff != "" {
t.Errorf("Filter: status does not match (-want,+got):\n%s", diff)
}
@ -977,7 +977,7 @@ func TestRequiredAffinityMultipleNodes(t *testing.T) {
&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "NS1"}},
})
state := framework.NewCycleState()
_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, state, test.pod, nodeInfos)
_, preFilterStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, state, test.pod, nodeInfos)
if diff := cmp.Diff(test.wantPreFilterStatus, preFilterStatus); diff != "" {
t.Errorf("PreFilter: status does not match (-want,+got):\n%s", diff)
}
@ -986,7 +986,7 @@ func TestRequiredAffinityMultipleNodes(t *testing.T) {
}
for indexNode, node := range test.nodes {
nodeInfo := mustGetNodeInfo(t, snapshot, node.Name)
gotStatus := p.(framework.FilterPlugin).Filter(ctx, state, test.pod, nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, state, test.pod, nodeInfo)
if diff := cmp.Diff(test.wantFilterStatuses[indexNode], gotStatus); diff != "" {
t.Errorf("index: %d: Filter: status does not match (-want,+got):\n%s", indexTest, diff)
}
@ -1005,7 +1005,7 @@ func TestPreFilterDisabled(t *testing.T) {
defer cancel()
p := plugintesting.SetupPluginWithInformers(ctx, t, schedruntime.FactoryAdapter(feature.Features{}, New), &config.InterPodAffinityArgs{}, cache.NewEmptySnapshot(), nil)
cycleState := framework.NewCycleState()
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo)
wantStatus := fwk.AsStatus(fwk.ErrNotFound)
if diff := cmp.Diff(gotStatus, wantStatus); diff != "" {
t.Errorf("Status does not match (-want,+got):\n%s", diff)
@ -1254,7 +1254,7 @@ func TestPreFilterStateAddRemovePod(t *testing.T) {
}
p := plugintesting.SetupPluginWithInformers(ctx, t, schedruntime.FactoryAdapter(feature.Features{}, New), &config.InterPodAffinityArgs{}, snapshot, nil)
cycleState := framework.NewCycleState()
_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pendingPod, nodeInfos)
_, preFilterStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, cycleState, test.pendingPod, nodeInfos)
if !preFilterStatus.IsSuccess() {
t.Errorf("prefilter failed with status: %v", preFilterStatus)
}

View file

@ -28,8 +28,6 @@ import (
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/parallelize"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
@ -38,17 +36,17 @@ import (
// Name is the name of the plugin used in the plugin registry and configurations.
const Name = names.InterPodAffinity
var _ framework.PreFilterPlugin = &InterPodAffinity{}
var _ framework.FilterPlugin = &InterPodAffinity{}
var _ framework.PreScorePlugin = &InterPodAffinity{}
var _ framework.ScorePlugin = &InterPodAffinity{}
var _ framework.EnqueueExtensions = &InterPodAffinity{}
var _ fwk.PreFilterPlugin = &InterPodAffinity{}
var _ fwk.FilterPlugin = &InterPodAffinity{}
var _ fwk.PreScorePlugin = &InterPodAffinity{}
var _ fwk.ScorePlugin = &InterPodAffinity{}
var _ fwk.EnqueueExtensions = &InterPodAffinity{}
// InterPodAffinity is a plugin that checks inter pod affinity
type InterPodAffinity struct {
parallelizer parallelize.Parallelizer
parallelizer fwk.Parallelizer
args config.InterPodAffinityArgs
sharedLister framework.SharedLister
sharedLister fwk.SharedLister
nsLister listersv1.NamespaceLister
enableSchedulingQueueHint bool
}
@ -84,7 +82,7 @@ func (pl *InterPodAffinity) EventsToRegister(_ context.Context) ([]fwk.ClusterEv
}
// New initializes a new plugin and returns it.
func New(_ context.Context, plArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
func New(_ context.Context, plArgs runtime.Object, h fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
if h.SnapshotSharedLister() == nil {
return nil, fmt.Errorf("SnapshotSharedlister is nil")
}

View file

@ -255,7 +255,7 @@ func (pl *InterPodAffinity) Score(ctx context.Context, cycleState fwk.CycleState
}
// NormalizeScore normalizes the score for each filteredNode.
func (pl *InterPodAffinity) NormalizeScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *fwk.Status {
func (pl *InterPodAffinity) NormalizeScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, scores fwk.NodeScoreList) *fwk.Status {
s, err := getPreScoreState(cycleState)
if err != nil {
return fwk.AsStatus(err)
@ -280,7 +280,7 @@ func (pl *InterPodAffinity) NormalizeScore(ctx context.Context, cycleState fwk.C
for i := range scores {
fScore := float64(0)
if maxMinDiff > 0 {
fScore = float64(framework.MaxNodeScore) * (float64(scores[i].Score-minCount) / float64(maxMinDiff))
fScore = float64(fwk.MaxNodeScore) * (float64(scores[i].Score-minCount) / float64(maxMinDiff))
}
scores[i].Score = int64(fScore)
@ -290,6 +290,6 @@ func (pl *InterPodAffinity) NormalizeScore(ctx context.Context, cycleState fwk.C
}
// ScoreExtensions of the Score plugin.
func (pl *InterPodAffinity) ScoreExtensions() framework.ScoreExtensions {
func (pl *InterPodAffinity) ScoreExtensions() fwk.ScoreExtensions {
return pl
}

View file

@ -378,7 +378,7 @@ func TestPreferredAffinity(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
expectedList fwk.NodeScoreList
name string
ignorePreferredTermsOfExistingPods bool
wantStatus *fwk.Status
@ -410,7 +410,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
},
// node1, which has the label {"region": "China"} (matching the topology key) and existing pods that match the labelSelector, gets a high score
// node2, which has the label {"region": "China"}, matches the topology key and has the same label value as node1, so it gets the same high score as node1
@ -427,7 +427,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgChinaAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: fwk.MaxNodeScore}, {Name: "node3", Score: 0}},
},
// there are 2 regions, say regionChina(node1,node3,node4) and regionIndia(node2,node5), both regions have nodes that match the preference.
// But there are more nodes(actually more existing pods) in regionChina that match the preference than regionIndia.
@ -451,7 +451,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node4", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "node5", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: framework.MaxNodeScore}, {Name: "node4", Score: framework.MaxNodeScore}, {Name: "node5", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: fwk.MaxNodeScore}, {Name: "node4", Score: fwk.MaxNodeScore}, {Name: "node5", Score: 0}},
},
// Test with the different operators and values for pod affinity scheduling preference, including some match failures.
{
@ -467,7 +467,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 20}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 20}, {Name: "node2", Score: fwk.MaxNodeScore}, {Name: "node3", Score: 0}},
},
// Test the symmetry cases for affinity. The difference between affinity and symmetry is that, rather than the pod wanting to run together with some existing pods,
// the existing pods have the inter-pod affinity preference while the pod to schedule satisfies it.
@ -483,7 +483,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: fwk.MaxNodeScore}, {Name: "node3", Score: 0}},
},
{
name: "Affinity symmetry with namespace selector",
@ -497,7 +497,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
},
{
name: "AntiAffinity symmetry with namespace selector",
@ -511,7 +511,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: framework.MaxNodeScore}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: fwk.MaxNodeScore}, {Name: "node3", Score: fwk.MaxNodeScore}},
},
{
name: "Affinity symmetry: considered RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
@ -525,7 +525,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: fwk.MaxNodeScore}, {Name: "node3", Score: 0}},
},
// The pod to schedule prefers to stay away from some existing pods at node level, using pod anti-affinity.
@ -545,7 +545,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgChina}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: fwk.MaxNodeScore}},
},
{
name: "Anti Affinity: pod that does not match topology key & match the pods in nodes will get higher score comparing to others ",
@ -558,7 +558,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgChina}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: fwk.MaxNodeScore}},
},
{
name: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmatches will get high score",
@ -572,7 +572,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: fwk.MaxNodeScore}},
},
// Test the symmetry cases for anti affinity
{
@ -586,7 +586,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelAzAz2}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: fwk.MaxNodeScore}},
},
// Test both affinity and anti-affinity
{
@ -600,7 +600,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelAzAz1}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: 0}},
},
// Combined cases considering both affinity and anti-affinity: the pod to schedule and the existing pods have the same labels (they are in the same RC/service),
// the pod prefers to run together with its sibling pods in the same region, but wants to stay away from them at the node level,
@ -625,7 +625,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node4", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "node5", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: framework.MaxNodeScore}, {Name: "node4", Score: framework.MaxNodeScore}, {Name: "node5", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: fwk.MaxNodeScore}, {Name: "node4", Score: fwk.MaxNodeScore}, {Name: "node5", Score: 0}},
},
// Consider Affinity, Anti Affinity and symmetry together.
// for Affinity, the weights are: 8, 0, 0, 0
@ -647,7 +647,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "node4", Labels: labelAzAz2}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: framework.MaxNodeScore}, {Name: "node4", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: fwk.MaxNodeScore}, {Name: "node4", Score: 0}},
},
// Cover https://github.com/kubernetes/kubernetes/issues/82796 which panics upon:
// 1. Some nodes in a topology don't have pods with affinity, but other nodes in the same topology do.
@ -663,7 +663,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgChina}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
},
{
name: "invalid Affinity fails PreScore",
@ -696,7 +696,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: 0}},
},
{
name: "Affinity with pods matching both NamespaceSelector and Namespaces fields",
@ -711,7 +711,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: 0}},
},
{
name: "Affinity with pods matching NamespaceSelector",
@ -726,7 +726,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: fwk.MaxNodeScore}},
},
{
name: "Affinity with pods matching both NamespaceSelector and Namespaces fields",
@ -741,7 +741,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: fwk.MaxNodeScore}},
},
{
name: "Ignore preferred terms of existing pods",
@ -754,7 +754,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
wantStatus: fwk.NewStatus(fwk.Skip),
ignorePreferredTermsOfExistingPods: true,
},
@ -769,7 +769,7 @@ func TestPreferredAffinity(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: fwk.MaxNodeScore}},
ignorePreferredTermsOfExistingPods: false,
},
{
@ -788,7 +788,7 @@ func TestPreferredAffinity(t *testing.T) {
state := framework.NewCycleState()
p := plugintesting.SetupPluginWithInformers(ctx, t, schedruntime.FactoryAdapter(feature.Features{}, New), &config.InterPodAffinityArgs{HardPodAffinityWeight: 1, IgnorePreferredTermsOfExistingPods: test.ignorePreferredTermsOfExistingPods}, cache.NewSnapshot(test.pods, test.nodes), namespaces)
nodeInfos := tf.BuildNodeInfos(test.nodes)
status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, nodeInfos)
status := p.(fwk.PreScorePlugin).PreScore(ctx, state, test.pod, nodeInfos)
if !status.IsSuccess() {
if status.Code() != test.wantStatus.Code() {
@ -801,17 +801,17 @@ func TestPreferredAffinity(t *testing.T) {
return
}
var gotList framework.NodeScoreList
var gotList fwk.NodeScoreList
for _, nodeInfo := range nodeInfos {
nodeName := nodeInfo.Node().Name
score, status := p.(framework.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
score, status := p.(fwk.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
if !status.IsSuccess() {
t.Errorf("unexpected error from Score: %v", status)
}
gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
gotList = append(gotList, fwk.NodeScore{Name: nodeName, Score: score})
}
status = p.(framework.ScorePlugin).ScoreExtensions().NormalizeScore(ctx, state, test.pod, gotList)
status = p.(fwk.ScorePlugin).ScoreExtensions().NormalizeScore(ctx, state, test.pod, gotList)
if !status.IsSuccess() {
t.Errorf("unexpected error from NormalizeScore: %v", status)
}
@ -869,7 +869,7 @@ func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) {
pods []*v1.Pod
nodes []*v1.Node
hardPodAffinityWeight int32
expectedList framework.NodeScoreList
expectedList fwk.NodeScoreList
name string
wantStatus *fwk.Status
}{
@ -886,7 +886,7 @@ func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
},
hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: fwk.MaxNodeScore}, {Name: "node3", Score: 0}},
},
{
name: "with zero weight",
@ -931,7 +931,7 @@ func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
},
hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: fwk.MaxNodeScore}, {Name: "node3", Score: 0}},
},
{
name: "with matching Namespaces",
@ -946,7 +946,7 @@ func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
},
hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: fwk.MaxNodeScore}, {Name: "node3", Score: 0}},
},
}
for _, test := range tests {
@ -957,7 +957,7 @@ func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) {
state := framework.NewCycleState()
p := plugintesting.SetupPluginWithInformers(ctx, t, schedruntime.FactoryAdapter(feature.Features{}, New), &config.InterPodAffinityArgs{HardPodAffinityWeight: test.hardPodAffinityWeight}, cache.NewSnapshot(test.pods, test.nodes), namespaces)
nodeInfos := tf.BuildNodeInfos(test.nodes)
status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, nodeInfos)
status := p.(fwk.PreScorePlugin).PreScore(ctx, state, test.pod, nodeInfos)
if !test.wantStatus.Equal(status) {
t.Errorf("InterPodAffinity#PreScore() returned unexpected status.Code got: %v, want: %v", status.Code(), test.wantStatus.Code())
}
@ -965,21 +965,21 @@ func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) {
return
}
var gotList framework.NodeScoreList
var gotList fwk.NodeScoreList
for _, nodeInfo := range nodeInfos {
nodeName := nodeInfo.Node().Name
nodeInfo, err := p.(*InterPodAffinity).sharedLister.NodeInfos().Get(nodeName)
if err != nil {
t.Errorf("failed to get node %q from snapshot: %v", nodeName, err)
}
score, status := p.(framework.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
score, status := p.(fwk.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
gotList = append(gotList, fwk.NodeScore{Name: nodeName, Score: score})
}
status = p.(framework.ScorePlugin).ScoreExtensions().NormalizeScore(ctx, state, test.pod, gotList)
status = p.(fwk.ScorePlugin).ScoreExtensions().NormalizeScore(ctx, state, test.pod, gotList)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}

View file

@ -29,7 +29,6 @@ import (
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
@ -38,17 +37,17 @@ import (
// NodeAffinity is a plugin that checks if a pod node selector matches the node label.
type NodeAffinity struct {
handle framework.Handle
handle fwk.Handle
addedNodeSelector *nodeaffinity.NodeSelector
addedPrefSchedTerms *nodeaffinity.PreferredSchedulingTerms
enableSchedulingQueueHint bool
}
var _ framework.PreFilterPlugin = &NodeAffinity{}
var _ framework.FilterPlugin = &NodeAffinity{}
var _ framework.PreScorePlugin = &NodeAffinity{}
var _ framework.ScorePlugin = &NodeAffinity{}
var _ framework.EnqueueExtensions = &NodeAffinity{}
var _ fwk.PreFilterPlugin = &NodeAffinity{}
var _ fwk.FilterPlugin = &NodeAffinity{}
var _ fwk.PreScorePlugin = &NodeAffinity{}
var _ fwk.ScorePlugin = &NodeAffinity{}
var _ fwk.EnqueueExtensions = &NodeAffinity{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
@ -144,7 +143,7 @@ func (pl *NodeAffinity) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1
}
// PreFilter builds and writes cycle state used by Filter.
func (pl *NodeAffinity) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *NodeAffinity) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
affinity := pod.Spec.Affinity
noNodeAffinity := (affinity == nil ||
affinity.NodeAffinity == nil ||
@ -190,14 +189,14 @@ func (pl *NodeAffinity) PreFilter(ctx context.Context, cycleState fwk.CycleState
if nodeNames != nil && len(nodeNames) == 0 {
return nil, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, errReasonConflict)
} else if len(nodeNames) > 0 {
return &framework.PreFilterResult{NodeNames: nodeNames}, nil
return &fwk.PreFilterResult{NodeNames: nodeNames}, nil
}
return nil, nil
}
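
A nil *fwk.PreFilterResult leaves every node in play, while a non-empty NodeNames set narrows the search space before Filter runs. A minimal out-of-tree sketch against the staging types (the annotation key and function name are hypothetical, not part of this change):

import (
	"strings"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	fwk "k8s.io/kube-scheduler/framework"
)

// preFilterByAnnotation narrows candidates to the nodes named in a
// (hypothetical) pod annotation, mirroring how PreFilter above builds
// its result from node affinity terms.
func preFilterByAnnotation(pod *v1.Pod) (*fwk.PreFilterResult, *fwk.Status) {
	raw, ok := pod.Annotations["example.com/candidate-nodes"]
	if !ok {
		return nil, nil // nil result: all nodes remain candidates
	}
	names := sets.New[string]()
	for _, p := range strings.Split(raw, ",") {
		if p = strings.TrimSpace(p); p != "" {
			names.Insert(p)
		}
	}
	if names.Len() == 0 {
		return nil, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, "empty candidate list")
	}
	return &fwk.PreFilterResult{NodeNames: names}, nil
}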
// PreFilterExtensions not necessary for this plugin as state doesn't depend on pod additions or deletions.
func (pl *NodeAffinity) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *NodeAffinity) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
@ -285,17 +284,17 @@ func (pl *NodeAffinity) Score(ctx context.Context, state fwk.CycleState, pod *v1
}
// NormalizeScore invoked after scoring all nodes.
func (pl *NodeAffinity) NormalizeScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *fwk.Status {
return helper.DefaultNormalizeScore(framework.MaxNodeScore, false, scores)
func (pl *NodeAffinity) NormalizeScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, scores fwk.NodeScoreList) *fwk.Status {
return helper.DefaultNormalizeScore(fwk.MaxNodeScore, false, scores)
}
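
helper.DefaultNormalizeScore rescales the raw per-node scores in place so that the highest raw value maps to fwk.MaxNodeScore (the false argument means the scores are not reversed). A rough sketch of that behavior, assuming the helper keeps its historical semantics:

func normalize(scores fwk.NodeScoreList) {
	var highest int64
	for _, s := range scores {
		if s.Score > highest {
			highest = s.Score
		}
	}
	if highest == 0 {
		return // all raw scores are zero; nothing to rescale
	}
	for i := range scores {
		scores[i].Score = scores[i].Score * fwk.MaxNodeScore / highest
	}
}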
// ScoreExtensions of the Score plugin.
func (pl *NodeAffinity) ScoreExtensions() framework.ScoreExtensions {
func (pl *NodeAffinity) ScoreExtensions() fwk.ScoreExtensions {
return pl
}
// New initializes a new plugin and returns it.
func New(_ context.Context, plArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
func New(_ context.Context, plArgs runtime.Object, h fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
args, err := getArgs(plArgs)
if err != nil {
return nil, err

View file

@ -46,7 +46,7 @@ func TestNodeAffinity(t *testing.T) {
nodeName string
wantStatus *fwk.Status
wantPreFilterStatus *fwk.Status
wantPreFilterResult *framework.PreFilterResult
wantPreFilterResult *fwk.PreFilterResult
args config.NodeAffinityArgs
runPreFilter bool
}{
@ -497,7 +497,7 @@ func TestNodeAffinity(t *testing.T) {
},
},
nodeName: "node1",
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.New("node1")},
wantPreFilterResult: &fwk.PreFilterResult{NodeNames: sets.New("node1")},
runPreFilter: true,
},
{
@ -525,7 +525,7 @@ func TestNodeAffinity(t *testing.T) {
},
nodeName: "node2",
wantStatus: fwk.NewStatus(fwk.UnschedulableAndUnresolvable, ErrReasonPod),
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.New("node1")},
wantPreFilterResult: &fwk.PreFilterResult{NodeNames: sets.New("node1")},
runPreFilter: true,
},
{
@ -601,7 +601,7 @@ func TestNodeAffinity(t *testing.T) {
},
nodeName: "node2",
labels: map[string]string{"foo": "bar"},
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.New("node1")},
wantPreFilterResult: &fwk.PreFilterResult{NodeNames: sets.New("node1")},
wantStatus: fwk.NewStatus(fwk.UnschedulableAndUnresolvable, ErrReasonPod),
runPreFilter: true,
},
@ -637,7 +637,7 @@ func TestNodeAffinity(t *testing.T) {
},
nodeName: "node1",
labels: map[string]string{"foo": "bar"},
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.New("node1")},
wantPreFilterResult: &fwk.PreFilterResult{NodeNames: sets.New("node1")},
runPreFilter: true,
},
{
@ -710,7 +710,7 @@ func TestNodeAffinity(t *testing.T) {
},
},
nodeName: "node2",
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.New("node1", "node2")},
wantPreFilterResult: &fwk.PreFilterResult{NodeNames: sets.New("node1", "node2")},
runPreFilter: true,
},
{
@ -917,7 +917,7 @@ func TestNodeAffinity(t *testing.T) {
state := framework.NewCycleState()
var gotStatus *fwk.Status
if test.runPreFilter {
gotPreFilterResult, gotStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, state, test.pod, nil)
gotPreFilterResult, gotStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, state, test.pod, nil)
if diff := cmp.Diff(test.wantPreFilterStatus, gotStatus); diff != "" {
t.Errorf("unexpected PreFilter Status (-want,+got):\n%s", diff)
}
@ -925,7 +925,7 @@ func TestNodeAffinity(t *testing.T) {
t.Errorf("unexpected PreFilterResult (-want,+got):\n%s", diff)
}
}
gotStatus = p.(framework.FilterPlugin).Filter(ctx, state, test.pod, nodeInfo)
gotStatus = p.(fwk.FilterPlugin).Filter(ctx, state, test.pod, nodeInfo)
if diff := cmp.Diff(test.wantStatus, gotStatus); diff != "" {
t.Errorf("unexpected Filter Status (-want,+got):\n%s", diff)
}
@ -1012,7 +1012,7 @@ func TestNodeAffinityPriority(t *testing.T) {
name string
pod *v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
expectedList fwk.NodeScoreList
args config.NodeAffinityArgs
runPreScore bool
wantPreScoreStatus *fwk.Status
@ -1029,7 +1029,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: label3}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
},
{
// PreScore returns Skip.
@ -1090,7 +1090,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: label3}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
runPreScore: true,
},
{
@ -1105,7 +1105,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: label3}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
runPreScore: true,
},
{
@ -1120,7 +1120,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node5", Labels: label5}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 18}, {Name: "node5", Score: framework.MaxNodeScore}, {Name: "node2", Score: 36}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 18}, {Name: "node5", Score: fwk.MaxNodeScore}, {Name: "node2", Score: 36}},
runPreScore: true,
},
{
@ -1130,7 +1130,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: label1}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: 0}},
args: config.NodeAffinityArgs{
AddedAffinity: affinity1.NodeAffinity,
},
@ -1148,7 +1148,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: label5}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 40}, {Name: "node2", Score: 60}, {Name: "node3", Score: framework.MaxNodeScore}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 40}, {Name: "node2", Score: 60}, {Name: "node3", Score: fwk.MaxNodeScore}},
args: config.NodeAffinityArgs{
AddedAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
@ -1181,7 +1181,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "node5", Labels: label5}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 18}, {Name: "node5", Score: framework.MaxNodeScore}, {Name: "node2", Score: 36}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 18}, {Name: "node5", Score: fwk.MaxNodeScore}, {Name: "node2", Score: 36}},
},
}
@ -1199,7 +1199,7 @@ func TestNodeAffinityPriority(t *testing.T) {
}
var status *fwk.Status
if test.runPreScore {
status = p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
status = p.(fwk.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
if status.Code() != test.wantPreScoreStatus.Code() {
t.Errorf("unexpected status code from PreScore: want: %v got: %v", test.wantPreScoreStatus.Code().String(), status.Code().String())
}
@ -1211,18 +1211,18 @@ func TestNodeAffinityPriority(t *testing.T) {
return
}
}
var gotList framework.NodeScoreList
var gotList fwk.NodeScoreList
nodeInfos := tf.BuildNodeInfos(test.nodes)
for _, nodeInfo := range nodeInfos {
nodeName := nodeInfo.Node().Name
score, status := p.(framework.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
score, status := p.(fwk.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
gotList = append(gotList, fwk.NodeScore{Name: nodeName, Score: score})
}
status = p.(framework.ScorePlugin).ScoreExtensions().NormalizeScore(ctx, state, test.pod, gotList)
status = p.(fwk.ScorePlugin).ScoreExtensions().NormalizeScore(ctx, state, test.pod, gotList)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}

View file

@ -22,7 +22,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
@ -32,8 +31,8 @@ type NodeName struct {
enableSchedulingQueueHint bool
}
var _ framework.FilterPlugin = &NodeName{}
var _ framework.EnqueueExtensions = &NodeName{}
var _ fwk.FilterPlugin = &NodeName{}
var _ fwk.EnqueueExtensions = &NodeName{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
@ -83,7 +82,7 @@ func Fits(pod *v1.Pod, nodeInfo fwk.NodeInfo) bool {
}
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, _ framework.Handle, fts feature.Features) (framework.Plugin, error) {
func New(_ context.Context, _ runtime.Object, _ fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
return &NodeName{
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
}, nil
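
For context, the Fits helper named in the hunk header above is a plain name comparison, untouched by this commit apart from the fwk.NodeInfo parameter type; its logic amounts to:

// Fits returns true when the pod either names no node or names this node.
func Fits(pod *v1.Pod, nodeInfo fwk.NodeInfo) bool {
	return len(pod.Spec.NodeName) == 0 || pod.Spec.NodeName == nodeInfo.Node().Name
}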

View file

@ -63,7 +63,7 @@ func TestNodeName(t *testing.T) {
if err != nil {
t.Fatalf("creating plugin: %v", err)
}
gotStatus := p.(framework.FilterPlugin).Filter(ctx, nil, test.pod, nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, nil, test.pod, nodeInfo)
if diff := cmp.Diff(test.wantStatus, gotStatus); diff != "" {
t.Errorf("status does not match (-want,+got):\n%s", diff)
}

View file

@ -24,7 +24,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
@ -35,9 +34,9 @@ type NodePorts struct {
enableSchedulingQueueHint bool
}
var _ framework.PreFilterPlugin = &NodePorts{}
var _ framework.FilterPlugin = &NodePorts{}
var _ framework.EnqueueExtensions = &NodePorts{}
var _ fwk.PreFilterPlugin = &NodePorts{}
var _ fwk.FilterPlugin = &NodePorts{}
var _ fwk.EnqueueExtensions = &NodePorts{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
@ -65,7 +64,7 @@ func (pl *NodePorts) Name() string {
}
// PreFilter invoked at the prefilter extension point.
func (pl *NodePorts) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *NodePorts) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
s := util.GetHostPorts(pod)
// Skip if a pod has no ports.
if len(s) == 0 {
@ -76,7 +75,7 @@ func (pl *NodePorts) PreFilter(ctx context.Context, cycleState fwk.CycleState, p
}
// PreFilterExtensions do not exist for this plugin.
func (pl *NodePorts) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *NodePorts) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
@ -183,7 +182,7 @@ func fitsPorts(wantPorts []v1.ContainerPort, portsInUse fwk.HostPortInfo) bool {
}
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, _ framework.Handle, fts feature.Features) (framework.Plugin, error) {
func New(_ context.Context, _ runtime.Object, _ fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
return &NodePorts{
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
}, nil
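
The fitsPorts helper in the hunk header above checks the pod's requested host ports against the node's in-use set; a sketch of that check, assuming fwk.HostPortInfo keeps its CheckConflict method after the move:

func fitsPorts(wantPorts []v1.ContainerPort, portsInUse fwk.HostPortInfo) bool {
	for _, cp := range wantPorts {
		// Any conflict on the (HostIP, Protocol, HostPort) triple rules the node out.
		if portsInUse.CheckConflict(cp.HostIP, string(cp.Protocol), cp.HostPort) {
			return false
		}
	}
	return true
}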

View file

@ -184,7 +184,7 @@ func TestNodePorts(t *testing.T) {
t.Fatalf("creating plugin: %v", err)
}
cycleState := framework.NewCycleState()
_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
_, preFilterStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
if diff := cmp.Diff(test.wantPreFilterStatus, preFilterStatus); diff != "" {
t.Errorf("preFilter status does not match (-want,+got): %s", diff)
}
@ -194,7 +194,7 @@ func TestNodePorts(t *testing.T) {
if !preFilterStatus.IsSuccess() {
t.Errorf("prefilter failed with status: %v", preFilterStatus)
}
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
if diff := cmp.Diff(test.wantFilterStatus, gotStatus); diff != "" {
t.Errorf("filter status does not match (-want, +got): %s", diff)
}
@ -213,7 +213,7 @@ func TestPreFilterDisabled(t *testing.T) {
t.Fatalf("creating plugin: %v", err)
}
cycleState := framework.NewCycleState()
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo)
wantStatus := fwk.AsStatus(fwk.ErrNotFound)
if diff := cmp.Diff(wantStatus, gotStatus); diff != "" {
t.Errorf("status does not match (-want,+got):\n%s", diff)

View file

@ -26,7 +26,6 @@ import (
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
@ -34,12 +33,12 @@ import (
// BalancedAllocation is a score plugin that calculates the difference between the cpu and memory fraction
// of capacity, and prioritizes the host based on how close the two metrics are to each other.
type BalancedAllocation struct {
handle framework.Handle
handle fwk.Handle
resourceAllocationScorer
}
var _ framework.PreScorePlugin = &BalancedAllocation{}
var _ framework.ScorePlugin = &BalancedAllocation{}
var _ fwk.PreScorePlugin = &BalancedAllocation{}
var _ fwk.ScorePlugin = &BalancedAllocation{}
// BalancedAllocationName is the name of the plugin used in the plugin registry and configurations.
const (
@ -115,12 +114,12 @@ func (ba *BalancedAllocation) Score(ctx context.Context, state fwk.CycleState, p
}
// ScoreExtensions of the Score plugin.
func (ba *BalancedAllocation) ScoreExtensions() framework.ScoreExtensions {
func (ba *BalancedAllocation) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
// NewBalancedAllocation initializes a new plugin and returns it.
func NewBalancedAllocation(_ context.Context, baArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
func NewBalancedAllocation(_ context.Context, baArgs runtime.Object, h fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
args, ok := baArgs.(*config.NodeResourcesBalancedAllocationArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type NodeResourcesBalancedAllocationArgs, got %T", baArgs)
@ -176,5 +175,5 @@ func balancedResourceScorer(requested, allocable []int64) int64 {
// STD (standard deviation) is always a positive value. 1 - deviation lets the score be higher for the node with the least deviation, and
// multiplying it by `MaxNodeScore` provides the scaling factor needed.
return int64((1 - std) * float64(framework.MaxNodeScore))
return int64((1 - std) * float64(fwk.MaxNodeScore))
}
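
A worked example for the common two-resource case, where the standard deviation of two fractions reduces to half their absolute difference (imports of math and the fwk alias assumed):

func exampleBalancedScore() int64 {
	cpuFraction, memFraction := 0.75, 0.25
	mean := (cpuFraction + memFraction) / 2 // 0.50
	std := math.Sqrt((math.Pow(cpuFraction-mean, 2) + math.Pow(memFraction-mean, 2)) / 2) // 0.25
	return int64((1 - std) * float64(fwk.MaxNodeScore)) // (1 - 0.25) * 100 = 75
}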

View file

@ -114,7 +114,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
expectedList fwk.NodeScoreList
name string
args config.NodeResourcesBalancedAllocationArgs
runPreScore bool
@ -142,7 +142,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Node2 Score: (1-0) * MaxNodeScore = MaxNodeScore
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("node1", 4000, 10000, nil), makeNode("node2", 6000, 10000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 87}, {Name: "node2", Score: framework.MaxNodeScore}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 87}, {Name: "node2", Score: fwk.MaxNodeScore}},
name: "nothing scheduled, resources requested, differently sized nodes",
args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet},
runPreScore: true,
@ -160,7 +160,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Node2 Score: (1 - 0.05)*MaxNodeScore = 95
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("node1", 10000, 20000, nil), makeNode("node2", 10000, 20000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 82}, {Name: "node2", Score: 95}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 82}, {Name: "node2", Score: 95}},
name: "resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
@ -182,7 +182,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Node2 Score: (1 - 0.2)*MaxNodeScore = 80
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("node1", 10000, 20000, nil), makeNode("node2", 10000, 50000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 82}, {Name: "node2", Score: 80}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 82}, {Name: "node2", Score: 80}},
name: "resources requested, pods scheduled with resources, differently sized nodes",
pods: []*v1.Pod{
{Spec: cpuOnly},
@ -205,7 +205,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Node2 Score: (1 - 0.25)*MaxNodeScore = 75
pod: &v1.Pod{Spec: cpuOnly},
nodes: []*v1.Node{makeNode("node1", 6000, 10000, nil), makeNode("node2", 6000, 10000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 75}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 75}},
name: "requested resources at node capacity",
pods: []*v1.Pod{
{Spec: cpuOnly},
@ -232,7 +232,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
"nvidia.com/gpu": "1",
}).Obj(),
nodes: []*v1.Node{makeNode("node1", 3500, 40000, scalarResource), makeNode("node2", 3500, 40000, scalarResource)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 70}, {Name: "node2", Score: 65}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 70}, {Name: "node2", Score: 65}},
name: "include scalar resource on a node for balanced resource allocation",
pods: []*v1.Pod{
{Spec: cpuAndMemory},
@ -259,7 +259,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
{
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("node1", 3500, 40000, scalarResource), makeNode("node2", 3500, 40000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 63}, {Name: "node2", Score: 63}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 63}, {Name: "node2", Score: 63}},
name: "node without the scalar resource should skip the scalar resource",
pods: []*v1.Pod{},
args: config.NodeResourcesBalancedAllocationArgs{Resources: []config.ResourceSpec{
@ -282,7 +282,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Node2 Score: (1 - 0.05)*MaxNodeScore = 95
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("node1", 10000, 20000, nil), makeNode("node2", 10000, 20000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 82}, {Name: "node2", Score: 95}},
expectedList: []fwk.NodeScore{{Name: "node1", Score: 82}, {Name: "node2", Score: 95}},
name: "resources requested, pods scheduled with resources if PreScore not called",
pods: []*v1.Pod{
{Spec: cpuOnly},
@ -303,7 +303,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
p, _ := NewBalancedAllocation(ctx, &test.args, fh, feature.Features{})
state := framework.NewCycleState()
if test.runPreScore {
status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
status := p.(fwk.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
if status.Code() != test.wantPreScoreStatusCode {
t.Errorf("unexpected status code, want: %v, got: %v", test.wantPreScoreStatusCode, status.Code())
}
@ -317,7 +317,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
if err != nil {
t.Errorf("failed to get node %q from snapshot: %v", test.nodes[i].Name, err)
}
hostResult, status := p.(framework.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
hostResult, status := p.(fwk.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
if !status.IsSuccess() {
t.Errorf("Score is expected to return success, but didn't. Got status: %v", status)
}

View file

@ -38,11 +38,11 @@ import (
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
var _ framework.PreFilterPlugin = &Fit{}
var _ framework.FilterPlugin = &Fit{}
var _ framework.EnqueueExtensions = &Fit{}
var _ framework.PreScorePlugin = &Fit{}
var _ framework.ScorePlugin = &Fit{}
var _ fwk.PreFilterPlugin = &Fit{}
var _ fwk.FilterPlugin = &Fit{}
var _ fwk.EnqueueExtensions = &Fit{}
var _ fwk.PreScorePlugin = &Fit{}
var _ fwk.ScorePlugin = &Fit{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
@ -93,12 +93,12 @@ type Fit struct {
enableSchedulingQueueHint bool
enablePodLevelResources bool
enableDRAExtendedResource bool
handle framework.Handle
handle fwk.Handle
resourceAllocationScorer
}
// ScoreExtensions of the Score plugin.
func (f *Fit) ScoreExtensions() framework.ScoreExtensions {
func (f *Fit) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
@ -155,7 +155,7 @@ func (f *Fit) Name() string {
}
// NewFit initializes a new plugin and returns it.
func NewFit(_ context.Context, plArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
func NewFit(_ context.Context, plArgs runtime.Object, h fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
args, ok := plArgs.(*config.NodeResourcesFitArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type NodeResourcesFitArgs, got %T", plArgs)
@ -233,7 +233,7 @@ func computePodResourceRequest(pod *v1.Pod, opts ResourceRequestsOptions) *preFi
}
// withDeviceClass adds resource to device class mapping to preFilterState.
func withDeviceClass(result *preFilterState, draManager framework.SharedDRAManager) *fwk.Status {
func withDeviceClass(result *preFilterState, draManager fwk.SharedDRAManager) *fwk.Status {
hasExtendedResource := false
for rName, rQuant := range result.ScalarResources {
// Skip in case request quantity is zero
@ -261,7 +261,7 @@ func withDeviceClass(result *preFilterState, draManager framework.SharedDRAManag
}
// PreFilter invoked at the prefilter extension point.
func (f *Fit) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (f *Fit) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
if !f.enableSidecarContainers && hasRestartableInitContainer(pod) {
// Scheduler will calculate resources usage for a Pod containing
// restartable init containers that will be equal or more than kubelet will
@ -282,7 +282,7 @@ func (f *Fit) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (f *Fit) PreFilterExtensions() framework.PreFilterExtensions {
func (f *Fit) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
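
PreFilter above stores the computed request in the CycleState; the conventional read on the Filter side looks like the sketch below (key shape and error text are illustrative, assuming fwk.CycleState keeps its Read accessor and fwk.StateKey type):

const preFilterStateKey fwk.StateKey = "PreFilter" + Name

func getPreFilterState(cycleState fwk.CycleState) (*preFilterState, error) {
	c, err := cycleState.Read(preFilterStateKey)
	if err != nil {
		// PreFilter did not run or was skipped, so there is nothing to read.
		return nil, fmt.Errorf("reading %q from cycleState: %w", preFilterStateKey, err)
	}
	s, ok := c.(*preFilterState)
	if !ok {
		return nil, fmt.Errorf("%+v convert to noderesources.preFilterState error", c)
	}
	return s, nil
}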

View file

@ -711,12 +711,12 @@ func TestEnoughRequests(t *testing.T) {
t.Fatal(err)
}
cycleState := framework.NewCycleState()
_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
_, preFilterStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
if !preFilterStatus.IsSuccess() {
t.Errorf("prefilter failed with status: %v", preFilterStatus)
}
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
if diff := cmp.Diff(test.wantStatus, gotStatus); diff != "" {
t.Errorf("status does not match (-want,+got):\n%s", diff)
}
@ -747,7 +747,7 @@ func TestPreFilterDisabled(t *testing.T) {
t.Fatal(err)
}
cycleState := framework.NewCycleState()
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo)
wantStatus := fwk.AsStatus(fwk.ErrNotFound)
if diff := cmp.Diff(wantStatus, gotStatus); diff != "" {
t.Errorf("status does not match (-want,+got):\n%s", diff)
@ -800,12 +800,12 @@ func TestNotEnoughRequests(t *testing.T) {
t.Fatal(err)
}
cycleState := framework.NewCycleState()
_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
_, preFilterStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
if !preFilterStatus.IsSuccess() {
t.Errorf("prefilter failed with status: %v", preFilterStatus)
}
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
if diff := cmp.Diff(test.wantStatus, gotStatus); diff != "" {
t.Errorf("status does not match (-want,+got):\n%s", diff)
}
@ -861,12 +861,12 @@ func TestStorageRequests(t *testing.T) {
t.Fatal(err)
}
cycleState := framework.NewCycleState()
_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
_, preFilterStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
if !preFilterStatus.IsSuccess() {
t.Errorf("prefilter failed with status: %v", preFilterStatus)
}
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
if diff := cmp.Diff(test.wantStatus, gotStatus); diff != "" {
t.Errorf("status does not match (-want,+got):\n%s", diff)
}
@ -971,7 +971,7 @@ func TestRestartableInitContainers(t *testing.T) {
t.Fatal(err)
}
cycleState := framework.NewCycleState()
_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
_, preFilterStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
if diff := cmp.Diff(test.wantPreFilterStatus, preFilterStatus); diff != "" {
t.Error("prefilter status does not match (-expected +actual):\n", diff)
}
@ -979,7 +979,7 @@ func TestRestartableInitContainers(t *testing.T) {
return
}
filterStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, nodeInfo)
filterStatus := p.(fwk.FilterPlugin).Filter(ctx, cycleState, test.pod, nodeInfo)
if diff := cmp.Diff(test.wantFilterStatus, filterStatus); diff != "" {
t.Error("filter status does not match (-expected +actual):\n", diff)
}
@ -994,7 +994,7 @@ func TestFitScore(t *testing.T) {
requestedPod *v1.Pod
nodes []*v1.Node
existingPods []*v1.Pod
expectedPriorities framework.NodeScoreList
expectedPriorities fwk.NodeScoreList
nodeResourcesFitArgs config.NodeResourcesFitArgs
runPreScore bool
}{
@ -1011,7 +1011,7 @@ func TestFitScore(t *testing.T) {
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 10}, {Name: "node2", Score: 32}},
expectedPriorities: []fwk.NodeScore{{Name: "node1", Score: 10}, {Name: "node2", Score: 32}},
nodeResourcesFitArgs: config.NodeResourcesFitArgs{
ScoringStrategy: &config.ScoringStrategy{
Type: config.RequestedToCapacityRatio,
@ -1039,7 +1039,7 @@ func TestFitScore(t *testing.T) {
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 95}, {Name: "node2", Score: 68}},
expectedPriorities: []fwk.NodeScore{{Name: "node1", Score: 95}, {Name: "node2", Score: 68}},
nodeResourcesFitArgs: config.NodeResourcesFitArgs{
ScoringStrategy: &config.ScoringStrategy{
Type: config.RequestedToCapacityRatio,
@ -1067,7 +1067,7 @@ func TestFitScore(t *testing.T) {
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 67}, {Name: "node2", Score: 36}},
expectedPriorities: []fwk.NodeScore{{Name: "node1", Score: 67}, {Name: "node2", Score: 36}},
nodeResourcesFitArgs: config.NodeResourcesFitArgs{
ScoringStrategy: &config.ScoringStrategy{
Type: config.MostAllocated,
@ -1089,7 +1089,7 @@ func TestFitScore(t *testing.T) {
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 63}},
expectedPriorities: []fwk.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 63}},
nodeResourcesFitArgs: config.NodeResourcesFitArgs{
ScoringStrategy: &config.ScoringStrategy{
Type: config.LeastAllocated,
@ -1111,7 +1111,7 @@ func TestFitScore(t *testing.T) {
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 10}, {Name: "node2", Score: 32}},
expectedPriorities: []fwk.NodeScore{{Name: "node1", Score: 10}, {Name: "node2", Score: 32}},
nodeResourcesFitArgs: config.NodeResourcesFitArgs{
ScoringStrategy: &config.ScoringStrategy{
Type: config.RequestedToCapacityRatio,
@ -1139,7 +1139,7 @@ func TestFitScore(t *testing.T) {
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 67}, {Name: "node2", Score: 36}},
expectedPriorities: []fwk.NodeScore{{Name: "node1", Score: 67}, {Name: "node2", Score: 36}},
nodeResourcesFitArgs: config.NodeResourcesFitArgs{
ScoringStrategy: &config.ScoringStrategy{
Type: config.MostAllocated,
@ -1161,7 +1161,7 @@ func TestFitScore(t *testing.T) {
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 63}},
expectedPriorities: []fwk.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 63}},
nodeResourcesFitArgs: config.NodeResourcesFitArgs{
ScoringStrategy: &config.ScoringStrategy{
Type: config.LeastAllocated,
@ -1184,7 +1184,7 @@ func TestFitScore(t *testing.T) {
SidecarReq(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 67}, {Name: "node2", Score: 45}},
expectedPriorities: []fwk.NodeScore{{Name: "node1", Score: 67}, {Name: "node2", Score: 45}},
nodeResourcesFitArgs: config.NodeResourcesFitArgs{
ScoringStrategy: &config.ScoringStrategy{
Type: config.MostAllocated,
@ -1207,7 +1207,7 @@ func TestFitScore(t *testing.T) {
SidecarReq(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 55}},
expectedPriorities: []fwk.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 55}},
nodeResourcesFitArgs: config.NodeResourcesFitArgs{
ScoringStrategy: &config.ScoringStrategy{
Type: config.LeastAllocated,
@ -1233,10 +1233,10 @@ func TestFitScore(t *testing.T) {
t.Fatalf("unexpected error: %v", err)
}
var gotPriorities framework.NodeScoreList
var gotPriorities fwk.NodeScoreList
for _, n := range test.nodes {
if test.runPreScore {
status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, tf.BuildNodeInfos(test.nodes))
status := p.(fwk.PreScorePlugin).PreScore(ctx, state, test.requestedPod, tf.BuildNodeInfos(test.nodes))
if !status.IsSuccess() {
t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
}
@ -1245,11 +1245,11 @@ func TestFitScore(t *testing.T) {
if err != nil {
t.Errorf("failed to get node %q from snapshot: %v", n.Name, err)
}
score, status := p.(framework.ScorePlugin).Score(ctx, state, test.requestedPod, nodeInfo)
score, status := p.(fwk.ScorePlugin).Score(ctx, state, test.requestedPod, nodeInfo)
if !status.IsSuccess() {
t.Errorf("Score is expected to return success, but didn't. Got status: %v", status)
}
gotPriorities = append(gotPriorities, framework.NodeScore{Name: n.Name, Score: score})
gotPriorities = append(gotPriorities, fwk.NodeScore{Name: n.Name, Score: score})
}
if diff := cmp.Diff(test.expectedPriorities, gotPriorities); diff != "" {

View file

@ -17,8 +17,8 @@ limitations under the License.
package noderesources
import (
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
// leastResourceScorer favors nodes with fewer requested resources.
@ -57,5 +57,5 @@ func leastRequestedScore(requested, capacity int64) int64 {
return 0
}
return ((capacity - requested) * framework.MaxNodeScore) / capacity
return ((capacity - requested) * fwk.MaxNodeScore) / capacity
}
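
Worked example: with 4000m allocatable CPU and 1000m requested, the formula gives (4000 - 1000) * 100 / 4000 = 75, so lightly loaded nodes score close to fwk.MaxNodeScore and a fully requested node scores 0.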

View file

@ -41,7 +41,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
requestedPod *v1.Pod
nodes []*v1.Node
existingPods []*v1.Pod
expectedScores framework.NodeScoreList
expectedScores fwk.NodeScoreList
resources []config.ResourceSpec
wantErrs field.ErrorList
wantStatusCode fwk.Code
@ -62,7 +62,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
},
existingPods: nil,
expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: fwk.MaxNodeScore}},
resources: defaultResources,
},
{
@ -84,7 +84,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
},
existingPods: nil,
expectedScores: []framework.NodeScore{{Name: "node1", Score: 37}, {Name: "node2", Score: 50}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 37}, {Name: "node2", Score: 50}},
resources: defaultResources,
},
{
@ -98,7 +98,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
},
existingPods: nil,
expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MinNodeScore}, {Name: "node2", Score: framework.MinNodeScore}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: fwk.MinNodeScore}, {Name: "node2", Score: fwk.MinNodeScore}},
resources: nil,
wantStatusCode: fwk.Error,
},
@ -123,7 +123,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakePod().Node("node2").Obj(),
st.MakePod().Node("node2").Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: fwk.MaxNodeScore}},
resources: defaultResources,
},
{
@ -147,7 +147,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 70}, {Name: "node2", Score: 57}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 70}, {Name: "node2", Score: 57}},
resources: defaultResources,
},
{
@ -172,7 +172,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 57}, {Name: "node2", Score: 45}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 57}, {Name: "node2", Score: 45}},
resources: defaultResources,
},
{
@ -197,7 +197,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 57}, {Name: "node2", Score: 60}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 57}, {Name: "node2", Score: 60}},
resources: defaultResources,
},
{
@ -219,7 +219,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 25}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 25}},
resources: defaultResources,
},
{
@ -233,7 +233,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MinNodeScore}, {Name: "node2", Score: framework.MinNodeScore}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: fwk.MinNodeScore}, {Name: "node2", Score: fwk.MinNodeScore}},
resources: defaultResources,
},
{
@ -253,7 +253,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
},
existingPods: nil,
expectedScores: []framework.NodeScore{{Name: "node1", Score: 41}, {Name: "node2", Score: 50}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 41}, {Name: "node2", Score: 50}},
resources: []config.ResourceSpec{
{Name: "memory", Weight: 2},
{Name: "cpu", Weight: 1},
@ -291,7 +291,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
},
existingPods: nil,
expectedScores: []framework.NodeScore{{Name: "node1", Score: 41}, {Name: "node2", Score: 50}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 41}, {Name: "node2", Score: 50}},
resources: []config.ResourceSpec{
{Name: "memory", Weight: 1},
{Name: "cpu", Weight: 0},
@ -340,7 +340,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000", v1.ResourceName(extendedRes): "4"}).Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 50}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 50}},
resources: extendedResourceSet,
},
{
@ -361,7 +361,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000", v1.ResourceName(extendedRes): "10"}).Obj(),
},
existingPods: nil,
expectedScores: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 60}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 60}},
resources: extendedResourceSet,
},
{
@ -378,7 +378,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000", v1.ResourceName(extendedRes): "4"}).Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 55}, {Name: "node2", Score: 55}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 55}, {Name: "node2", Score: 55}},
resources: []config.ResourceSpec{
{Name: extendedRes, Weight: 2},
{Name: string(v1.ResourceCPU), Weight: 1},
@ -413,22 +413,22 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
return
}
status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, tf.BuildNodeInfos(test.nodes))
status := p.(fwk.PreScorePlugin).PreScore(ctx, state, test.requestedPod, tf.BuildNodeInfos(test.nodes))
if !status.IsSuccess() {
t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
}
var gotScores framework.NodeScoreList
var gotScores fwk.NodeScoreList
for _, n := range test.nodes {
nodeInfo, err := snapshot.Get(n.Name)
if err != nil {
t.Errorf("failed to get node %q from snapshot: %v", n.Name, err)
}
score, status := p.(framework.ScorePlugin).Score(ctx, state, test.requestedPod, nodeInfo)
score, status := p.(fwk.ScorePlugin).Score(ctx, state, test.requestedPod, nodeInfo)
if status.Code() != test.wantStatusCode {
t.Errorf("unexpected status code, want: %v, got: %v", test.wantStatusCode, status.Code())
}
gotScores = append(gotScores, framework.NodeScore{Name: n.Name, Score: score})
gotScores = append(gotScores, fwk.NodeScore{Name: n.Name, Score: score})
}
if diff := cmp.Diff(test.expectedScores, gotScores); diff != "" {

View file

@ -17,8 +17,8 @@ limitations under the License.
package noderesources
import (
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
// mostResourceScorer favors nodes with the most requested resources.
@ -61,5 +61,5 @@ func mostRequestedScore(requested, capacity int64) int64 {
requested = capacity
}
return (requested * framework.MaxNodeScore) / capacity
return (requested * fwk.MaxNodeScore) / capacity
}
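
For reference, the scoring arithmetic this hunk touches can be checked in isolation; a minimal self-contained sketch (the local constant mirrors fwk.MaxNodeScore, and the sample capacities are illustrative):

package main

import "fmt"

const maxNodeScore = 100 // mirrors fwk.MaxNodeScore

// mostRequestedScore mirrors the scorer in the hunk above: clamp the
// request to capacity, then scale onto the 0-100 node-score range.
func mostRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 {
		return 0
	}
	if requested > capacity {
		requested = capacity
	}
	return (requested * maxNodeScore) / capacity
}

func main() {
	// A node with 3000m of 4000m CPU requested scores (3000*100)/4000 = 75,
	// while an idle node scores 0, so bin-packing prefers the fuller node.
	fmt.Println(mostRequestedScore(3000, 4000)) // 75
	fmt.Println(mostRequestedScore(0, 4000))    // 0
}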

View file

@ -41,7 +41,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
requestedPod *v1.Pod
nodes []*v1.Node
existingPods []*v1.Pod
expectedScores framework.NodeScoreList
expectedScores fwk.NodeScoreList
resources []config.ResourceSpec
wantErrs field.ErrorList
wantStatusCode fwk.Code
@ -62,7 +62,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
},
existingPods: nil,
expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MinNodeScore}, {Name: "node2", Score: framework.MinNodeScore}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: fwk.MinNodeScore}, {Name: "node2", Score: fwk.MinNodeScore}},
resources: defaultResources,
},
{
@ -84,7 +84,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
},
existingPods: nil,
expectedScores: []framework.NodeScore{{Name: "node1", Score: 62}, {Name: "node2", Score: 50}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 62}, {Name: "node2", Score: 50}},
resources: defaultResources,
},
{
@ -98,7 +98,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
},
existingPods: nil,
expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MinNodeScore}, {Name: "node2", Score: framework.MinNodeScore}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: fwk.MinNodeScore}, {Name: "node2", Score: fwk.MinNodeScore}},
resources: nil,
wantStatusCode: fwk.Error,
},
@ -123,7 +123,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 30}, {Name: "node2", Score: 42}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 30}, {Name: "node2", Score: 42}},
resources: defaultResources,
},
{
@ -148,7 +148,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 42}, {Name: "node2", Score: 55}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 42}, {Name: "node2", Score: 55}},
resources: defaultResources,
},
{
@ -170,7 +170,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "10000", "memory": "9000"}).Obj(),
},
existingPods: nil,
expectedScores: []framework.NodeScore{{Name: "node1", Score: 95}, {Name: "node2", Score: 75}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 95}, {Name: "node2", Score: 75}},
resources: defaultResources,
},
{
@ -190,7 +190,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
},
existingPods: nil,
expectedScores: []framework.NodeScore{{Name: "node1", Score: 58}, {Name: "node2", Score: 50}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 58}, {Name: "node2", Score: 50}},
resources: []config.ResourceSpec{
{Name: "memory", Weight: 2},
{Name: "cpu", Weight: 1},
@ -215,7 +215,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
st.MakePod().Node("node1").Container("container").Obj(),
st.MakePod().Node("node1").Container("container").Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 80}, {Name: "node2", Score: 30}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 80}, {Name: "node2", Score: 30}},
resources: defaultResources,
},
{
@ -298,7 +298,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
},
resources: extendedResourceSet,
existingPods: nil,
expectedScores: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 50}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 50}},
},
{
// Honor extended resource if the pod requests.
@ -319,7 +319,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
},
resources: extendedResourceSet,
existingPods: nil,
expectedScores: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 40}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 40}},
},
{
// If the node doesn't have a resource
@ -335,7 +335,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000", v1.ResourceName(extendedRes): "4"}).Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 45}, {Name: "node2", Score: 45}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 45}, {Name: "node2", Score: 45}},
resources: []config.ResourceSpec{
{Name: extendedRes, Weight: 2},
{Name: string(v1.ResourceCPU), Weight: 1},
@ -369,22 +369,22 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
return
}
status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, tf.BuildNodeInfos(test.nodes))
status := p.(fwk.PreScorePlugin).PreScore(ctx, state, test.requestedPod, tf.BuildNodeInfos(test.nodes))
if !status.IsSuccess() {
t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
}
var gotScores framework.NodeScoreList
var gotScores fwk.NodeScoreList
for _, n := range test.nodes {
nodeInfo, err := snapshot.Get(n.Name)
if err != nil {
t.Errorf("failed to get node %q from snapshot: %v", n.Name, err)
}
score, status := p.(framework.ScorePlugin).Score(ctx, state, test.requestedPod, nodeInfo)
score, status := p.(fwk.ScorePlugin).Score(ctx, state, test.requestedPod, nodeInfo)
if status.Code() != test.wantStatusCode {
t.Errorf("unexpected status code, want: %v, got: %v", test.wantStatusCode, status.Code())
}
gotScores = append(gotScores, framework.NodeScore{Name: n.Name, Score: score})
gotScores = append(gotScores, fwk.NodeScore{Name: n.Name, Score: score})
}
if diff := cmp.Diff(test.expectedScores, gotScores); diff != "" {

View file

@ -19,8 +19,8 @@ package noderesources
import (
"math"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
)
@ -65,7 +65,7 @@ func requestedToCapacityRatioScorer(resources []config.ResourceSpec, shape []con
// MaxCustomPriorityScore may diverge from the max score used in the scheduler, which is defined by MaxNodeScore;
// therefore we need to scale the score returned by the requested-to-capacity ratio to the score range
// used by the scheduler.
Score: int64(point.Score) * (framework.MaxNodeScore / config.MaxCustomPriorityScore),
Score: int64(point.Score) * (fwk.MaxNodeScore / config.MaxCustomPriorityScore),
})
}
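
The scaling in this hunk is easiest to see with concrete numbers; a small sketch, assuming the in-tree values MaxNodeScore=100 and MaxCustomPriorityScore=10:

package main

import "fmt"

const (
	maxNodeScore           = 100 // mirrors fwk.MaxNodeScore
	maxCustomPriorityScore = 10  // mirrors config.MaxCustomPriorityScore
)

func main() {
	// A shape point authored on the 0-10 custom scale is stretched onto
	// the scheduler's 0-100 range: 0 -> 0, 5 -> 50, 10 -> 100.
	for _, s := range []int64{0, 5, 10} {
		fmt.Println(s * (maxNodeScore / maxCustomPriorityScore))
	}
}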

View file

@ -27,6 +27,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/klog/v2/ktesting"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/backend/cache"
"k8s.io/kubernetes/pkg/scheduler/framework"
@ -48,7 +49,7 @@ func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) {
requestedPod *v1.Pod
nodes []*v1.Node
existingPods []*v1.Pod
expectedScores framework.NodeScoreList
expectedScores fwk.NodeScoreList
resources []config.ResourceSpec
shape []config.UtilizationShapePoint
wantErrs field.ErrorList
@ -64,7 +65,7 @@ func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) {
st.MakePod().Node("node1").Obj(),
st.MakePod().Node("node1").Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: fwk.MaxNodeScore}, {Name: "node2", Score: fwk.MaxNodeScore}},
resources: defaultResources,
shape: shape,
},
@ -82,7 +83,7 @@ func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) {
st.MakePod().Node("node1").Obj(),
st.MakePod().Node("node1").Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 38}, {Name: "node2", Score: 50}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 38}, {Name: "node2", Score: 50}},
resources: defaultResources,
shape: shape,
},
@ -97,7 +98,7 @@ func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) {
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 38}, {Name: "node2", Score: 50}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 38}, {Name: "node2", Score: 50}},
resources: defaultResources,
shape: shape,
},
@ -130,9 +131,9 @@ func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) {
return
}
var gotScores framework.NodeScoreList
var gotScores fwk.NodeScoreList
for _, n := range test.nodes {
status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, tf.BuildNodeInfos(test.nodes))
status := p.(fwk.PreScorePlugin).PreScore(ctx, state, test.requestedPod, tf.BuildNodeInfos(test.nodes))
if !status.IsSuccess() {
t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
}
@ -140,11 +141,11 @@ func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) {
if err != nil {
t.Errorf("failed to get node %q from snapshot: %v", n.Name, err)
}
score, status := p.(framework.ScorePlugin).Score(ctx, state, test.requestedPod, nodeInfo)
score, status := p.(fwk.ScorePlugin).Score(ctx, state, test.requestedPod, nodeInfo)
if !status.IsSuccess() {
t.Errorf("Score is expected to return success, but didn't. Got status: %v", status)
}
gotScores = append(gotScores, framework.NodeScore{Name: n.Name, Score: score})
gotScores = append(gotScores, fwk.NodeScore{Name: n.Name, Score: score})
}
if diff := cmp.Diff(test.expectedScores, gotScores); diff != "" {
@ -238,14 +239,14 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedScores framework.NodeScoreList
expectedScores fwk.NodeScoreList
name string
}{
{
// Node1 Score = Node2 Score = 0 as the incoming Pod doesn't request extended resource.
pod: st.MakePod().Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResource2), makeNode("node2", 4000, 10000*1024*1024, extendedResource1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
{
@ -262,7 +263,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
// Node2 Score: 5
pod: st.MakePod().Req(extendedResource3).Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResource2), makeNode("node2", 4000, 10000*1024*1024, extendedResource1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 2}, {Name: "node2", Score: 5}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 2}, {Name: "node2", Score: 5}},
name: "resources requested, pods scheduled with less resources",
pods: []*v1.Pod{st.MakePod().Obj()},
},
@ -280,7 +281,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
// Node2 Score: 10
pod: st.MakePod().Req(extendedResource3).Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResource2), makeNode("node2", 4000, 10000*1024*1024, extendedResource1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 2}, {Name: "node2", Score: 10}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 2}, {Name: "node2", Score: 10}},
name: "resources requested, pods scheduled with resources, on node with existing pod running ",
pods: []*v1.Pod{st.MakePod().Req(extendedResource3).Node("node2").Obj()},
},
@ -298,7 +299,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
// Node2 Score: 10
pod: st.MakePod().Req(extendedResource4).Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResource2), makeNode("node2", 4000, 10000*1024*1024, extendedResource1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 5}, {Name: "node2", Score: 10}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 5}, {Name: "node2", Score: 10}},
name: "resources requested, pods scheduled with more resources",
pods: []*v1.Pod{
st.MakePod().Obj(),
@ -331,9 +332,9 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
t.Fatalf("unexpected error: %v", err)
}
var gotList framework.NodeScoreList
var gotList fwk.NodeScoreList
for _, n := range test.nodes {
status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
status := p.(fwk.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
if !status.IsSuccess() {
t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
}
@ -341,11 +342,11 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
if err != nil {
t.Errorf("failed to get node %q from snapshot: %v", n.Name, err)
}
score, status := p.(framework.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
score, status := p.(fwk.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
if !status.IsSuccess() {
t.Errorf("Score is expected to return success, but didn't. Got status: %v", status)
}
gotList = append(gotList, framework.NodeScore{Name: n.Name, Score: score})
gotList = append(gotList, fwk.NodeScore{Name: n.Name, Score: score})
}
if diff := cmp.Diff(test.expectedScores, gotList); diff != "" {
@ -378,7 +379,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedScores framework.NodeScoreList
expectedScores fwk.NodeScoreList
name string
}{
{
@ -411,7 +412,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
pod: st.MakePod().Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResources2), makeNode("node2", 4000, 10000*1024*1024, extendedResources1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
{
@ -444,7 +445,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
pod: st.MakePod().Req(extendedResourcePod1).Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResources2), makeNode("node2", 4000, 10000*1024*1024, extendedResources1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 4}, {Name: "node2", Score: 3}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 4}, {Name: "node2", Score: 3}},
name: "resources requested, pods scheduled with less resources",
pods: []*v1.Pod{
st.MakePod().Obj(),
@ -479,7 +480,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
pod: st.MakePod().Req(extendedResourcePod1).Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResources2), makeNode("node2", 4000, 10000*1024*1024, extendedResources1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 4}, {Name: "node2", Score: 7}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 4}, {Name: "node2", Score: 7}},
name: "resources requested, pods scheduled with resources, on node with existing pod running ",
pods: []*v1.Pod{st.MakePod().Req(extendedResourcePod2).Node("node2").Obj()},
},
@ -527,7 +528,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
pod: st.MakePod().Req(extendedResourcePod2).Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResources2), makeNode("node2", 4000, 10000*1024*1024, extendedResources1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 5}, {Name: "node2", Score: 5}},
expectedScores: []fwk.NodeScore{{Name: "node1", Score: 5}, {Name: "node2", Score: 5}},
name: "resources requested, pods scheduled with more resources",
pods: []*v1.Pod{
st.MakePod().Obj(),
@ -563,22 +564,22 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
t.Fatalf("unexpected error: %v", err)
}
status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
status := p.(fwk.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
if !status.IsSuccess() {
t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
}
var gotScores framework.NodeScoreList
var gotScores fwk.NodeScoreList
for _, n := range test.nodes {
nodeInfo, err := snapshot.Get(n.Name)
if err != nil {
t.Errorf("failed to get node %q from snapshot: %v", n.Name, err)
}
score, status := p.(framework.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
score, status := p.(fwk.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
if !status.IsSuccess() {
t.Errorf("Score is expected to return success, but didn't. Got status: %v", status)
}
gotScores = append(gotScores, framework.NodeScore{Name: n.Name, Score: score})
gotScores = append(gotScores, fwk.NodeScore{Name: n.Name, Score: score})
}
if diff := cmp.Diff(test.expectedScores, gotScores); diff != "" {

View file

@ -24,7 +24,6 @@ import (
v1helper "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
@ -36,8 +35,8 @@ type NodeUnschedulable struct {
enableSchedulingQueueHint bool
}
var _ framework.FilterPlugin = &NodeUnschedulable{}
var _ framework.EnqueueExtensions = &NodeUnschedulable{}
var _ fwk.FilterPlugin = &NodeUnschedulable{}
var _ fwk.EnqueueExtensions = &NodeUnschedulable{}
// Name is the name of the plugin used in the plugin registry and configurations.
const Name = names.NodeUnschedulable
@ -150,6 +149,6 @@ func (pl *NodeUnschedulable) Filter(ctx context.Context, _ fwk.CycleState, pod *
}
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, _ framework.Handle, fts feature.Features) (framework.Plugin, error) {
func New(_ context.Context, _ runtime.Object, _ fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
return &NodeUnschedulable{enableSchedulingQueueHint: fts.EnableSchedulingQueueHint}, nil
}

View file

@ -83,7 +83,7 @@ func TestNodeUnschedulable(t *testing.T) {
if err != nil {
t.Fatalf("creating plugin: %v", err)
}
gotStatus := p.(framework.FilterPlugin).Filter(ctx, nil, test.pod, nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, nil, test.pod, nodeInfo)
if diff := cmp.Diff(test.wantStatus, gotStatus); diff != "" {
t.Errorf("status does not match (-want,+got):\n%s", diff)
}

View file

@ -33,7 +33,6 @@ import (
csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
@ -70,9 +69,9 @@ type CSILimits struct {
translator InTreeToCSITranslator
}
var _ framework.PreFilterPlugin = &CSILimits{}
var _ framework.FilterPlugin = &CSILimits{}
var _ framework.EnqueueExtensions = &CSILimits{}
var _ fwk.PreFilterPlugin = &CSILimits{}
var _ fwk.FilterPlugin = &CSILimits{}
var _ fwk.EnqueueExtensions = &CSILimits{}
// CSIName is the name of the plugin used in the plugin registry and configurations.
const CSIName = names.NodeVolumeLimits
@ -234,7 +233,7 @@ func (pl *CSILimits) isSchedulableAfterCSINodeUpdated(logger klog.Logger, pod *v
// PreFilter invoked at the prefilter extension point
//
// If the pod doesn't have any of those volume types, we'll skip the Filter phase
func (pl *CSILimits) PreFilter(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, _ []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *CSILimits) PreFilter(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, _ []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
volumes := pod.Spec.Volumes
for i := range volumes {
vol := &volumes[i]
@ -247,7 +246,7 @@ func (pl *CSILimits) PreFilter(ctx context.Context, _ fwk.CycleState, pod *v1.Po
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *CSILimits) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *CSILimits) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
@ -547,7 +546,7 @@ func (pl *CSILimits) getCSIDriverInfoFromSC(logger klog.Logger, csiNode *storage
}
// NewCSI initializes a new plugin and returns it.
func NewCSI(_ context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) {
func NewCSI(_ context.Context, _ runtime.Object, handle fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
informerFactory := handle.SharedInformerFactory()
pvLister := informerFactory.Core().V1().PersistentVolumes().Lister()
pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister()

View file

@ -27,7 +27,6 @@ import (
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
const preFilterStateKey = "PreFilter" + Name
@ -137,7 +136,7 @@ func (p *criticalPaths) update(tpVal string, num int) {
}
// PreFilter invoked at the prefilter extension point.
func (pl *PodTopologySpread) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *PodTopologySpread) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
s, err := pl.calPreFilterState(ctx, pod, nodes)
if err != nil {
return nil, fwk.AsStatus(err)
@ -150,7 +149,7 @@ func (pl *PodTopologySpread) PreFilter(ctx context.Context, cycleState fwk.Cycle
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *PodTopologySpread) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *PodTopologySpread) PreFilterExtensions() fwk.PreFilterExtensions {
return pl
}
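
The preFilterStateKey pattern above is the usual CycleState round trip: PreFilter computes state once per cycle and later phases read it back by key. A minimal sketch with a map standing in for fwk.CycleState (the real type is concurrency-safe and stores fwk.StateData values):

package main

import "fmt"

// cycleState is a toy stand-in for fwk.CycleState.
type cycleState map[string]any

const preFilterStateKey = "PreFilterPodTopologySpread"

type preFilterState struct{ constraints int }

func preFilter(cs cycleState) {
	// Computed once per scheduling cycle, reused by Filter.
	cs[preFilterStateKey] = &preFilterState{constraints: 2}
}

func getPreFilterState(cs cycleState) (*preFilterState, error) {
	s, ok := cs[preFilterStateKey].(*preFilterState)
	if !ok {
		return nil, fmt.Errorf("%q not found or wrong type", preFilterStateKey)
	}
	return s, nil
}

func main() {
	cs := cycleState{}
	preFilter(cs)
	s, err := getPreFilterState(cs)
	fmt.Println(s, err) // &{2} <nil>
}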

View file

@ -31,8 +31,6 @@ import (
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/parallelize"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
@ -61,9 +59,9 @@ var systemDefaultConstraints = []v1.TopologySpreadConstraint{
// PodTopologySpread is a plugin that ensures a pod's topologySpreadConstraints are satisfied.
type PodTopologySpread struct {
systemDefaulted bool
parallelizer parallelize.Parallelizer
parallelizer fwk.Parallelizer
defaultConstraints []v1.TopologySpreadConstraint
sharedLister framework.SharedLister
sharedLister fwk.SharedLister
services corelisters.ServiceLister
replicationCtrls corelisters.ReplicationControllerLister
replicaSets appslisters.ReplicaSetLister
@ -73,11 +71,11 @@ type PodTopologySpread struct {
enableSchedulingQueueHint bool
}
var _ framework.PreFilterPlugin = &PodTopologySpread{}
var _ framework.FilterPlugin = &PodTopologySpread{}
var _ framework.PreScorePlugin = &PodTopologySpread{}
var _ framework.ScorePlugin = &PodTopologySpread{}
var _ framework.EnqueueExtensions = &PodTopologySpread{}
var _ fwk.PreFilterPlugin = &PodTopologySpread{}
var _ fwk.FilterPlugin = &PodTopologySpread{}
var _ fwk.PreScorePlugin = &PodTopologySpread{}
var _ fwk.ScorePlugin = &PodTopologySpread{}
var _ fwk.EnqueueExtensions = &PodTopologySpread{}
// Name is the name of the plugin used in the plugin registry and configurations.
const Name = names.PodTopologySpread
@ -88,7 +86,7 @@ func (pl *PodTopologySpread) Name() string {
}
// New initializes a new plugin and returns it.
func New(_ context.Context, plArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
func New(_ context.Context, plArgs runtime.Object, h fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
if h.SnapshotSharedLister() == nil {
return nil, fmt.Errorf("SnapshotSharedlister is nil")
}

View file

@ -26,7 +26,6 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
const preScoreStateKey = "PreScore" + Name
@ -223,7 +222,7 @@ func (pl *PodTopologySpread) Score(ctx context.Context, cycleState fwk.CycleStat
}
// NormalizeScore invoked after scoring all nodes.
func (pl *PodTopologySpread) NormalizeScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *fwk.Status {
func (pl *PodTopologySpread) NormalizeScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, scores fwk.NodeScoreList) *fwk.Status {
s, err := getPreScoreState(cycleState)
if err != nil {
return fwk.AsStatus(err)
@ -255,17 +254,17 @@ func (pl *PodTopologySpread) NormalizeScore(ctx context.Context, cycleState fwk.
continue
}
if maxScore == 0 {
scores[i].Score = framework.MaxNodeScore
scores[i].Score = fwk.MaxNodeScore
continue
}
s := scores[i].Score
scores[i].Score = framework.MaxNodeScore * (maxScore + minScore - s) / maxScore
scores[i].Score = fwk.MaxNodeScore * (maxScore + minScore - s) / maxScore
}
return nil
}
// ScoreExtensions of the Score plugin.
func (pl *PodTopologySpread) ScoreExtensions() framework.ScoreExtensions {
func (pl *PodTopologySpread) ScoreExtensions() fwk.ScoreExtensions {
return pl
}
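
The normalization in this hunk inverts raw skew counts (lower is better) onto the 0-100 range (higher is better); a self-contained sketch with illustrative raw scores, using a local constant for fwk.MaxNodeScore:

package main

import "fmt"

const maxNodeScore = 100 // mirrors fwk.MaxNodeScore

// normalize mirrors the NormalizeScore arithmetic above:
// score = maxNodeScore * (max + min - s) / max, with the all-zero
// case mapping every node to the maximum score.
func normalize(scores []int64) {
	minScore, maxScore := scores[0], scores[0]
	for _, s := range scores {
		if s < minScore {
			minScore = s
		}
		if s > maxScore {
			maxScore = s
		}
	}
	for i, s := range scores {
		if maxScore == 0 {
			scores[i] = maxNodeScore
			continue
		}
		scores[i] = maxNodeScore * (maxScore + minScore - s) / maxScore
	}
}

func main() {
	// Raw matching-pod counts per node: node-a=2, node-b=1, node-c=0.
	raw := []int64{2, 1, 0}
	normalize(raw)
	fmt.Println(raw) // [0 50 100]: the least-crowded node scores highest
}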

View file

@ -628,7 +628,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
nodes []*v1.Node
failedNodes []*v1.Node // nodes + failedNodes = all nodes
objs []runtime.Object
want framework.NodeScoreList
want fwk.NodeScoreList
enableNodeInclusionPolicy bool
enableMatchLabelKeys bool
}{
@ -647,7 +647,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-a").Label(v1.LabelHostname, "node-a").Obj(),
st.MakeNode().Name("node-b").Label(v1.LabelHostname, "node-b").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 100},
{Name: "node-b", Score: 100},
},
@ -669,7 +669,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
failedNodes: []*v1.Node{
st.MakeNode().Name("node-b").Label(v1.LabelHostname, "node-b").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 100},
},
},
@ -686,7 +686,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-a").Label(v1.LabelHostname, "node-a").Obj(),
st.MakeNode().Name("node-b").Label(v1.LabelHostname, "node-b").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 100},
{Name: "node-b", Score: 100},
},
@ -712,7 +712,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-d").Label(v1.LabelHostname, "node-d").Obj(),
},
failedNodes: []*v1.Node{},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 20},
{Name: "node-b", Score: 60},
{Name: "node-c", Score: 100},
@ -738,7 +738,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-c").Label(v1.LabelHostname, "node-c").Obj(),
st.MakeNode().Name("node-d").Label(v1.LabelHostname, "node-d").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 100},
{Name: "node-b", Score: 100},
{Name: "node-c", Score: 100},
@ -766,7 +766,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-d").Label(v1.LabelHostname, "node-d").Obj(),
},
failedNodes: []*v1.Node{},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 33}, // +13, compared to maxSkew=1
{Name: "node-b", Score: 66}, // +6, compared to maxSkew=1
{Name: "node-c", Score: 100},
@ -798,7 +798,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-d").Label(v1.LabelHostname, "node-d").Obj(),
},
failedNodes: []*v1.Node{},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 44}, // +16 compared to maxSkew=1
{Name: "node-b", Score: 66}, // +9 compared to maxSkew=1
{Name: "node-c", Score: 77}, // +6 compared to maxSkew=1
@ -831,7 +831,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
objs: []runtime.Object{
&v1.Service{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": ""}}},
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
// Same scores as if we were using one spreading constraint.
{Name: "node-a", Score: 44},
{Name: "node-b", Score: 66},
@ -865,7 +865,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
failedNodes: []*v1.Node{
st.MakeNode().Name("node-y").Label(v1.LabelHostname, "node-y").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 33},
{Name: "node-b", Score: 83},
{Name: "node-x", Score: 100},
@ -897,7 +897,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
failedNodes: []*v1.Node{
st.MakeNode().Name("node-y").Label(v1.LabelHostname, "node-y").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 16},
{Name: "node-b", Score: 0},
{Name: "node-x", Score: 100},
@ -929,7 +929,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
failedNodes: []*v1.Node{
st.MakeNode().Name("node-y").Label("zone", "zone2").Label(v1.LabelHostname, "node-y").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 75},
{Name: "node-b", Score: 75},
{Name: "node-x", Score: 100},
@ -961,7 +961,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-b").Label("zone", "zone1").Label(v1.LabelHostname, "node-b").Obj(),
st.MakeNode().Name("node-y").Label("zone", "zone2").Label(v1.LabelHostname, "node-y").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 100},
{Name: "node-x", Score: 63},
},
@ -988,7 +988,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-b").Label("zone", "zone1").Obj(),
st.MakeNode().Name("node-y").Label("zone", "zone2").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 85},
{Name: "node-x", Score: 100},
},
@ -1022,7 +1022,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-y").Label("zone", "zone2").Label(v1.LabelHostname, "node-y").Obj(),
},
failedNodes: []*v1.Node{},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 60},
{Name: "node-b", Score: 20},
{Name: "node-x", Score: 100},
@ -1049,7 +1049,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-y").Label("zone", "zone2").Label(v1.LabelHostname, "node-y").Obj(),
},
failedNodes: []*v1.Node{},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 100},
{Name: "node-b", Score: 60},
{Name: "node-x", Score: 40},
@ -1078,7 +1078,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
failedNodes: []*v1.Node{
st.MakeNode().Name("node-y").Label("zone", "zone2").Label(v1.LabelHostname, "node-y").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 50},
{Name: "node-b", Score: 25},
{Name: "node-x", Score: 100},
@ -1099,7 +1099,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-a").Label(v1.LabelHostname, "node-a").Obj(),
st.MakeNode().Name("node-b").Label(v1.LabelHostname, "node-b").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 100},
{Name: "node-b", Score: 33},
},
@ -1117,7 +1117,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakePod().Name("p-a").Node("node-a").Label("foo", "").Terminating().Obj(),
st.MakePod().Name("p-b").Node("node-b").Label("foo", "").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 100},
{Name: "node-b", Score: 0},
},
@ -1148,7 +1148,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakePod().Name("p-b2").Node("node-b").Label("foo", "").Obj(),
st.MakePod().Name("p-c1").Node("node-c").Label("foo", "").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 75},
{Name: "node-b", Score: 75},
{Name: "node-c", Score: 100},
@ -1171,7 +1171,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakePod().Name("p-b1").Node("node-b").Label("foo", "").Obj(),
st.MakePod().Name("p-c1").Node("node-c").Label("foo", "").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 0},
{Name: "node-b", Score: 33},
{Name: "node-c", Score: 100},
@ -1195,7 +1195,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakePod().Name("p-b1").Node("node-b").Label("foo", "").Obj(),
st.MakePod().Name("p-c1").Node("node-c").Label("foo", "").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 66},
{Name: "node-b", Score: 100},
{Name: "node-c", Score: 100},
@ -1219,7 +1219,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakePod().Name("p-b1").Node("node-b").Label("foo", "").Obj(),
st.MakePod().Name("p-c1").Node("node-c").Label("foo", "").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 0},
{Name: "node-b", Score: 33},
{Name: "node-c", Score: 100},
@ -1243,7 +1243,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakePod().Name("p-b1").Node("node-b").Label("foo", "").Obj(),
st.MakePod().Name("p-c1").Node("node-c").Label("foo", "").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 66},
{Name: "node-b", Score: 100},
{Name: "node-c", Score: 100},
@ -1266,7 +1266,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakePod().Name("p-b1").Node("node-b").Label("foo", "").Obj(),
st.MakePod().Name("p-c1").Node("node-c").Label("foo", "").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 0},
{Name: "node-b", Score: 33},
{Name: "node-c", Score: 100},
@ -1289,7 +1289,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakePod().Name("p-b1").Node("node-b").Label("foo", "").Obj(),
st.MakePod().Name("p-c1").Node("node-c").Label("foo", "").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 66},
{Name: "node-b", Score: 100},
{Name: "node-c", Score: 100},
@ -1314,7 +1314,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-c").Label("zone", "zone2").Label(v1.LabelHostname, "node-c").Obj(),
st.MakeNode().Name("node-d").Label("zone", "zone2").Label(v1.LabelHostname, "node-d").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 60},
{Name: "node-b", Score: 20},
{Name: "node-c", Score: 60},
@ -1340,7 +1340,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-c").Label("zone", "zone2").Label(v1.LabelHostname, "node-c").Obj(),
st.MakeNode().Name("node-d").Label("zone", "zone2").Label(v1.LabelHostname, "node-d").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 60},
{Name: "node-b", Score: 20},
{Name: "node-c", Score: 60},
@ -1367,7 +1367,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
st.MakeNode().Name("node-c").Label("zone", "zone2").Label(v1.LabelHostname, "node-c").Obj(),
st.MakeNode().Name("node-d").Label("zone", "zone2").Label(v1.LabelHostname, "node-d").Obj(),
},
want: []framework.NodeScore{
want: []fwk.NodeScore{
{Name: "node-a", Score: 60},
{Name: "node-b", Score: 20},
{Name: "node-c", Score: 60},
@ -1394,7 +1394,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
t.Errorf("unexpected error: %v", status)
}
var gotList framework.NodeScoreList
var gotList fwk.NodeScoreList
for _, n := range tt.nodes {
nodeName := n.Name
nodeInfo, err := p.sharedLister.NodeInfos().Get(n.Name)
@ -1405,7 +1405,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
gotList = append(gotList, fwk.NodeScore{Name: nodeName, Score: score})
}
status = p.NormalizeScore(ctx, state, tt.pod, gotList)
@ -1479,14 +1479,14 @@ func BenchmarkTestPodTopologySpreadScore(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
var gotList framework.NodeScoreList
var gotList fwk.NodeScoreList
for _, nodeInfo := range nodeInfos {
nodeName := nodeInfo.Node().Name
score, status := p.Score(ctx, state, tt.pod, nodeInfo)
if !status.IsSuccess() {
b.Fatalf("unexpected error: %v", status)
}
gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
gotList = append(gotList, fwk.NodeScore{Name: nodeName, Score: score})
}
status = p.NormalizeScore(ctx, state, tt.pod, gotList)

View file

@ -22,7 +22,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
@ -32,7 +31,7 @@ const Name = names.PrioritySort
// PrioritySort is a plugin that implements Priority based sorting.
type PrioritySort struct{}
var _ framework.QueueSortPlugin = &PrioritySort{}
var _ fwk.QueueSortPlugin = &PrioritySort{}
// Name returns name of the plugin.
func (pl *PrioritySort) Name() string {
@ -49,6 +48,6 @@ func (pl *PrioritySort) Less(pInfo1, pInfo2 fwk.QueuedPodInfo) bool {
}
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, handle framework.Handle) (framework.Plugin, error) {
func New(_ context.Context, _ runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
return &PrioritySort{}, nil
}
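
For context on the Less above: PrioritySort orders the active queue by pod priority and breaks ties by queue-entry time. A sketch of that comparison with local stand-ins for fwk.QueuedPodInfo:

package main

import (
	"fmt"
	"time"
)

// queuedPodInfo is a toy stand-in for fwk.QueuedPodInfo.
type queuedPodInfo struct {
	priority  int32
	timestamp time.Time
}

// less mirrors PrioritySort: higher priority wins; on a tie, the pod
// that entered the queue earlier is scheduled first.
func less(a, b queuedPodInfo) bool {
	if a.priority != b.priority {
		return a.priority > b.priority
	}
	return a.timestamp.Before(b.timestamp)
}

func main() {
	now := time.Now()
	hi := queuedPodInfo{priority: 100, timestamp: now}
	lo := queuedPodInfo{priority: 10, timestamp: now.Add(-time.Hour)}
	fmt.Println(less(hi, lo)) // true: priority beats queue age
}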

View file

@ -25,7 +25,6 @@ import (
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
@ -39,8 +38,8 @@ type SchedulingGates struct {
enableSchedulingQueueHint bool
}
var _ framework.PreEnqueuePlugin = &SchedulingGates{}
var _ framework.EnqueueExtensions = &SchedulingGates{}
var _ fwk.PreEnqueuePlugin = &SchedulingGates{}
var _ fwk.EnqueueExtensions = &SchedulingGates{}
func (pl *SchedulingGates) Name() string {
return Name
@ -74,7 +73,7 @@ func (pl *SchedulingGates) EventsToRegister(_ context.Context) ([]fwk.ClusterEve
}
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, _ framework.Handle, fts feature.Features) (framework.Plugin, error) {
func New(_ context.Context, _ runtime.Object, _ fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
return &SchedulingGates{
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
}, nil
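
SchedulingGates acts as the PreEnqueue gatekeeper: any entry remaining in spec.schedulingGates keeps the pod out of the active queue. A minimal sketch of that check with plain types (not the real plugin):

package main

import "fmt"

// preEnqueue mirrors the plugin's core check: an empty gate list means
// the pod may enter the scheduling queue; otherwise it stays gated.
func preEnqueue(gates []string) error {
	if len(gates) == 0 {
		return nil
	}
	return fmt.Errorf("waiting for scheduling gates: %v", gates)
}

func main() {
	fmt.Println(preEnqueue(nil))                        // <nil>: enqueue
	fmt.Println(preEnqueue([]string{"example.com/ok"})) // gated
}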

View file

@ -24,7 +24,6 @@ import (
v1 "k8s.io/api/core/v1"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
st "k8s.io/kubernetes/pkg/scheduler/testing"
"k8s.io/kubernetes/test/utils/ktesting"
@ -56,7 +55,7 @@ func TestPreEnqueue(t *testing.T) {
t.Fatalf("Creating plugin: %v", err)
}
got := p.(framework.PreEnqueuePlugin).PreEnqueue(ctx, tt.pod)
got := p.(fwk.PreEnqueuePlugin).PreEnqueue(ctx, tt.pod)
if diff := cmp.Diff(tt.want, got); diff != "" {
t.Errorf("unexpected status (-want, +got):\n%s", diff)
}

View file

@ -25,7 +25,6 @@ import (
v1helper "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
@ -34,14 +33,14 @@ import (
// TaintToleration is a plugin that checks if a pod tolerates a node's taints.
type TaintToleration struct {
handle framework.Handle
handle fwk.Handle
enableSchedulingQueueHint bool
}
var _ framework.FilterPlugin = &TaintToleration{}
var _ framework.PreScorePlugin = &TaintToleration{}
var _ framework.ScorePlugin = &TaintToleration{}
var _ framework.EnqueueExtensions = &TaintToleration{}
var _ fwk.FilterPlugin = &TaintToleration{}
var _ fwk.PreScorePlugin = &TaintToleration{}
var _ fwk.ScorePlugin = &TaintToleration{}
var _ fwk.EnqueueExtensions = &TaintToleration{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
@ -194,17 +193,17 @@ func (pl *TaintToleration) Score(ctx context.Context, state fwk.CycleState, pod
}
// NormalizeScore invoked after scoring all nodes.
func (pl *TaintToleration) NormalizeScore(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *fwk.Status {
return helper.DefaultNormalizeScore(framework.MaxNodeScore, true, scores)
func (pl *TaintToleration) NormalizeScore(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, scores fwk.NodeScoreList) *fwk.Status {
return helper.DefaultNormalizeScore(fwk.MaxNodeScore, true, scores)
}
// ScoreExtensions of the Score plugin.
func (pl *TaintToleration) ScoreExtensions() framework.ScoreExtensions {
func (pl *TaintToleration) ScoreExtensions() fwk.ScoreExtensions {
return pl
}
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
func New(_ context.Context, _ runtime.Object, h fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
return &TaintToleration{
handle: h,
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
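
TaintToleration's raw Score counts the PreferNoSchedule taints the pod does not tolerate, so the reverse=true normalization above flips it: the node with the fewest intolerable taints ends up at fwk.MaxNodeScore. A sketch of that reverse path (illustrative, not the helper's exact code):

package main

import "fmt"

const maxNodeScore = 100 // mirrors fwk.MaxNodeScore

// normalizeReverse sketches helper.DefaultNormalizeScore with
// reverse=true: scale to 0-100, then invert so fewer intolerable
// taints means a higher score.
func normalizeReverse(scores []int64) {
	var max int64
	for _, s := range scores {
		if s > max {
			max = s
		}
	}
	for i, s := range scores {
		if max == 0 {
			scores[i] = maxNodeScore // no intolerable taints anywhere
			continue
		}
		scores[i] = maxNodeScore - maxNodeScore*s/max
	}
}

func main() {
	// Intolerable PreferNoSchedule taint counts: nodeA=0, nodeB=1, nodeC=2.
	counts := []int64{0, 1, 2}
	normalizeReverse(counts)
	fmt.Println(counts) // [100 50 0]
}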

View file

@ -59,7 +59,7 @@ func TestTaintTolerationScore(t *testing.T) {
name string
pod *v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
expectedList fwk.NodeScoreList
}{
// basic test case
{
@ -82,8 +82,8 @@ func TestTaintTolerationScore(t *testing.T) {
Effect: v1.TaintEffectPreferNoSchedule,
}}),
},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: framework.MaxNodeScore},
expectedList: []fwk.NodeScore{
{Name: "nodeA", Score: fwk.MaxNodeScore},
{Name: "nodeB", Score: 0},
},
},
@ -124,10 +124,10 @@ func TestTaintTolerationScore(t *testing.T) {
},
}),
},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: framework.MaxNodeScore},
{Name: "nodeB", Score: framework.MaxNodeScore},
{Name: "nodeC", Score: framework.MaxNodeScore},
expectedList: []fwk.NodeScore{
{Name: "nodeA", Score: fwk.MaxNodeScore},
{Name: "nodeB", Score: fwk.MaxNodeScore},
{Name: "nodeC", Score: fwk.MaxNodeScore},
},
},
// the count of taints on a node that are not tolerated by the pod matters.
@ -160,8 +160,8 @@ func TestTaintTolerationScore(t *testing.T) {
},
}),
},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: framework.MaxNodeScore},
expectedList: []fwk.NodeScore{
{Name: "nodeA", Score: fwk.MaxNodeScore},
{Name: "nodeB", Score: 50},
{Name: "nodeC", Score: 0},
},
@ -203,9 +203,9 @@ func TestTaintTolerationScore(t *testing.T) {
},
}),
},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: framework.MaxNodeScore},
{Name: "nodeB", Score: framework.MaxNodeScore},
expectedList: []fwk.NodeScore{
{Name: "nodeA", Score: fwk.MaxNodeScore},
{Name: "nodeB", Score: fwk.MaxNodeScore},
{Name: "nodeC", Score: 0},
},
},
@ -224,8 +224,8 @@ func TestTaintTolerationScore(t *testing.T) {
},
}),
},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: framework.MaxNodeScore},
expectedList: []fwk.NodeScore{
{Name: "nodeA", Score: fwk.MaxNodeScore},
{Name: "nodeB", Score: 0},
},
},
@ -245,21 +245,21 @@ func TestTaintTolerationScore(t *testing.T) {
t.Fatalf("creating plugin: %v", err)
}
nodeInfos := tf.BuildNodeInfos(test.nodes)
status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, nodeInfos)
status := p.(fwk.PreScorePlugin).PreScore(ctx, state, test.pod, nodeInfos)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
var gotList framework.NodeScoreList
var gotList fwk.NodeScoreList
for _, nodeInfo := range nodeInfos {
nodeName := nodeInfo.Node().Name
score, status := p.(framework.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
score, status := p.(fwk.ScorePlugin).Score(ctx, state, test.pod, nodeInfo)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
gotList = append(gotList, fwk.NodeScore{Name: nodeName, Score: score})
}
status = p.(framework.ScorePlugin).ScoreExtensions().NormalizeScore(ctx, state, test.pod, gotList)
status = p.(fwk.ScorePlugin).ScoreExtensions().NormalizeScore(ctx, state, test.pod, gotList)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
@ -349,7 +349,7 @@ func TestTaintTolerationFilter(t *testing.T) {
if err != nil {
t.Fatalf("creating plugin: %v", err)
}
gotStatus := p.(framework.FilterPlugin).Filter(ctx, nil, test.pod, nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, nil, test.pod, nodeInfo)
if diff := cmp.Diff(test.wantStatus, gotStatus); diff != "" {
t.Errorf("Unexpected status (-want,+got):\n%s", diff)
}

View file

@ -20,12 +20,12 @@ import (
"context"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/scheduler/framework"
fwk "k8s.io/kube-scheduler/framework"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
)
@ -38,9 +38,9 @@ func SetupPluginWithInformers(
tb testing.TB,
pf frameworkruntime.PluginFactory,
config runtime.Object,
sharedLister framework.SharedLister,
sharedLister fwk.SharedLister,
objs []runtime.Object,
) framework.Plugin {
) fwk.Plugin {
objs = append([]runtime.Object{&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, objs...)
informerFactory := informers.NewSharedInformerFactory(fake.NewClientset(objs...), 0)
fh, err := frameworkruntime.NewFramework(ctx, nil, nil,
@ -65,8 +65,8 @@ func SetupPlugin(
tb testing.TB,
pf frameworkruntime.PluginFactory,
config runtime.Object,
sharedLister framework.SharedLister,
) framework.Plugin {
sharedLister fwk.SharedLister,
) fwk.Plugin {
fh, err := frameworkruntime.NewFramework(ctx, nil, nil,
frameworkruntime.WithSnapshotSharedLister(sharedLister))
if err != nil {

View file

@ -19,8 +19,8 @@ package volumebinding
import (
"testing"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
)
@ -34,7 +34,7 @@ func TestScore(t *testing.T) {
for _, point := range defaultShapePoint {
defaultShape = append(defaultShape, helper.FunctionShapePoint{
Utilization: int64(point.Utilization),
Score: int64(point.Score) * (framework.MaxNodeScore / config.MaxCustomPriorityScore),
Score: int64(point.Score) * (fwk.MaxNodeScore / config.MaxCustomPriorityScore),
})
}
type scoreCase struct {

View file

@ -35,7 +35,6 @@ import (
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
@ -79,13 +78,13 @@ type VolumeBinding struct {
fts feature.Features
}
var _ framework.PreFilterPlugin = &VolumeBinding{}
var _ framework.FilterPlugin = &VolumeBinding{}
var _ framework.ReservePlugin = &VolumeBinding{}
var _ framework.PreBindPlugin = &VolumeBinding{}
var _ framework.PreScorePlugin = &VolumeBinding{}
var _ framework.ScorePlugin = &VolumeBinding{}
var _ framework.EnqueueExtensions = &VolumeBinding{}
var _ fwk.PreFilterPlugin = &VolumeBinding{}
var _ fwk.FilterPlugin = &VolumeBinding{}
var _ fwk.ReservePlugin = &VolumeBinding{}
var _ fwk.PreBindPlugin = &VolumeBinding{}
var _ fwk.PreScorePlugin = &VolumeBinding{}
var _ fwk.ScorePlugin = &VolumeBinding{}
var _ fwk.EnqueueExtensions = &VolumeBinding{}
// Name is the name of the plugin used in Registry and configurations.
const Name = names.VolumeBinding
@ -350,7 +349,7 @@ func (pl *VolumeBinding) podHasPVCs(pod *v1.Pod) (bool, error) {
// PreFilter invoked at the prefilter extension point to check if the pod has
// all immediate PVCs bound. If not all immediate PVCs are bound, an
// UnschedulableAndUnresolvable status is returned.
func (pl *VolumeBinding) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *VolumeBinding) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
logger := klog.FromContext(ctx)
// If pod does not reference any PVC, we don't need to do anything.
if hasPVC, err := pl.podHasPVCs(pod); err != nil {
@ -383,7 +382,7 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state fwk.CycleState, po
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *VolumeBinding) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *VolumeBinding) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
@ -516,7 +515,7 @@ func (pl *VolumeBinding) Score(ctx context.Context, cs fwk.CycleState, pod *v1.P
}
// ScoreExtensions of the Score plugin.
func (pl *VolumeBinding) ScoreExtensions() framework.ScoreExtensions {
func (pl *VolumeBinding) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
@ -607,7 +606,7 @@ func (pl *VolumeBinding) Unreserve(ctx context.Context, cs fwk.CycleState, pod *
}
// New initializes a new plugin and returns it.
func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) {
func New(ctx context.Context, plArgs runtime.Object, fh fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
args, ok := plArgs.(*config.VolumeBindingArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type VolumeBindingArgs, got %T", plArgs)
@ -636,7 +635,7 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe
for _, point := range args.Shape {
shape = append(shape, helper.FunctionShapePoint{
Utilization: int64(point.Utilization),
Score: int64(point.Score) * (framework.MaxNodeScore / config.MaxCustomPriorityScore),
Score: int64(point.Score) * (fwk.MaxNodeScore / config.MaxCustomPriorityScore),
})
}
scorer = buildScorerFunction(shape)
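
The shape assembled above feeds a piecewise-linear scorer: utilization values between two shape points are interpolated. A self-contained sketch of that interpolation, assuming the default shape (0% -> 0, 100% -> 100 after the MaxCustomPriorityScore scaling):

package main

import "fmt"

type point struct{ utilization, score int64 }

// interpolate sketches the scorer built from the shape: clamp outside
// the shape's range, linearly interpolate between neighbors inside it.
func interpolate(shape []point, u int64) int64 {
	if u <= shape[0].utilization {
		return shape[0].score
	}
	last := shape[len(shape)-1]
	if u >= last.utilization {
		return last.score
	}
	for i := 1; i < len(shape); i++ {
		p0, p1 := shape[i-1], shape[i]
		if u <= p1.utilization {
			return p0.score + (p1.score-p0.score)*(u-p0.utilization)/(p1.utilization-p0.utilization)
		}
	}
	return last.score
}

func main() {
	// Default shape after scaling: 0% utilization -> 0, 100% -> 100.
	shape := []point{{0, 0}, {100, 100}}
	fmt.Println(interpolate(shape, 25)) // 25
	fmt.Println(interpolate(shape, 80)) // 80
}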

View file

@ -99,7 +99,7 @@ func TestVolumeBinding(t *testing.T) {
capacities []*storagev1.CSIStorageCapacity
fts feature.Features
args *config.VolumeBindingArgs
wantPreFilterResult *framework.PreFilterResult
wantPreFilterResult *fwk.PreFilterResult
wantPreFilterStatus *fwk.Status
wantStateAfterPreFilter *stateData
wantFilterStatus []*fwk.Status

View file

@ -37,13 +37,13 @@ import (
// VolumeRestrictions is a plugin that checks volume restrictions.
type VolumeRestrictions struct {
pvcLister corelisters.PersistentVolumeClaimLister
sharedLister framework.SharedLister
sharedLister fwk.SharedLister
enableSchedulingQueueHint bool
}
var _ framework.PreFilterPlugin = &VolumeRestrictions{}
var _ framework.FilterPlugin = &VolumeRestrictions{}
var _ framework.EnqueueExtensions = &VolumeRestrictions{}
var _ fwk.PreFilterPlugin = &VolumeRestrictions{}
var _ fwk.FilterPlugin = &VolumeRestrictions{}
var _ fwk.EnqueueExtensions = &VolumeRestrictions{}
var _ fwk.StateData = &preFilterState{}
const (
@ -163,7 +163,7 @@ func needsRestrictionsCheck(v v1.Volume) bool {
}
// PreFilter computes and stores cycleState containing details for enforcing ReadWriteOncePod.
func (pl *VolumeRestrictions) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *VolumeRestrictions) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
needsCheck := false
for i := range pod.Spec.Volumes {
if needsRestrictionsCheck(pod.Spec.Volumes[i]) {
@ -292,7 +292,7 @@ func satisfyReadWriteOncePod(ctx context.Context, state *preFilterState) *fwk.St
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *VolumeRestrictions) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *VolumeRestrictions) PreFilterExtensions() fwk.PreFilterExtensions {
return pl
}
@ -414,7 +414,7 @@ func (pl *VolumeRestrictions) isSchedulableAfterPodDeleted(logger klog.Logger, p
}
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) {
func New(_ context.Context, _ runtime.Object, handle fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
informerFactory := handle.SharedInformerFactory()
pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister()
sharedLister := handle.SnapshotSharedLister()

View file

@ -106,13 +106,13 @@ func TestGCEDiskConflicts(t *testing.T) {
defer cancel()
p := newPlugin(ctx, t)
cycleState := framework.NewCycleState()
_, preFilterGotStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
_, preFilterGotStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
if diff := cmp.Diff(test.preFilterWantStatus, preFilterGotStatus); diff != "" {
t.Errorf("Unexpected PreFilter status (-want, +got): %s", diff)
}
// If PreFilter fails, then Filter will not run.
if test.preFilterWantStatus.IsSuccess() {
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
if diff := cmp.Diff(test.wantStatus, gotStatus); diff != "" {
t.Errorf("Unexpected Filter status (-want, +got): %s", diff)
}
@ -181,13 +181,13 @@ func TestAWSDiskConflicts(t *testing.T) {
defer cancel()
p := newPlugin(ctx, t)
cycleState := framework.NewCycleState()
_, preFilterGotStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
_, preFilterGotStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
if diff := cmp.Diff(test.preFilterWantStatus, preFilterGotStatus); diff != "" {
t.Errorf("Unexpected PreFilter status (-want, +got): %s", diff)
}
// If PreFilter fails, then Filter will not run.
if test.preFilterWantStatus.IsSuccess() {
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
if diff := cmp.Diff(test.wantStatus, gotStatus); diff != "" {
t.Errorf("Unexpected Filter status (-want, +got): %s", diff)
}
@ -262,13 +262,13 @@ func TestRBDDiskConflicts(t *testing.T) {
defer cancel()
p := newPlugin(ctx, t)
cycleState := framework.NewCycleState()
_, preFilterGotStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
_, preFilterGotStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
if diff := cmp.Diff(test.preFilterWantStatus, preFilterGotStatus); diff != "" {
t.Errorf("Unexpected PreFilter status (-want, +got): %s", diff)
}
// If PreFilter fails, then Filter will not run.
if test.preFilterWantStatus.IsSuccess() {
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
if diff := cmp.Diff(test.wantStatus, gotStatus); diff != "" {
t.Errorf("Unexpected Filter status (-want, +got): %s", diff)
}
@ -343,13 +343,13 @@ func TestISCSIDiskConflicts(t *testing.T) {
defer cancel()
p := newPlugin(ctx, t)
cycleState := framework.NewCycleState()
_, preFilterGotStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
_, preFilterGotStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
if diff := cmp.Diff(test.preFilterWantStatus, preFilterGotStatus); diff != "" {
t.Errorf("Unexpected PreFilter status (-want, +got): %s", diff)
}
// If PreFilter fails, then Filter will not run.
if test.preFilterWantStatus.IsSuccess() {
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
if diff := cmp.Diff(test.wantStatus, gotStatus); diff != "" {
t.Errorf("Unexpected Filter status (-want, +got): %s", diff)
}
@ -471,13 +471,13 @@ func TestAccessModeConflicts(t *testing.T) {
defer cancel()
p := newPluginWithListers(ctx, t, test.existingPods, test.existingNodes, test.existingPVCs)
cycleState := framework.NewCycleState()
_, preFilterGotStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
_, preFilterGotStatus := p.(fwk.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod, nil)
if diff := cmp.Diff(test.preFilterWantStatus, preFilterGotStatus); diff != "" {
t.Errorf("Unexpected PreFilter status (-want, +got): %s", diff)
}
// If PreFilter fails, then Filter will not run.
if test.preFilterWantStatus.IsSuccess() {
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
gotStatus := p.(fwk.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
if diff := cmp.Diff(test.wantStatus, gotStatus); diff != "" {
t.Errorf("Unexpected Filter status (-want, +got): %s", diff)
}
@ -784,12 +784,12 @@ func Test_isSchedulableAfterPersistentVolumeClaimChange(t *testing.T) {
}
}
func newPlugin(ctx context.Context, t *testing.T) framework.Plugin {
func newPlugin(ctx context.Context, t *testing.T) fwk.Plugin {
return newPluginWithListers(ctx, t, nil, nil, nil)
}
func newPluginWithListers(ctx context.Context, t *testing.T, pods []*v1.Pod, nodes []*v1.Node, pvcs []*v1.PersistentVolumeClaim) framework.Plugin {
pluginFactory := func(ctx context.Context, plArgs runtime.Object, fh framework.Handle) (framework.Plugin, error) {
func newPluginWithListers(ctx context.Context, t *testing.T, pods []*v1.Pod, nodes []*v1.Node, pvcs []*v1.PersistentVolumeClaim) fwk.Plugin {
pluginFactory := func(ctx context.Context, plArgs runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return New(ctx, plArgs, fh, feature.Features{})
}
snapshot := cache.NewSnapshot(pods, nodes)

View file

@ -33,7 +33,6 @@ import (
storagehelpers "k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
@ -47,9 +46,9 @@ type VolumeZone struct {
enableSchedulingQueueHint bool
}
var _ framework.FilterPlugin = &VolumeZone{}
var _ framework.PreFilterPlugin = &VolumeZone{}
var _ framework.EnqueueExtensions = &VolumeZone{}
var _ fwk.FilterPlugin = &VolumeZone{}
var _ fwk.PreFilterPlugin = &VolumeZone{}
var _ fwk.EnqueueExtensions = &VolumeZone{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
@ -109,7 +108,7 @@ func (pl *VolumeZone) Name() string {
//
// Currently, this is only supported with PersistentVolumeClaims,
// and only looks for the bound PersistentVolume.
func (pl *VolumeZone) PreFilter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *VolumeZone) PreFilter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
logger := klog.FromContext(ctx)
podPVTopologies, status := pl.getPVbyPod(logger, pod)
if !status.IsSuccess() {
@ -168,7 +167,7 @@ func (pl *VolumeZone) getPVbyPod(logger klog.Logger, pod *v1.Pod) ([]pvTopology,
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *VolumeZone) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *VolumeZone) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
@ -397,7 +396,7 @@ func (pl *VolumeZone) getPVTopologies(logger klog.Logger, pv *v1.PersistentVolum
}
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) {
func New(_ context.Context, _ runtime.Object, handle fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
informerFactory := handle.SharedInformerFactory()
pvLister := informerFactory.Core().V1().PersistentVolumes().Lister()
pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister()

View file

@ -837,7 +837,7 @@ func BenchmarkVolumeZone(b *testing.B) {
}
}
func newPluginWithListers(ctx context.Context, tb testing.TB, pods []*v1.Pod, nodes []*v1.Node, pvcs []*v1.PersistentVolumeClaim, pvs []*v1.PersistentVolume) framework.Plugin {
func newPluginWithListers(ctx context.Context, tb testing.TB, pods []*v1.Pod, nodes []*v1.Node, pvcs []*v1.PersistentVolumeClaim, pvs []*v1.PersistentVolume) fwk.Plugin {
snapshot := cache.NewSnapshot(pods, nodes)
objects := make([]runtime.Object, 0, len(pvcs))

View file

@ -129,7 +129,7 @@ type Interface interface {
type Evaluator struct {
PluginName string
Handler framework.Handle
Handler fwk.Handle
PodLister corelisters.PodLister
PdbLister policylisters.PodDisruptionBudgetLister
@ -146,7 +146,7 @@ type Evaluator struct {
Interface
}
func NewEvaluator(pluginName string, fh framework.Handle, i Interface, enableAsyncPreemption bool) *Evaluator {
func NewEvaluator(pluginName string, fh fwk.Handle, i Interface, enableAsyncPreemption bool) *Evaluator {
podLister := fh.SharedInformerFactory().Core().V1().Pods().Lister()
pdbLister := fh.SharedInformerFactory().Policy().V1().PodDisruptionBudgets().Lister()
@ -231,7 +231,7 @@ func (ev *Evaluator) IsPodRunningPreemption(podUID types.UID) bool {
//
// - <non-nil PostFilterResult, Success>. It's the regular happy path
// and the non-empty nominatedNodeName will be applied to the preemptor pod.
func (ev *Evaluator) Preempt(ctx context.Context, state fwk.CycleState, pod *v1.Pod, m framework.NodeToStatusReader) (*framework.PostFilterResult, *fwk.Status) {
func (ev *Evaluator) Preempt(ctx context.Context, state fwk.CycleState, pod *v1.Pod, m fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status) {
logger := klog.FromContext(ctx)
// 0) Fetch the latest version of <pod>.
@ -306,7 +306,7 @@ func (ev *Evaluator) Preempt(ctx context.Context, state fwk.CycleState, pod *v1.
// findCandidates calculates a slice of preemption candidates.
// Each candidate is executable to make the given <pod> schedulable.
func (ev *Evaluator) findCandidates(ctx context.Context, state fwk.CycleState, allNodes []fwk.NodeInfo, pod *v1.Pod, m framework.NodeToStatusReader) ([]Candidate, *framework.NodeToStatus, error) {
func (ev *Evaluator) findCandidates(ctx context.Context, state fwk.CycleState, allNodes []fwk.NodeInfo, pod *v1.Pod, m fwk.NodeToStatusReader) ([]Candidate, *framework.NodeToStatus, error) {
if len(allNodes) == 0 {
return nil, nil, errors.New("no nodes available")
}
@ -468,12 +468,12 @@ func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1.
// clearNominatedNodeName internally submits a patch request to the API server
// to set each pods[*].Status.NominatedNodeName to "".
func clearNominatedNodeName(ctx context.Context, cs clientset.Interface, apiCacher framework.APICacher, pods ...*v1.Pod) utilerrors.Aggregate {
func clearNominatedNodeName(ctx context.Context, cs clientset.Interface, apiCacher fwk.APICacher, pods ...*v1.Pod) utilerrors.Aggregate {
var errs []error
for _, p := range pods {
if apiCacher != nil {
// When API cacher is available, use it to clear the NominatedNodeName.
_, err := apiCacher.PatchPodStatus(p, nil, &framework.NominatingInfo{NominatedNodeName: "", NominatingMode: framework.ModeOverride})
_, err := apiCacher.PatchPodStatus(p, nil, &fwk.NominatingInfo{NominatedNodeName: "", NominatingMode: fwk.ModeOverride})
if err != nil {
errs = append(errs, err)
}
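
Assuming fwk.APICacher keeps the PatchPodStatus shape shown elsewhere in this commit (a completion channel plus an enqueue error), the per-pod body above can be read as this hedged sketch:

package demo

import (
	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
)

// clearOne resets a single pod's nominated node through the API cacher and
// waits for the asynchronous call to finish; clearNominatedNodeName above
// does the same per pod but only collects the enqueue error.
func clearOne(apiCacher fwk.APICacher, p *v1.Pod) error {
	onFinish, err := apiCacher.PatchPodStatus(p, nil, &fwk.NominatingInfo{
		NominatedNodeName: "",
		NominatingMode:    fwk.ModeOverride,
	})
	if err != nil {
		return err
	}
	return <-onFinish
}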
@ -716,7 +716,7 @@ func pickOneNodeForPreemption(logger klog.Logger, nodesToVictims map[string]*ext
// manipulation of NodeInfo and PreFilter state per nominated pod. It may not be
// worth the complexity, especially because we generally expect to have a very
// small number of nominated pods per node.
func getLowerPriorityNominatedPods(logger klog.Logger, pn framework.PodNominator, pod *v1.Pod, nodeName string) []*v1.Pod {
func getLowerPriorityNominatedPods(logger klog.Logger, pn fwk.PodNominator, pod *v1.Pod, nodeName string) []*v1.Pod {
podInfos := pn.NominatedPodsForNode(nodeName)
if len(podInfos) == 0 {

View file

@ -932,7 +932,7 @@ func (f *fakeExtender) IsIgnorable() bool {
func (f *fakeExtender) ProcessPreemption(
_ *v1.Pod,
victims map[string]*extenderv1.Victims,
_ framework.NodeInfoLister,
_ fwk.NodeInfoLister,
) (map[string]*extenderv1.Victims, error) {
if f.supportsPreemption {
if f.errProcessPreemption {
@ -1006,21 +1006,21 @@ func TestCallExtenders(t *testing.T) {
)
tests := []struct {
name string
extenders []framework.Extender
extenders []fwk.Extender
candidates []Candidate
wantStatus *fwk.Status
wantCandidates []Candidate
}{
{
name: "no extenders",
extenders: []framework.Extender{},
extenders: []fwk.Extender{},
candidates: makeCandidates(node1Name, victim),
wantStatus: nil,
wantCandidates: makeCandidates(node1Name, victim),
},
{
name: "one extender supports preemption",
extenders: []framework.Extender{
extenders: []fwk.Extender{
newFakeExtender().WithSupportsPreemption(true),
},
candidates: makeCandidates(node1Name, victim),
@ -1029,7 +1029,7 @@ func TestCallExtenders(t *testing.T) {
},
{
name: "one extender with return no victims",
extenders: []framework.Extender{
extenders: []fwk.Extender{
newFakeExtender().WithSupportsPreemption(true).WithReturnNoVictims(true),
},
candidates: makeCandidates(node1Name, victim),
@ -1038,7 +1038,7 @@ func TestCallExtenders(t *testing.T) {
},
{
name: "one extender does not support preemption",
extenders: []framework.Extender{
extenders: []fwk.Extender{
newFakeExtender().WithSupportsPreemption(false),
},
candidates: makeCandidates(node1Name, victim),
@ -1047,7 +1047,7 @@ func TestCallExtenders(t *testing.T) {
},
{
name: "one extender with no return victims and is ignorable",
extenders: []framework.Extender{
extenders: []fwk.Extender{
newFakeExtender().WithSupportsPreemption(true).
WithReturnNoVictims(true).WithIgnorable(true),
},
@ -1057,7 +1057,7 @@ func TestCallExtenders(t *testing.T) {
},
{
name: "one extender returns error and is ignorable",
extenders: []framework.Extender{
extenders: []fwk.Extender{
newFakeExtender().WithIgnorable(true).
WithSupportsPreemption(true).WithErrProcessPreemption(true),
},
@ -1067,7 +1067,7 @@ func TestCallExtenders(t *testing.T) {
},
{
name: "one extender returns error and is not ignorable",
extenders: []framework.Extender{
extenders: []fwk.Extender{
newFakeExtender().WithErrProcessPreemption(true).
WithSupportsPreemption(true),
},
@ -1077,7 +1077,7 @@ func TestCallExtenders(t *testing.T) {
},
{
name: "one extender with empty victims input",
extenders: []framework.Extender{
extenders: []fwk.Extender{
newFakeExtender().WithSupportsPreemption(true),
},
candidates: []Candidate{},
@ -1202,7 +1202,7 @@ func TestRemoveNominatedNodeName(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
var apiCacher framework.APICacher
var apiCacher fwk.APICacher
if asyncAPICallsEnabled {
apiDispatcher := apidispatcher.New(cs, 16, apicalls.Relevances)
apiDispatcher.Run(logger)

View file

@ -53,44 +53,44 @@ const (
// plugins.
type frameworkImpl struct {
registry Registry
snapshotSharedLister framework.SharedLister
snapshotSharedLister fwk.SharedLister
waitingPods *waitingPodsMap
scorePluginWeight map[string]int
preEnqueuePlugins []framework.PreEnqueuePlugin
enqueueExtensions []framework.EnqueueExtensions
queueSortPlugins []framework.QueueSortPlugin
preFilterPlugins []framework.PreFilterPlugin
filterPlugins []framework.FilterPlugin
postFilterPlugins []framework.PostFilterPlugin
preScorePlugins []framework.PreScorePlugin
scorePlugins []framework.ScorePlugin
reservePlugins []framework.ReservePlugin
preBindPlugins []framework.PreBindPlugin
bindPlugins []framework.BindPlugin
postBindPlugins []framework.PostBindPlugin
permitPlugins []framework.PermitPlugin
preEnqueuePlugins []fwk.PreEnqueuePlugin
enqueueExtensions []fwk.EnqueueExtensions
queueSortPlugins []fwk.QueueSortPlugin
preFilterPlugins []fwk.PreFilterPlugin
filterPlugins []fwk.FilterPlugin
postFilterPlugins []fwk.PostFilterPlugin
preScorePlugins []fwk.PreScorePlugin
scorePlugins []fwk.ScorePlugin
reservePlugins []fwk.ReservePlugin
preBindPlugins []fwk.PreBindPlugin
bindPlugins []fwk.BindPlugin
postBindPlugins []fwk.PostBindPlugin
permitPlugins []fwk.PermitPlugin
// pluginsMap contains all plugins, by name.
pluginsMap map[string]framework.Plugin
pluginsMap map[string]fwk.Plugin
clientSet clientset.Interface
kubeConfig *restclient.Config
eventRecorder events.EventRecorder
informerFactory informers.SharedInformerFactory
sharedDRAManager framework.SharedDRAManager
sharedDRAManager fwk.SharedDRAManager
logger klog.Logger
metricsRecorder *metrics.MetricAsyncRecorder
profileName string
percentageOfNodesToScore *int32
extenders []framework.Extender
framework.PodNominator
framework.PodActivator
extenders []fwk.Extender
fwk.PodNominator
fwk.PodActivator
apiDispatcher *apidispatcher.APIDispatcher
apiCacher framework.APICacher
apiCacher fwk.APICacher
parallelizer parallelize.Parallelizer
parallelizer fwk.Parallelizer
}
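
From a plugin's point of view the same staged types now surface through fwk.Handle, so reading the cycle's node snapshot needs no in-tree import. A small sketch, assuming NodeInfos().List() keeps its ([]fwk.NodeInfo, error) shape:

package demo

import fwk "k8s.io/kube-scheduler/framework"

// listSnapshotNodes reads the scheduling snapshot purely through the staged
// interfaces, mirroring the SnapshotSharedLister accessor defined below.
func listSnapshotNodes(h fwk.Handle) ([]fwk.NodeInfo, error) {
	return h.SnapshotSharedLister().NodeInfos().List()
}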
// extensionPoint encapsulates desired and applied set of plugins at a specific extension
@ -122,7 +122,7 @@ func (f *frameworkImpl) getExtensionPoints(plugins *config.Plugins) []extensionP
}
// Extenders returns the registered extenders.
func (f *frameworkImpl) Extenders() []framework.Extender {
func (f *frameworkImpl) Extenders() []fwk.Extender {
return f.extenders
}
@ -132,12 +132,12 @@ type frameworkOptions struct {
kubeConfig *restclient.Config
eventRecorder events.EventRecorder
informerFactory informers.SharedInformerFactory
sharedDRAManager framework.SharedDRAManager
snapshotSharedLister framework.SharedLister
sharedDRAManager fwk.SharedDRAManager
snapshotSharedLister fwk.SharedLister
metricsRecorder *metrics.MetricAsyncRecorder
podNominator framework.PodNominator
podActivator framework.PodActivator
extenders []framework.Extender
podNominator fwk.PodNominator
podActivator fwk.PodActivator
extenders []fwk.Extender
captureProfile CaptureProfile
parallelizer parallelize.Parallelizer
waitingPods *waitingPodsMap
@ -187,34 +187,34 @@ func WithInformerFactory(informerFactory informers.SharedInformerFactory) Option
}
// WithSharedDRAManager sets SharedDRAManager for the framework.
func WithSharedDRAManager(sharedDRAManager framework.SharedDRAManager) Option {
func WithSharedDRAManager(sharedDRAManager fwk.SharedDRAManager) Option {
return func(o *frameworkOptions) {
o.sharedDRAManager = sharedDRAManager
}
}
// WithSnapshotSharedLister sets the SharedLister of the snapshot.
func WithSnapshotSharedLister(snapshotSharedLister framework.SharedLister) Option {
func WithSnapshotSharedLister(snapshotSharedLister fwk.SharedLister) Option {
return func(o *frameworkOptions) {
o.snapshotSharedLister = snapshotSharedLister
}
}
// WithPodNominator sets podNominator for the scheduling frameworkImpl.
func WithPodNominator(nominator framework.PodNominator) Option {
func WithPodNominator(nominator fwk.PodNominator) Option {
return func(o *frameworkOptions) {
o.podNominator = nominator
}
}
func WithPodActivator(activator framework.PodActivator) Option {
func WithPodActivator(activator fwk.PodActivator) Option {
return func(o *frameworkOptions) {
o.podActivator = activator
}
}
// WithExtenders sets extenders for the scheduling frameworkImpl.
func WithExtenders(extenders []framework.Extender) Option {
func WithExtenders(extenders []fwk.Extender) Option {
return func(o *frameworkOptions) {
o.extenders = extenders
}
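
The option constructors now take the staged types, so callers can wire a framework without importing the in-tree interfaces for these dependencies. A hedged sketch, assuming the Framework interface itself stays in-tree and frameworkruntime is the usual alias for this package:

package demo

import (
	"context"

	fwk "k8s.io/kube-scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
)

// buildFramework wires a nominator and extenders through the staged types;
// the registry and profile are assumed to be prepared by the caller.
func buildFramework(ctx context.Context, r frameworkruntime.Registry, profile *config.KubeSchedulerProfile,
	nominator fwk.PodNominator, extenders []fwk.Extender) (framework.Framework, error) {
	return frameworkruntime.NewFramework(ctx, r, profile,
		frameworkruntime.WithPodNominator(nominator),
		frameworkruntime.WithExtenders(extenders),
	)
}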
@ -307,10 +307,10 @@ func NewFramework(ctx context.Context, r Registry, profile *config.KubeScheduler
if len(f.extenders) > 0 {
// Extender doesn't support any kind of requeueing feature like EnqueueExtensions in the scheduling framework.
// We register a defaultEnqueueExtension to framework.ExtenderName here.
// We register a defaultEnqueueExtension to fwk.ExtenderName here.
// And, in the scheduling cycle, when Extenders reject some Nodes and the pod ends up being unschedulable,
// we put framework.ExtenderName to pInfo.UnschedulablePlugins.
f.enqueueExtensions = []framework.EnqueueExtensions{&defaultEnqueueExtension{pluginName: framework.ExtenderName}}
// we put fwk.ExtenderName to pInfo.UnschedulablePlugins.
f.enqueueExtensions = []fwk.EnqueueExtensions{&defaultEnqueueExtension{pluginName: framework.ExtenderName}}
}
if profile == nil {
@ -341,7 +341,7 @@ func NewFramework(ctx context.Context, r Registry, profile *config.KubeScheduler
PluginConfig: make([]config.PluginConfig, 0, len(pg)),
}
f.pluginsMap = make(map[string]framework.Plugin)
f.pluginsMap = make(map[string]fwk.Plugin)
for name, factory := range r {
// initialize only needed plugins.
if !pg.Has(name) {
@ -445,15 +445,15 @@ func (f *frameworkImpl) setInstrumentedPlugins() {
}
}
func (f *frameworkImpl) SetPodNominator(n framework.PodNominator) {
func (f *frameworkImpl) SetPodNominator(n fwk.PodNominator) {
f.PodNominator = n
}
func (f *frameworkImpl) SetPodActivator(a framework.PodActivator) {
func (f *frameworkImpl) SetPodActivator(a fwk.PodActivator) {
f.PodActivator = a
}
func (f *frameworkImpl) SetAPICacher(c framework.APICacher) {
func (f *frameworkImpl) SetAPICacher(c fwk.APICacher) {
f.apiCacher = c
}
@ -497,10 +497,10 @@ func getScoreWeights(f *frameworkImpl, plugins []config.Plugin) error {
}
// Checks totalPriority against MaxTotalScore to avoid overflow
if int64(f.scorePluginWeight[e.Name])*framework.MaxNodeScore > framework.MaxTotalScore-totalPriority {
if int64(f.scorePluginWeight[e.Name])*fwk.MaxNodeScore > fwk.MaxTotalScore-totalPriority {
return fmt.Errorf("total score of Score plugins could overflow")
}
totalPriority += int64(f.scorePluginWeight[e.Name]) * framework.MaxNodeScore
totalPriority += int64(f.scorePluginWeight[e.Name]) * fwk.MaxNodeScore
}
return nil
}
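
The guard above rejects score weights whose worst-case sum could overflow. Restated as a standalone check, assuming the constants keep their in-tree values (MaxNodeScore = 100, MaxTotalScore = math.MaxInt64):

package demo

import fwk "k8s.io/kube-scheduler/framework"

// fitsTotalScore replays the overflow guard from getScoreWeights: every
// plugin can contribute at most weight*MaxNodeScore per node, and the
// running total must never pass MaxTotalScore. Two plugins with weights
// 3 and 2 contribute at most 3*100 + 2*100 = 500, well inside the limit.
func fitsTotalScore(weights []int64) bool {
	var total int64
	for _, w := range weights {
		if w*fwk.MaxNodeScore > fwk.MaxTotalScore-total {
			return false
		}
		total += w * fwk.MaxNodeScore
	}
	return true
}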
@ -623,23 +623,23 @@ func (f *frameworkImpl) expandMultiPointPlugins(logger klog.Logger, profile *con
return nil
}
func shouldHaveEnqueueExtensions(p framework.Plugin) bool {
func shouldHaveEnqueueExtensions(p fwk.Plugin) bool {
switch p.(type) {
// Only PreEnqueue, PreFilter, Filter, Reserve, and Permit plugins can (should) have EnqueueExtensions.
// See the comment of EnqueueExtensions for more detailed reason here.
case framework.PreEnqueuePlugin, framework.PreFilterPlugin, framework.FilterPlugin, framework.ReservePlugin, framework.PermitPlugin:
case fwk.PreEnqueuePlugin, fwk.PreFilterPlugin, fwk.FilterPlugin, fwk.ReservePlugin, fwk.PermitPlugin:
return true
}
return false
}
func (f *frameworkImpl) fillEnqueueExtensions(p framework.Plugin) {
func (f *frameworkImpl) fillEnqueueExtensions(p fwk.Plugin) {
if !shouldHaveEnqueueExtensions(p) {
// Ignore EnqueueExtensions from plugins which aren't PreEnqueue, PreFilter, Filter, Reserve, or Permit.
return
}
ext, ok := p.(framework.EnqueueExtensions)
ext, ok := p.(fwk.EnqueueExtensions)
if !ok {
// If interface EnqueueExtensions is not implemented, register the default enqueue extensions
// to the plugin because we don't know which events the plugin is interested in.
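
A plugin opts out of that wildcard default by implementing fwk.EnqueueExtensions itself. A hedged sketch of a narrow registration; the event choice and names are illustrative:

package demo

import (
	"context"

	fwk "k8s.io/kube-scheduler/framework"
)

// nodeBoundPlugin sketches the EnqueueExtensions side of a Filter plugin;
// its Filter method is omitted for brevity.
type nodeBoundPlugin struct{}

func (*nodeBoundPlugin) Name() string { return "NodeBound" }

// EventsToRegister declares that only node additions can make a pod this
// plugin rejected schedulable again, so the queue skips other events.
func (*nodeBoundPlugin) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
	return []fwk.ClusterEventWithHint{
		{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.Add}},
	}, nil
}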
@ -665,7 +665,7 @@ func (p *defaultEnqueueExtension) EventsToRegister(_ context.Context) ([]fwk.Clu
return framework.UnrollWildCardResource(), nil
}
func updatePluginList(pluginList interface{}, pluginSet config.PluginSet, pluginsMap map[string]framework.Plugin) error {
func updatePluginList(pluginList interface{}, pluginSet config.PluginSet, pluginsMap map[string]fwk.Plugin) error {
plugins := reflect.ValueOf(pluginList).Elem()
pluginType := plugins.Type().Elem()
set := sets.New[string]()
@ -692,17 +692,17 @@ func updatePluginList(pluginList interface{}, pluginSet config.PluginSet, plugin
}
// PreEnqueuePlugins returns the registered preEnqueue plugins.
func (f *frameworkImpl) PreEnqueuePlugins() []framework.PreEnqueuePlugin {
func (f *frameworkImpl) PreEnqueuePlugins() []fwk.PreEnqueuePlugin {
return f.preEnqueuePlugins
}
// EnqueueExtensions returns the registered reenqueue plugins.
func (f *frameworkImpl) EnqueueExtensions() []framework.EnqueueExtensions {
func (f *frameworkImpl) EnqueueExtensions() []fwk.EnqueueExtensions {
return f.enqueueExtensions
}
// QueueSortFunc returns the function to sort pods in the scheduling queue.
func (f *frameworkImpl) QueueSortFunc() framework.LessFunc {
func (f *frameworkImpl) QueueSortFunc() fwk.LessFunc {
if f == nil {
// If frameworkImpl is nil, simply keep their order unchanged.
// NOTE: this is primarily for tests.
@ -723,7 +723,7 @@ func (f *frameworkImpl) QueueSortFunc() framework.LessFunc {
// When it returns Skip status, returned PreFilterResult and other fields in status are just ignored,
// and coupled Filter plugin/PreFilterExtensions() will be skipped in this scheduling cycle.
// If a non-success status is returned, then the scheduling cycle is aborted.
func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod) (_ *framework.PreFilterResult, status *fwk.Status, _ sets.Set[string]) {
func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod) (_ *fwk.PreFilterResult, status *fwk.Status, _ sets.Set[string]) {
startTime := time.Now()
skipPlugins := sets.New[string]()
defer func() {
@ -734,7 +734,7 @@ func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state fwk.Cycle
if err != nil {
return nil, fwk.AsStatus(fmt.Errorf("getting all nodes: %w", err)), nil
}
var result *framework.PreFilterResult
var result *fwk.PreFilterResult
pluginsWithNodes := sets.New[string]()
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
@ -786,7 +786,7 @@ func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state fwk.Cycle
return result, returnStatus, pluginsWithNodes
}
func (f *frameworkImpl) runPreFilterPlugin(ctx context.Context, pl framework.PreFilterPlugin, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (f *frameworkImpl) runPreFilterPlugin(ctx context.Context, pl fwk.PreFilterPlugin, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
if !state.ShouldRecordPluginMetrics() {
return pl.PreFilter(ctx, state, pod, nodes)
}
@ -831,7 +831,7 @@ func (f *frameworkImpl) RunPreFilterExtensionAddPod(
return nil
}
func (f *frameworkImpl) runPreFilterExtensionAddPod(ctx context.Context, pl framework.PreFilterPlugin, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status {
func (f *frameworkImpl) runPreFilterExtensionAddPod(ctx context.Context, pl fwk.PreFilterPlugin, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreFilterExtensions().AddPod(ctx, state, podToSchedule, podInfoToAdd, nodeInfo)
}
@ -880,7 +880,7 @@ func (f *frameworkImpl) RunPreFilterExtensionRemovePod(
return nil
}
func (f *frameworkImpl) runPreFilterExtensionRemovePod(ctx context.Context, pl framework.PreFilterPlugin, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status {
func (f *frameworkImpl) runPreFilterExtensionRemovePod(ctx context.Context, pl fwk.PreFilterPlugin, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreFilterExtensions().RemovePod(ctx, state, podToSchedule, podInfoToRemove, nodeInfo)
}
@ -929,7 +929,7 @@ func (f *frameworkImpl) RunFilterPlugins(
return nil
}
func (f *frameworkImpl) runFilterPlugin(ctx context.Context, pl framework.FilterPlugin, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
func (f *frameworkImpl) runFilterPlugin(ctx context.Context, pl fwk.FilterPlugin, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.Filter(ctx, state, pod, nodeInfo)
}
@ -941,7 +941,7 @@ func (f *frameworkImpl) runFilterPlugin(ctx context.Context, pl framework.Filter
// RunPostFilterPlugins runs the set of configured PostFilter plugins until the first
// Success, Error or UnschedulableAndUnresolvable is met; otherwise continues to execute all plugins.
func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusReader) (_ *framework.PostFilterResult, status *fwk.Status) {
func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap fwk.NodeToStatusReader) (_ *fwk.PostFilterResult, status *fwk.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PostFilter, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
@ -954,7 +954,7 @@ func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state fwk.Cycl
}
// `result` records the last meaningful (non-noop) PostFilterResult.
var result *framework.PostFilterResult
var result *fwk.PostFilterResult
var reasons []string
var rejectorPlugin string
for _, pl := range f.postFilterPlugins {
@ -971,7 +971,7 @@ func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state fwk.Cycl
} else if !s.IsRejected() {
// Any status other than Success, Unschedulable or UnschedulableAndUnresolvable is Error.
return nil, fwk.AsStatus(s.AsError()).WithPlugin(pl.Name())
} else if r != nil && r.Mode() != framework.ModeNoop {
} else if r != nil && r.Mode() != fwk.ModeNoop {
result = r
}
@ -986,7 +986,7 @@ func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state fwk.Cycl
return result, fwk.NewStatus(fwk.Unschedulable, reasons...).WithPlugin(rejectorPlugin)
}
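
The loop above keeps the last non-noop result and treats Unschedulable as "try the next plugin". A minimal PostFilter plugin under those rules, with illustrative names:

package demo

import (
	"context"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
)

// giveUpPostFilter rejects the pod without nominating a node. Returning a
// nil result with an Unschedulable status lets RunPostFilterPlugins move
// on to the next registered PostFilter plugin.
type giveUpPostFilter struct{}

func (*giveUpPostFilter) Name() string { return "GiveUp" }

func (*giveUpPostFilter) PostFilter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status) {
	return nil, fwk.NewStatus(fwk.Unschedulable, "no remedial action available")
}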
func (f *frameworkImpl) runPostFilterPlugin(ctx context.Context, pl framework.PostFilterPlugin, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusReader) (*framework.PostFilterResult, *fwk.Status) {
func (f *frameworkImpl) runPostFilterPlugin(ctx context.Context, pl fwk.PostFilterPlugin, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status) {
if !state.ShouldRecordPluginMetrics() {
return pl.PostFilter(ctx, state, pod, filteredNodeStatusMap)
}
@ -1056,7 +1056,7 @@ func (f *frameworkImpl) RunFilterPluginsWithNominatedPods(ctx context.Context, s
// addGENominatedPods adds pods with equal or greater priority which are nominated
// to run on the node. It returns 1) whether any pod was added, 2) augmented cycleState,
// 3) augmented nodeInfo.
func addGENominatedPods(ctx context.Context, fh framework.Handle, pod *v1.Pod, state fwk.CycleState, nodeInfo fwk.NodeInfo) (bool, fwk.CycleState, fwk.NodeInfo, error) {
func addGENominatedPods(ctx context.Context, fh fwk.Handle, pod *v1.Pod, state fwk.CycleState, nodeInfo fwk.NodeInfo) (bool, fwk.CycleState, fwk.NodeInfo, error) {
if fh == nil {
// This may happen only in tests.
return false, state, nodeInfo, nil
@ -1120,7 +1120,7 @@ func (f *frameworkImpl) RunPreScorePlugins(
return nil
}
func (f *frameworkImpl) runPreScorePlugin(ctx context.Context, pl framework.PreScorePlugin, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) *fwk.Status {
func (f *frameworkImpl) runPreScorePlugin(ctx context.Context, pl fwk.PreScorePlugin, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreScore(ctx, state, pod, nodes)
}
@ -1134,21 +1134,21 @@ func (f *frameworkImpl) runPreScorePlugin(ctx context.Context, pl framework.PreS
// It returns a list that stores scores from each plugin and total score for each Node.
// It also returns *Status, which is set to non-success if any of the plugins returns
// a non-success status.
func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (ns []framework.NodePluginScores, status *fwk.Status) {
func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (ns []fwk.NodePluginScores, status *fwk.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Score, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
allNodePluginScores := make([]framework.NodePluginScores, len(nodes))
allNodePluginScores := make([]fwk.NodePluginScores, len(nodes))
numPlugins := len(f.scorePlugins)
plugins := make([]framework.ScorePlugin, 0, numPlugins)
pluginToNodeScores := make(map[string]framework.NodeScoreList, numPlugins)
plugins := make([]fwk.ScorePlugin, 0, numPlugins)
pluginToNodeScores := make(map[string]fwk.NodeScoreList, numPlugins)
for _, pl := range f.scorePlugins {
if state.GetSkipScorePlugins().Has(pl.Name()) {
continue
}
plugins = append(plugins, pl)
pluginToNodeScores[pl.Name()] = make(framework.NodeScoreList, len(nodes))
pluginToNodeScores[pl.Name()] = make(fwk.NodeScoreList, len(nodes))
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@ -1180,7 +1180,7 @@ func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state fwk.CycleStat
errCh.SendErrorWithCancel(err, cancel)
return
}
pluginToNodeScores[pl.Name()][index] = framework.NodeScore{
pluginToNodeScores[pl.Name()][index] = fwk.NodeScore{
Name: nodeName,
Score: s,
}
@ -1212,9 +1212,9 @@ func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state fwk.CycleStat
// Apply score weight for each ScorePlugin in parallel,
// and then, build allNodePluginScores.
f.Parallelizer().Until(ctx, len(nodes), func(index int) {
nodePluginScores := framework.NodePluginScores{
nodePluginScores := fwk.NodePluginScores{
Name: nodes[index].Node().Name,
Scores: make([]framework.PluginScore, len(plugins)),
Scores: make([]fwk.PluginScore, len(plugins)),
}
for i, pl := range plugins {
@ -1222,13 +1222,13 @@ func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state fwk.CycleStat
nodeScoreList := pluginToNodeScores[pl.Name()]
score := nodeScoreList[index].Score
if score > framework.MaxNodeScore || score < framework.MinNodeScore {
err := fmt.Errorf("plugin %q returns an invalid score %v, it should in the range of [%v, %v] after normalizing", pl.Name(), score, framework.MinNodeScore, framework.MaxNodeScore)
if score > fwk.MaxNodeScore || score < fwk.MinNodeScore {
err := fmt.Errorf("plugin %q returns an invalid score %v, it should in the range of [%v, %v] after normalizing", pl.Name(), score, fwk.MinNodeScore, fwk.MaxNodeScore)
errCh.SendErrorWithCancel(err, cancel)
return
}
weightedScore := score * int64(weight)
nodePluginScores.Scores[i] = framework.PluginScore{
nodePluginScores.Scores[i] = fwk.PluginScore{
Name: pl.Name(),
Score: weightedScore,
}
@ -1243,7 +1243,7 @@ func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state fwk.CycleStat
return allNodePluginScores, nil
}
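
The reduction above is plain weighted summation. A hedged restatement, assuming fwk.NodePluginScores keeps the in-tree Name/Scores/TotalScore fields:

package demo

import fwk "k8s.io/kube-scheduler/framework"

// weigh folds per-plugin normalized scores into one NodePluginScores entry:
// scores {A: 50 (weight 2), B: 10 (weight 1)} give 50*2 + 10*1 = 110.
func weigh(nodeName string, scores map[string]int64, weights map[string]int) fwk.NodePluginScores {
	out := fwk.NodePluginScores{Name: nodeName}
	for pl, s := range scores {
		weighted := s * int64(weights[pl])
		out.Scores = append(out.Scores, fwk.PluginScore{Name: pl, Score: weighted})
		out.TotalScore += weighted
	}
	return out
}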
func (f *frameworkImpl) runScorePlugin(ctx context.Context, pl framework.ScorePlugin, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
func (f *frameworkImpl) runScorePlugin(ctx context.Context, pl fwk.ScorePlugin, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
if !state.ShouldRecordPluginMetrics() {
return pl.Score(ctx, state, pod, nodeInfo)
}
@ -1253,7 +1253,7 @@ func (f *frameworkImpl) runScorePlugin(ctx context.Context, pl framework.ScorePl
return s, status
}
func (f *frameworkImpl) runScoreExtension(ctx context.Context, pl framework.ScorePlugin, state fwk.CycleState, pod *v1.Pod, nodeScoreList framework.NodeScoreList) *fwk.Status {
func (f *frameworkImpl) runScoreExtension(ctx context.Context, pl fwk.ScorePlugin, state fwk.CycleState, pod *v1.Pod, nodeScoreList fwk.NodeScoreList) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.ScoreExtensions().NormalizeScore(ctx, state, pod, nodeScoreList)
}
@ -1302,7 +1302,7 @@ func (f *frameworkImpl) RunPreBindPlugins(ctx context.Context, state fwk.CycleSt
return nil
}
func (f *frameworkImpl) runPreBindPlugin(ctx context.Context, pl framework.PreBindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
func (f *frameworkImpl) runPreBindPlugin(ctx context.Context, pl fwk.PreBindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreBind(ctx, state, pod, nodeName)
}
@ -1356,7 +1356,7 @@ func (f *frameworkImpl) RunPreBindPreFlights(ctx context.Context, state fwk.Cycl
return returningStatus
}
func (f *frameworkImpl) runPreBindPreFlight(ctx context.Context, pl framework.PreBindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
func (f *frameworkImpl) runPreBindPreFlight(ctx context.Context, pl fwk.PreBindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreBindPreFlight(ctx, state, pod, nodeName)
}
@ -1405,7 +1405,7 @@ func (f *frameworkImpl) RunBindPlugins(ctx context.Context, state fwk.CycleState
return status
}
func (f *frameworkImpl) runBindPlugin(ctx context.Context, bp framework.BindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
func (f *frameworkImpl) runBindPlugin(ctx context.Context, bp fwk.BindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return bp.Bind(ctx, state, pod, nodeName)
}
@ -1436,7 +1436,7 @@ func (f *frameworkImpl) RunPostBindPlugins(ctx context.Context, state fwk.CycleS
}
}
func (f *frameworkImpl) runPostBindPlugin(ctx context.Context, pl framework.PostBindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) {
func (f *frameworkImpl) runPostBindPlugin(ctx context.Context, pl fwk.PostBindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) {
if !state.ShouldRecordPluginMetrics() {
pl.PostBind(ctx, state, pod, nodeName)
return
@ -1483,7 +1483,7 @@ func (f *frameworkImpl) RunReservePluginsReserve(ctx context.Context, state fwk.
return nil
}
func (f *frameworkImpl) runReservePluginReserve(ctx context.Context, pl framework.ReservePlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
func (f *frameworkImpl) runReservePluginReserve(ctx context.Context, pl fwk.ReservePlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.Reserve(ctx, state, pod, nodeName)
}
@ -1519,7 +1519,7 @@ func (f *frameworkImpl) RunReservePluginsUnreserve(ctx context.Context, state fw
}
}
func (f *frameworkImpl) runReservePluginUnreserve(ctx context.Context, pl framework.ReservePlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) {
func (f *frameworkImpl) runReservePluginUnreserve(ctx context.Context, pl fwk.ReservePlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) {
if !state.ShouldRecordPluginMetrics() {
pl.Unreserve(ctx, state, pod, nodeName)
return
@ -1584,7 +1584,7 @@ func (f *frameworkImpl) RunPermitPlugins(ctx context.Context, state fwk.CycleSta
return nil
}
func (f *frameworkImpl) runPermitPlugin(ctx context.Context, pl framework.PermitPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) (*fwk.Status, time.Duration) {
func (f *frameworkImpl) runPermitPlugin(ctx context.Context, pl fwk.PermitPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) (*fwk.Status, time.Duration) {
if !state.ShouldRecordPluginMetrics() {
return pl.Permit(ctx, state, pod, nodeName)
}
@ -1629,17 +1629,17 @@ func (f *frameworkImpl) WaitOnPermit(ctx context.Context, pod *v1.Pod) *fwk.Stat
// snapshot. The snapshot is taken at the beginning of a scheduling cycle and remains
// unchanged until a pod finishes "Reserve". There is no guarantee that the information
// remains unchanged after "Reserve".
func (f *frameworkImpl) SnapshotSharedLister() framework.SharedLister {
func (f *frameworkImpl) SnapshotSharedLister() fwk.SharedLister {
return f.snapshotSharedLister
}
// IterateOverWaitingPods acquires a read lock and iterates over the WaitingPods map.
func (f *frameworkImpl) IterateOverWaitingPods(callback func(framework.WaitingPod)) {
func (f *frameworkImpl) IterateOverWaitingPods(callback func(fwk.WaitingPod)) {
f.waitingPods.iterate(callback)
}
// GetWaitingPod returns a reference to a WaitingPod given its UID.
func (f *frameworkImpl) GetWaitingPod(uid types.UID) framework.WaitingPod {
func (f *frameworkImpl) GetWaitingPod(uid types.UID) fwk.WaitingPod {
if wp := f.waitingPods.get(uid); wp != nil {
return wp
}
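
These accessors are what a Permit plugin's companion controller typically uses to release a parked pod. A short sketch; the plugin name is illustrative:

package demo

import (
	"k8s.io/apimachinery/pkg/types"
	fwk "k8s.io/kube-scheduler/framework"
)

// releasePod looks up a pod that a Permit plugin left in Wait and lets it
// through; calling Reject with a message would fail it instead.
func releasePod(h fwk.Handle, uid types.UID, pluginName string) {
	if wp := h.GetWaitingPod(uid); wp != nil {
		wp.Allow(pluginName)
	}
}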
@ -1681,7 +1681,7 @@ func (f *frameworkImpl) ListPlugins() *config.Plugins {
extName := plugins.Type().Elem().Name()
var cfgs []config.Plugin
for i := 0; i < plugins.Len(); i++ {
name := plugins.Index(i).Interface().(framework.Plugin).Name()
name := plugins.Index(i).Interface().(fwk.Plugin).Name()
p := config.Plugin{Name: name}
if extName == "ScorePlugin" {
// Weights apply only to score plugins.
@ -1717,7 +1717,7 @@ func (f *frameworkImpl) SharedInformerFactory() informers.SharedInformerFactory
}
// SharedDRAManager returns the SharedDRAManager of the framework.
func (f *frameworkImpl) SharedDRAManager() framework.SharedDRAManager {
func (f *frameworkImpl) SharedDRAManager() fwk.SharedDRAManager {
return f.sharedDRAManager
}
@ -1754,7 +1754,7 @@ func (f *frameworkImpl) PercentageOfNodesToScore() *int32 {
}
// Parallelizer returns a parallelizer holding parallelism for the scheduler.
func (f *frameworkImpl) Parallelizer() parallelize.Parallelizer {
func (f *frameworkImpl) Parallelizer() fwk.Parallelizer {
return f.parallelizer
}
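
Plugins fan work out through this same accessor. A hedged sketch, assuming fwk.Parallelizer keeps the Until(ctx, pieces, doPiece, label) shape used earlier in RunScorePlugins and that fwk.NodeInfo exposes GetPods():

package demo

import (
	"context"

	fwk "k8s.io/kube-scheduler/framework"
)

// countPods computes a per-node value in parallel; each goroutine writes
// only to its own index, so no extra locking is needed.
func countPods(ctx context.Context, h fwk.Handle, nodes []fwk.NodeInfo) []int {
	counts := make([]int, len(nodes))
	h.Parallelizer().Until(ctx, len(nodes), func(i int) {
		counts[i] = len(nodes[i].GetPods())
	}, "countPods")
	return counts
}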
@ -1770,7 +1770,7 @@ func (f *frameworkImpl) APIDispatcher() fwk.APIDispatcher {
// APICacher returns an apiCacher that can be used to dispatch API calls through the scheduler's cache
// instead of directly using APIDispatcher().
// This requires the SchedulerAsyncAPICalls feature gate to be enabled.
func (f *frameworkImpl) APICacher() framework.APICacher {
func (f *frameworkImpl) APICacher() fwk.APICacher {
if f.apiCacher == nil {
return nil
}

View file

@ -74,8 +74,8 @@ func init() {
// TestScoreWithNormalizePlugin implements ScoreWithNormalizePlugin interface.
// TestScorePlugin only implements ScorePlugin interface.
var _ framework.ScorePlugin = &TestScoreWithNormalizePlugin{}
var _ framework.ScorePlugin = &TestScorePlugin{}
var _ fwk.ScorePlugin = &TestScoreWithNormalizePlugin{}
var _ fwk.ScorePlugin = &TestScorePlugin{}
var statusCmpOpts = []cmp.Option{
cmp.Comparer(func(s1 *fwk.Status, s2 *fwk.Status) bool {
@ -89,7 +89,7 @@ var statusCmpOpts = []cmp.Option{
}),
}
func newScoreWithNormalizePlugin1(_ context.Context, injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) {
func newScoreWithNormalizePlugin1(_ context.Context, injArgs runtime.Object, f fwk.Handle) (fwk.Plugin, error) {
var inj injectedResult
if err := DecodeInto(injArgs, &inj); err != nil {
return nil, err
@ -97,7 +97,7 @@ func newScoreWithNormalizePlugin1(_ context.Context, injArgs runtime.Object, f f
return &TestScoreWithNormalizePlugin{scoreWithNormalizePlugin1, inj}, nil
}
func newScoreWithNormalizePlugin2(_ context.Context, injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) {
func newScoreWithNormalizePlugin2(_ context.Context, injArgs runtime.Object, f fwk.Handle) (fwk.Plugin, error) {
var inj injectedResult
if err := DecodeInto(injArgs, &inj); err != nil {
return nil, err
@ -105,7 +105,7 @@ func newScoreWithNormalizePlugin2(_ context.Context, injArgs runtime.Object, f f
return &TestScoreWithNormalizePlugin{scoreWithNormalizePlugin2, inj}, nil
}
func newScorePlugin1(_ context.Context, injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) {
func newScorePlugin1(_ context.Context, injArgs runtime.Object, f fwk.Handle) (fwk.Plugin, error) {
var inj injectedResult
if err := DecodeInto(injArgs, &inj); err != nil {
return nil, err
@ -113,7 +113,7 @@ func newScorePlugin1(_ context.Context, injArgs runtime.Object, f framework.Hand
return &TestScorePlugin{scorePlugin1, inj}, nil
}
func newScorePlugin2(_ context.Context, injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) {
func newScorePlugin2(_ context.Context, injArgs runtime.Object, f fwk.Handle) (fwk.Plugin, error) {
var inj injectedResult
if err := DecodeInto(injArgs, &inj); err != nil {
return nil, err
@ -121,7 +121,7 @@ func newScorePlugin2(_ context.Context, injArgs runtime.Object, f framework.Hand
return &TestScorePlugin{scorePlugin2, inj}, nil
}
func newPluginNotImplementingScore(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func newPluginNotImplementingScore(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &PluginNotImplementingScore{}, nil
}
@ -134,7 +134,7 @@ func (pl *TestScoreWithNormalizePlugin) Name() string {
return pl.name
}
func (pl *TestScoreWithNormalizePlugin) NormalizeScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *fwk.Status {
func (pl *TestScoreWithNormalizePlugin) NormalizeScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, scores fwk.NodeScoreList) *fwk.Status {
return injectNormalizeRes(pl.inj, scores)
}
@ -142,7 +142,7 @@ func (pl *TestScoreWithNormalizePlugin) Score(ctx context.Context, state fwk.Cyc
return setScoreRes(pl.inj)
}
func (pl *TestScoreWithNormalizePlugin) ScoreExtensions() framework.ScoreExtensions {
func (pl *TestScoreWithNormalizePlugin) ScoreExtensions() fwk.ScoreExtensions {
return pl
}
@ -164,7 +164,7 @@ func (pl *TestScorePlugin) Score(ctx context.Context, state fwk.CycleState, p *v
return setScoreRes(pl.inj)
}
func (pl *TestScorePlugin) ScoreExtensions() framework.ScoreExtensions {
func (pl *TestScorePlugin) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
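
For contrast with the test plugin above, which skips normalization, this is a hedged sketch of what a real ScoreExtensions body tends to do: rescale raw scores in place so the best node lands on fwk.MaxNodeScore. Raw scores are assumed non-negative.

package demo

import (
	"context"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
)

// scaleToRange rescales raw scores in place to [0, MaxNodeScore]; the
// framework then applies plugin weights on top of the normalized values.
func scaleToRange(_ context.Context, _ fwk.CycleState, _ *v1.Pod, scores fwk.NodeScoreList) *fwk.Status {
	var highest int64
	for _, s := range scores {
		if s.Score > highest {
			highest = s.Score
		}
	}
	if highest == 0 {
		return nil // nothing to scale; all raw scores are zero
	}
	for i := range scores {
		scores[i].Score = scores[i].Score * fwk.MaxNodeScore / highest
	}
	return nil
}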
@ -175,7 +175,7 @@ func (pl *PluginNotImplementingScore) Name() string {
return pluginNotImplementingScore
}
func newTestPlugin(_ context.Context, injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) {
func newTestPlugin(_ context.Context, injArgs runtime.Object, f fwk.Handle) (fwk.Plugin, error) {
return &TestPlugin{name: testPlugin}, nil
}
@ -204,15 +204,15 @@ func (pl *TestPlugin) Score(ctx context.Context, state fwk.CycleState, p *v1.Pod
return 0, fwk.NewStatus(fwk.Code(pl.inj.ScoreStatus), injectReason)
}
func (pl *TestPlugin) ScoreExtensions() framework.ScoreExtensions {
func (pl *TestPlugin) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
func (pl *TestPlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *TestPlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
return pl.inj.PreFilterResult, fwk.NewStatus(fwk.Code(pl.inj.PreFilterStatus), injectReason)
}
func (pl *TestPlugin) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *TestPlugin) PreFilterExtensions() fwk.PreFilterExtensions {
return pl
}
@ -220,7 +220,7 @@ func (pl *TestPlugin) Filter(ctx context.Context, state fwk.CycleState, pod *v1.
return fwk.NewStatus(fwk.Code(pl.inj.FilterStatus), injectFilterReason)
}
func (pl *TestPlugin) PostFilter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ framework.NodeToStatusReader) (*framework.PostFilterResult, *fwk.Status) {
func (pl *TestPlugin) PostFilter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status) {
return nil, fwk.NewStatus(fwk.Code(pl.inj.PostFilterStatus), injectReason)
}
@ -254,7 +254,7 @@ func (pl *TestPlugin) Bind(ctx context.Context, state fwk.CycleState, p *v1.Pod,
return fwk.NewStatus(fwk.Code(pl.inj.BindStatus), injectReason)
}
func newTestCloseErrorPlugin(_ context.Context, injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) {
func newTestCloseErrorPlugin(_ context.Context, injArgs runtime.Object, f fwk.Handle) (fwk.Plugin, error) {
return &TestCloseErrorPlugin{name: testCloseErrorPlugin}, nil
}
@ -282,12 +282,12 @@ func (pl *TestPreFilterPlugin) Name() string {
return preFilterPluginName
}
func (pl *TestPreFilterPlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *TestPreFilterPlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
pl.PreFilterCalled++
return nil, nil
}
func (pl *TestPreFilterPlugin) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *TestPreFilterPlugin) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
@ -302,7 +302,7 @@ func (pl *TestPreFilterWithExtensionsPlugin) Name() string {
return preFilterWithExtensionsPluginName
}
func (pl *TestPreFilterWithExtensionsPlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *TestPreFilterWithExtensionsPlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
pl.PreFilterCalled++
return nil, nil
}
@ -319,7 +319,7 @@ func (pl *TestPreFilterWithExtensionsPlugin) RemovePod(ctx context.Context, stat
return nil
}
func (pl *TestPreFilterWithExtensionsPlugin) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *TestPreFilterWithExtensionsPlugin) PreFilterExtensions() fwk.PreFilterExtensions {
return pl
}
@ -330,17 +330,17 @@ func (dp *TestDuplicatePlugin) Name() string {
return duplicatePluginName
}
func (dp *TestDuplicatePlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (dp *TestDuplicatePlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
return nil, nil
}
func (dp *TestDuplicatePlugin) PreFilterExtensions() framework.PreFilterExtensions {
func (dp *TestDuplicatePlugin) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
var _ framework.PreFilterPlugin = &TestDuplicatePlugin{}
var _ fwk.PreFilterPlugin = &TestDuplicatePlugin{}
func newDuplicatePlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func newDuplicatePlugin(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &TestDuplicatePlugin{}, nil
}
@ -356,7 +356,7 @@ func (pp *TestPermitPlugin) Permit(ctx context.Context, state fwk.CycleState, p
return fwk.NewStatus(fwk.Wait), 10 * time.Second
}
var _ framework.PreEnqueuePlugin = &TestPreEnqueuePlugin{}
var _ fwk.PreEnqueuePlugin = &TestPreEnqueuePlugin{}
type TestPreEnqueuePlugin struct{}
@ -368,9 +368,9 @@ func (pl *TestPreEnqueuePlugin) PreEnqueue(ctx context.Context, p *v1.Pod) *fwk.
return nil
}
var _ framework.QueueSortPlugin = &TestQueueSortPlugin{}
var _ fwk.QueueSortPlugin = &TestQueueSortPlugin{}
func newQueueSortPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func newQueueSortPlugin(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &TestQueueSortPlugin{}, nil
}
@ -385,9 +385,9 @@ func (pl *TestQueueSortPlugin) Less(_, _ fwk.QueuedPodInfo) bool {
return false
}
var _ framework.BindPlugin = &TestBindPlugin{}
var _ fwk.BindPlugin = &TestBindPlugin{}
func newBindPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func newBindPlugin(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &TestBindPlugin{}, nil
}
@ -945,18 +945,18 @@ func TestNewFrameworkMultiPointExpansion(t *testing.T) {
func TestPreEnqueuePlugins(t *testing.T) {
tests := []struct {
name string
plugins []framework.Plugin
want []framework.PreEnqueuePlugin
plugins []fwk.Plugin
want []fwk.PreEnqueuePlugin
}{
{
name: "no PreEnqueuePlugin registered",
},
{
name: "one PreEnqueuePlugin registered",
plugins: []framework.Plugin{
plugins: []fwk.Plugin{
&TestPreEnqueuePlugin{},
},
want: []framework.PreEnqueuePlugin{
want: []fwk.PreEnqueuePlugin{
&TestPreEnqueuePlugin{},
},
},
@ -971,7 +971,7 @@ func TestPreEnqueuePlugins(t *testing.T) {
// register all plugins
tmpPl := pl
if err := registry.Register(pl.Name(),
func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return tmpPl, nil
}); err != nil {
t.Fatalf("fail to register preEnqueue plugin (%s)", pl.Name())
@ -1096,7 +1096,7 @@ func TestRunPreScorePlugins(t *testing.T) {
for i, p := range tt.plugins {
p := p
enabled[i].Name = p.name
if err := r.Register(p.name, func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
if err := r.Register(p.name, func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return p, nil
}); err != nil {
t.Fatalf("fail to register PreScorePlugins plugin (%s)", p.Name())
@ -1137,7 +1137,7 @@ func TestRunScorePlugins(t *testing.T) {
registry Registry
plugins *config.Plugins
pluginConfigs []config.PluginConfig
want []framework.NodePluginScores
want []fwk.NodePluginScores
skippedPlugins sets.Set[string]
// If err is true, we expect RunScorePlugin to fail.
err bool
@ -1145,14 +1145,14 @@ func TestRunScorePlugins(t *testing.T) {
{
name: "no Score plugins",
plugins: buildScoreConfigDefaultWeights(),
want: []framework.NodePluginScores{
want: []fwk.NodePluginScores{
{
Name: "node1",
Scores: []framework.PluginScore{},
Scores: []fwk.PluginScore{},
},
{
Name: "node2",
Scores: []framework.PluginScore{},
Scores: []fwk.PluginScore{},
},
},
},
@ -1168,10 +1168,10 @@ func TestRunScorePlugins(t *testing.T) {
},
},
// scorePlugin1 Score returns 1, weight=1, so want=1.
want: []framework.NodePluginScores{
want: []fwk.NodePluginScores{
{
Name: "node1",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: scorePlugin1,
Score: 1,
@ -1181,7 +1181,7 @@ func TestRunScorePlugins(t *testing.T) {
},
{
Name: "node2",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: scorePlugin1,
Score: 1,
@ -1204,10 +1204,10 @@ func TestRunScorePlugins(t *testing.T) {
},
},
// scoreWithNormalizePlugin1 Score returns 10, but NormalizeScore overrides to 5, weight=1, so want=5
want: []framework.NodePluginScores{
want: []fwk.NodePluginScores{
{
Name: "node1",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: scoreWithNormalizePlugin1,
Score: 5,
@ -1217,7 +1217,7 @@ func TestRunScorePlugins(t *testing.T) {
},
{
Name: "node2",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: scoreWithNormalizePlugin1,
Score: 5,
@ -1253,10 +1253,10 @@ func TestRunScorePlugins(t *testing.T) {
// scorePlugin1 Score returns 1, weight =1, so want=1.
// scoreWithNormalizePlugin1 Score returns 3, but NormalizeScore overrides to 4, weight=1, so want=4.
// scoreWithNormalizePlugin2 Score returns 4, but NormalizeScore overrides to 5, weight=2, so want=10.
want: []framework.NodePluginScores{
want: []fwk.NodePluginScores{
{
Name: "node1",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: scorePlugin1,
Score: 1,
@ -1274,7 +1274,7 @@ func TestRunScorePlugins(t *testing.T) {
},
{
Name: "node2",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: scorePlugin1,
Score: 1,
@ -1325,7 +1325,7 @@ func TestRunScorePlugins(t *testing.T) {
{
Name: scorePlugin1,
Args: &runtime.Unknown{
Raw: []byte(fmt.Sprintf(`{ "scoreRes": %d }`, framework.MaxNodeScore+1)),
Raw: []byte(fmt.Sprintf(`{ "scoreRes": %d }`, fwk.MaxNodeScore+1)),
},
},
},
@ -1338,7 +1338,7 @@ func TestRunScorePlugins(t *testing.T) {
{
Name: scorePlugin1,
Args: &runtime.Unknown{
Raw: []byte(fmt.Sprintf(`{ "scoreRes": %d }`, framework.MinNodeScore-1)),
Raw: []byte(fmt.Sprintf(`{ "scoreRes": %d }`, fwk.MinNodeScore-1)),
},
},
},
@ -1351,7 +1351,7 @@ func TestRunScorePlugins(t *testing.T) {
{
Name: scoreWithNormalizePlugin1,
Args: &runtime.Unknown{
Raw: []byte(fmt.Sprintf(`{ "normalizeRes": %d }`, framework.MaxNodeScore+1)),
Raw: []byte(fmt.Sprintf(`{ "normalizeRes": %d }`, fwk.MaxNodeScore+1)),
},
},
},
@ -1364,7 +1364,7 @@ func TestRunScorePlugins(t *testing.T) {
{
Name: scoreWithNormalizePlugin1,
Args: &runtime.Unknown{
Raw: []byte(fmt.Sprintf(`{ "normalizeRes": %d }`, framework.MinNodeScore-1)),
Raw: []byte(fmt.Sprintf(`{ "normalizeRes": %d }`, fwk.MinNodeScore-1)),
},
},
},
@ -1393,10 +1393,10 @@ func TestRunScorePlugins(t *testing.T) {
},
},
// scorePlugin1 Score returns 1, weight=3, so want=3.
want: []framework.NodePluginScores{
want: []fwk.NodePluginScores{
{
Name: "node1",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: scorePlugin1,
Score: 3,
@ -1406,7 +1406,7 @@ func TestRunScorePlugins(t *testing.T) {
},
{
Name: "node2",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: scorePlugin1,
Score: 3,
@ -1434,10 +1434,10 @@ func TestRunScorePlugins(t *testing.T) {
},
},
skippedPlugins: sets.New(scoreWithNormalizePlugin1),
want: []framework.NodePluginScores{
want: []fwk.NodePluginScores{
{
Name: "node1",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: scorePlugin1,
Score: 1,
@ -1447,7 +1447,7 @@ func TestRunScorePlugins(t *testing.T) {
},
{
Name: "node2",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: scorePlugin1,
Score: 1,
@ -1469,14 +1469,14 @@ func TestRunScorePlugins(t *testing.T) {
},
},
skippedPlugins: sets.New(scorePlugin1),
want: []framework.NodePluginScores{
want: []fwk.NodePluginScores{
{
Name: "node1",
Scores: []framework.PluginScore{},
Scores: []fwk.PluginScore{},
},
{
Name: "node2",
Scores: []framework.PluginScore{},
Scores: []fwk.PluginScore{},
},
},
},
@ -1485,14 +1485,14 @@ func TestRunScorePlugins(t *testing.T) {
plugins: buildScoreConfigDefaultWeights(scorePlugin1),
pluginConfigs: nil,
skippedPlugins: sets.New(scorePlugin1, "score-plugin-unknown"),
want: []framework.NodePluginScores{
want: []fwk.NodePluginScores{
{
Name: "node1",
Scores: []framework.PluginScore{},
Scores: []fwk.PluginScore{},
},
{
Name: "node2",
Scores: []framework.PluginScore{},
Scores: []fwk.PluginScore{},
},
},
},
@ -1542,11 +1542,11 @@ func TestPreFilterPlugins(t *testing.T) {
preFilter2 := &TestPreFilterWithExtensionsPlugin{}
r := make(Registry)
r.Register(preFilterPluginName,
func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return preFilter1, nil
})
r.Register(preFilterWithExtensionsPluginName,
func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return preFilter2, nil
})
plugins := &config.Plugins{PreFilter: config.PluginSet{Enabled: []config.Plugin{{Name: preFilterWithExtensionsPluginName}, {Name: preFilterPluginName}}}}
@ -1587,7 +1587,7 @@ func TestRunPreFilterPlugins(t *testing.T) {
tests := []struct {
name string
plugins []*TestPlugin
wantPreFilterResult *framework.PreFilterResult
wantPreFilterResult *fwk.PreFilterResult
wantSkippedPlugins sets.Set[string]
wantStatusCode fwk.Code
}{
@ -1713,7 +1713,7 @@ func TestRunPreFilterPlugins(t *testing.T) {
plugins: []*TestPlugin{
{
name: "reject-all-nodes",
inj: injectedResult{PreFilterResult: &framework.PreFilterResult{NodeNames: sets.New[string]()}},
inj: injectedResult{PreFilterResult: &fwk.PreFilterResult{NodeNames: sets.New[string]()}},
},
{
// to make sure this plugin is not executed, this plugin return Skip and we confirm it via wantSkippedPlugins.
@ -1721,7 +1721,7 @@ func TestRunPreFilterPlugins(t *testing.T) {
inj: injectedResult{PreFilterStatus: int(fwk.Skip)},
},
},
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.New[string]()},
wantPreFilterResult: &fwk.PreFilterResult{NodeNames: sets.New[string]()},
wantSkippedPlugins: sets.New[string](), // "skip" plugin isn't executed.
wantStatusCode: fwk.UnschedulableAndUnresolvable,
},
@ -1733,7 +1733,7 @@ func TestRunPreFilterPlugins(t *testing.T) {
for i, p := range tt.plugins {
p := p
enabled[i].Name = p.name
if err := r.Register(p.name, func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
if err := r.Register(p.name, func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return p, nil
}); err != nil {
t.Fatalf("fail to register PreFilter plugin (%s)", p.Name())
@ -1828,7 +1828,7 @@ func TestRunPreFilterExtensionRemovePod(t *testing.T) {
for i, p := range tt.plugins {
p := p
enabled[i].Name = p.name
if err := r.Register(p.name, func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
if err := r.Register(p.name, func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return p, nil
}); err != nil {
t.Fatalf("fail to register PreFilterExtension plugin (%s)", p.Name())
@ -1916,7 +1916,7 @@ func TestRunPreFilterExtensionAddPod(t *testing.T) {
for i, p := range tt.plugins {
p := p
enabled[i].Name = p.name
if err := r.Register(p.name, func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
if err := r.Register(p.name, func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return p, nil
}); err != nil {
t.Fatalf("fail to register PreFilterExtension plugin (%s)", p.Name())
@ -2125,7 +2125,7 @@ func TestFilterPlugins(t *testing.T) {
// register all plugins
tmpPl := pl
if err := registry.Register(pl.name,
func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return tmpPl, nil
}); err != nil {
t.Fatalf("fail to register filter plugin (%s)", pl.name)
@ -2253,7 +2253,7 @@ func TestPostFilterPlugins(t *testing.T) {
// register all plugins
tmpPl := pl
if err := registry.Register(pl.name,
func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return tmpPl, nil
}); err != nil {
t.Fatalf("fail to register postFilter plugin (%s)", pl.name)
@ -2389,7 +2389,7 @@ func TestFilterPluginsWithNominatedPods(t *testing.T) {
if tt.preFilterPlugin != nil {
if err := registry.Register(tt.preFilterPlugin.name,
func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return tt.preFilterPlugin, nil
}); err != nil {
t.Fatalf("fail to register preFilter plugin (%s)", tt.preFilterPlugin.name)
@ -2401,7 +2401,7 @@ func TestFilterPluginsWithNominatedPods(t *testing.T) {
}
if tt.filterPlugin != nil {
if err := registry.Register(tt.filterPlugin.name,
func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return tt.filterPlugin, nil
}); err != nil {
t.Fatalf("fail to register filter plugin (%s)", tt.filterPlugin.name)
@ -2430,7 +2430,7 @@ func TestFilterPluginsWithNominatedPods(t *testing.T) {
podNominator.AddNominatedPod(
logger,
mustNewPodInfo(t, tt.nominatedPod),
&framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: nodeName})
&fwk.NominatingInfo{NominatingMode: fwk.ModeOverride, NominatedNodeName: nodeName})
}
profile := config.KubeSchedulerProfile{Plugins: cfgPls}
ctx, cancel := context.WithCancel(ctx)
@ -2582,7 +2582,7 @@ func TestPreBindPlugins(t *testing.T) {
for _, pl := range tt.plugins {
tmpPl := pl
if err := registry.Register(pl.name, func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
if err := registry.Register(pl.name, func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return tmpPl, nil
}); err != nil {
t.Fatalf("Unable to register pre bind plugins: %s", pl.name)
@ -2686,7 +2686,7 @@ func TestPreBindPreFlightPlugins(t *testing.T) {
for _, pl := range tt.plugins {
tmpPl := pl
if err := registry.Register(pl.name, func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
if err := registry.Register(pl.name, func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return tmpPl, nil
}); err != nil {
t.Fatalf("Unable to register pre bind plugins: %s", pl.name)
@ -2848,7 +2848,7 @@ func TestReservePlugins(t *testing.T) {
for _, pl := range tt.plugins {
tmpPl := pl
if err := registry.Register(pl.name, func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
if err := registry.Register(pl.name, func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return tmpPl, nil
}); err != nil {
t.Fatalf("Unable to register pre bind plugins: %s", pl.name)
@ -2978,7 +2978,7 @@ func TestPermitPlugins(t *testing.T) {
for _, pl := range tt.plugins {
tmpPl := pl
if err := registry.Register(pl.name, func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
if err := registry.Register(pl.name, func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return tmpPl, nil
}); err != nil {
t.Fatalf("Unable to register Permit plugin: %s", pl.name)
@ -3153,7 +3153,7 @@ func TestRecordingMetrics(t *testing.T) {
plugin := &TestPlugin{name: testPlugin, inj: tt.inject}
r := make(Registry)
r.Register(testPlugin,
func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return plugin, nil
})
pluginSet := config.PluginSet{Enabled: []config.Plugin{{Name: testPlugin, Weight: 1}}}
@ -3280,7 +3280,7 @@ func TestRunBindPlugins(t *testing.T) {
name := fmt.Sprintf("bind-%d", i)
plugin := &TestPlugin{name: name, inj: injectedResult{BindStatus: int(inj)}}
r.Register(name,
func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return plugin, nil
})
pluginSet.Enabled = append(pluginSet.Enabled, config.Plugin{Name: name})
@ -3342,7 +3342,7 @@ func TestPermitWaitDurationMetric(t *testing.T) {
plugin := &TestPlugin{name: testPlugin, inj: tt.inject}
r := make(Registry)
err := r.Register(testPlugin,
func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return plugin, nil
})
if err != nil {
@ -3407,7 +3407,7 @@ func TestWaitOnPermit(t *testing.T) {
testPermitPlugin := &TestPermitPlugin{}
r := make(Registry)
r.Register(permitPlugin,
func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return testPermitPlugin, nil
})
plugins := &config.Plugins{
@ -3550,22 +3550,22 @@ func buildScoreConfigWithWeights(weights map[string]int32, ps ...string) *config
}
type injectedResult struct {
ScoreRes int64 `json:"scoreRes,omitempty"`
NormalizeRes int64 `json:"normalizeRes,omitempty"`
ScoreStatus int `json:"scoreStatus,omitempty"`
NormalizeStatus int `json:"normalizeStatus,omitempty"`
PreFilterResult *framework.PreFilterResult `json:"preFilterResult,omitempty"`
PreFilterStatus int `json:"preFilterStatus,omitempty"`
PreFilterAddPodStatus int `json:"preFilterAddPodStatus,omitempty"`
PreFilterRemovePodStatus int `json:"preFilterRemovePodStatus,omitempty"`
FilterStatus int `json:"filterStatus,omitempty"`
PostFilterStatus int `json:"postFilterStatus,omitempty"`
PreScoreStatus int `json:"preScoreStatus,omitempty"`
ReserveStatus int `json:"reserveStatus,omitempty"`
PreBindPreFlightStatus int `json:"preBindPreFlightStatus,omitempty"`
PreBindStatus int `json:"preBindStatus,omitempty"`
BindStatus int `json:"bindStatus,omitempty"`
PermitStatus int `json:"permitStatus,omitempty"`
ScoreRes int64 `json:"scoreRes,omitempty"`
NormalizeRes int64 `json:"normalizeRes,omitempty"`
ScoreStatus int `json:"scoreStatus,omitempty"`
NormalizeStatus int `json:"normalizeStatus,omitempty"`
PreFilterResult *fwk.PreFilterResult `json:"preFilterResult,omitempty"`
PreFilterStatus int `json:"preFilterStatus,omitempty"`
PreFilterAddPodStatus int `json:"preFilterAddPodStatus,omitempty"`
PreFilterRemovePodStatus int `json:"preFilterRemovePodStatus,omitempty"`
FilterStatus int `json:"filterStatus,omitempty"`
PostFilterStatus int `json:"postFilterStatus,omitempty"`
PreScoreStatus int `json:"preScoreStatus,omitempty"`
ReserveStatus int `json:"reserveStatus,omitempty"`
PreBindPreFlightStatus int `json:"preBindPreFlightStatus,omitempty"`
PreBindStatus int `json:"preBindStatus,omitempty"`
BindStatus int `json:"bindStatus,omitempty"`
PermitStatus int `json:"permitStatus,omitempty"`
}
func setScoreRes(inj injectedResult) (int64, *fwk.Status) {
@ -3575,7 +3575,7 @@ func setScoreRes(inj injectedResult) (int64, *fwk.Status) {
return inj.ScoreRes, nil
}
func injectNormalizeRes(inj injectedResult, scores framework.NodeScoreList) *fwk.Status {
func injectNormalizeRes(inj injectedResult, scores fwk.NodeScoreList) *fwk.Status {
if fwk.Code(inj.NormalizeStatus) != fwk.Success {
return fwk.NewStatus(fwk.Code(inj.NormalizeStatus), "injecting failure.")
}


@ -22,16 +22,15 @@ import (
v1 "k8s.io/api/core/v1"
compbasemetrics "k8s.io/component-base/metrics"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
type instrumentedFilterPlugin struct {
framework.FilterPlugin
fwk.FilterPlugin
metric compbasemetrics.CounterMetric
}
var _ framework.FilterPlugin = &instrumentedFilterPlugin{}
var _ fwk.FilterPlugin = &instrumentedFilterPlugin{}
func (p *instrumentedFilterPlugin) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
p.metric.Inc()
@ -39,14 +38,14 @@ func (p *instrumentedFilterPlugin) Filter(ctx context.Context, state fwk.CycleSt
}
type instrumentedPreFilterPlugin struct {
framework.PreFilterPlugin
fwk.PreFilterPlugin
metric compbasemetrics.CounterMetric
}
var _ framework.PreFilterPlugin = &instrumentedPreFilterPlugin{}
var _ fwk.PreFilterPlugin = &instrumentedPreFilterPlugin{}
func (p *instrumentedPreFilterPlugin) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (p *instrumentedPreFilterPlugin) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
result, status := p.PreFilterPlugin.PreFilter(ctx, state, pod, nodes)
if !status.IsSkip() {
p.metric.Inc()
@ -55,12 +54,12 @@ func (p *instrumentedPreFilterPlugin) PreFilter(ctx context.Context, state fwk.C
}
type instrumentedPreScorePlugin struct {
framework.PreScorePlugin
fwk.PreScorePlugin
metric compbasemetrics.CounterMetric
}
var _ framework.PreScorePlugin = &instrumentedPreScorePlugin{}
var _ fwk.PreScorePlugin = &instrumentedPreScorePlugin{}
func (p *instrumentedPreScorePlugin) PreScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) *fwk.Status {
status := p.PreScorePlugin.PreScore(ctx, state, pod, nodes)
@ -71,12 +70,12 @@ func (p *instrumentedPreScorePlugin) PreScore(ctx context.Context, state fwk.Cyc
}
type instrumentedScorePlugin struct {
framework.ScorePlugin
fwk.ScorePlugin
metric compbasemetrics.CounterMetric
}
var _ framework.ScorePlugin = &instrumentedScorePlugin{}
var _ fwk.ScorePlugin = &instrumentedScorePlugin{}
func (p *instrumentedScorePlugin) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
p.metric.Inc()
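
The file above applies a simple decorator pattern: each wrapper embeds the corresponding fwk interface and bumps a counter around the delegated call (PreFilter and PreScore only count non-Skip results). A minimal sketch of how such a wrapper could be wired up, assuming some existing fwk.FilterPlugin named basePlugin; the metric name is illustrative, not taken from this commit:

	// Hypothetical wiring for the decorator above.
	counter := compbasemetrics.NewCounter(&compbasemetrics.CounterOpts{
		Name: "scheduler_filter_plugin_executions_total", // illustrative name
	})
	instrumented := &instrumentedFilterPlugin{
		FilterPlugin: basePlugin, // any existing fwk.FilterPlugin
		metric:       counter,
	}
	// instrumented.Filter(...) now increments the counter and then
	// delegates to basePlugin.Filter(...).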


@ -22,21 +22,21 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/kubernetes/pkg/scheduler/framework"
fwk "k8s.io/kube-scheduler/framework"
plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"sigs.k8s.io/yaml"
)
// PluginFactory is a function that builds a plugin.
type PluginFactory = func(ctx context.Context, configuration runtime.Object, f framework.Handle) (framework.Plugin, error)
type PluginFactory = func(ctx context.Context, configuration runtime.Object, f fwk.Handle) (fwk.Plugin, error)
// PluginFactoryWithFts is a function that builds a plugin with certain feature gates.
type PluginFactoryWithFts[T framework.Plugin] func(context.Context, runtime.Object, framework.Handle, plfeature.Features) (T, error)
type PluginFactoryWithFts[T fwk.Plugin] func(context.Context, runtime.Object, fwk.Handle, plfeature.Features) (T, error)
// FactoryAdapter can be used to inject feature gates for a plugin that needs
// them when the caller expects the older PluginFactory method.
func FactoryAdapter[T framework.Plugin](fts plfeature.Features, withFts PluginFactoryWithFts[T]) PluginFactory {
return func(ctx context.Context, plArgs runtime.Object, fh framework.Handle) (framework.Plugin, error) {
func FactoryAdapter[T fwk.Plugin](fts plfeature.Features, withFts PluginFactoryWithFts[T]) PluginFactory {
return func(ctx context.Context, plArgs runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return withFts(ctx, plArgs, fh, fts)
}
}
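
For illustration, a feature-gated constructor and its registration through FactoryAdapter might look like the sketch below; the plugin type, registry key, and the field read off plfeature.Features are all hypothetical assumptions, not part of this commit:

	// Hypothetical plugin that needs feature gates at construction time.
	type myPlugin struct{ draEnabled bool }

	func (p *myPlugin) Name() string { return "MyPlugin" }

	// Matches PluginFactoryWithFts[*myPlugin].
	func newMyPlugin(_ context.Context, _ runtime.Object, _ fwk.Handle, fts plfeature.Features) (*myPlugin, error) {
		// Field name on plfeature.Features is assumed for illustration.
		return &myPlugin{draEnabled: fts.EnableDynamicResourceAllocation}, nil
	}

	// FactoryAdapter closes over the resolved gates and yields the plain
	// PluginFactory signature the registry expects.
	registry := Registry{
		"MyPlugin": FactoryAdapter(plfeature.Features{EnableDynamicResourceAllocation: true}, newMyPlugin),
	}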


@ -24,7 +24,7 @@ import (
"github.com/google/uuid"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/framework"
fwk "k8s.io/kube-scheduler/framework"
)
func TestDecodeInto(t *testing.T) {
@ -110,7 +110,7 @@ func (p *mockNoopPlugin) Name() string {
func NewMockNoopPluginFactory() PluginFactory {
uuid := uuid.New().String()
return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
return func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &mockNoopPlugin{uuid}, nil
}
}


@ -24,7 +24,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
// waitingPodsMap a thread-safe map used to maintain pods waiting in the permit phase.
@ -62,7 +61,7 @@ func (m *waitingPodsMap) get(uid types.UID) *waitingPod {
}
// iterate acquires a read lock and iterates over the WaitingPods map.
func (m *waitingPodsMap) iterate(callback func(framework.WaitingPod)) {
func (m *waitingPodsMap) iterate(callback func(fwk.WaitingPod)) {
m.mu.RLock()
defer m.mu.RUnlock()
for _, v := range m.pods {
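
Callers never touch the map directly; they hand a callback to iterate and must not retain the fwk.WaitingPod beyond the call, since the read lock is only held while the iteration runs. A sketch of typical use, with an illustrative callback body:

	// Hypothetical caller: count pods parked in the permit phase.
	waiting := 0
	m.iterate(func(wp fwk.WaitingPod) {
		// Safe to read here: the map's read lock is held while the
		// callback runs.
		_ = wp.GetPod().Name
		waiting++
	})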
@ -78,7 +77,7 @@ type waitingPod struct {
mu sync.RWMutex
}
var _ framework.WaitingPod = &waitingPod{}
var _ fwk.WaitingPod = &waitingPod{}
// newWaitingPod returns a new waitingPod instance.
func newWaitingPod(pod *v1.Pod, pluginsMaxWaitTime map[string]time.Duration) *waitingPod {


@ -796,8 +796,8 @@ func (f *FitError) Error() string {
// the scheduling cycle went through PreFilter extension point successfully.
//
// When the prefilter plugin returns unschedulable,
// the scheduling framework inserts the same unschedulable status to all nodes in NodeToStatusMap.
// So, we shouldn't add the message from NodeToStatusMap when the PreFilter failed.
// the scheduling framework inserts the same unschedulable status to all nodes in NodeToStatusReader.
// So, we shouldn't add the message from NodeToStatusReader when the PreFilter failed.
// Otherwise, we will have duplicated reasons in the error message.
reasons := make(map[string]int)
f.Diagnosis.NodeToStatus.ForEachExplicitNode(func(_ string, status *fwk.Status) {
@ -806,7 +806,7 @@ func (f *FitError) Error() string {
}
})
if f.Diagnosis.NodeToStatus.Len() < f.NumAllNodes {
// Adding predefined reasons for nodes that are absent in NodeToStatusMap
// Adding predefined reasons for nodes that are absent in NodeToStatusReader
for _, reason := range f.Diagnosis.NodeToStatus.AbsentNodesStatus().Reasons() {
reasons[reason] += f.NumAllNodes - f.Diagnosis.NodeToStatus.Len()
}
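
To make the tally concrete: with NumAllNodes = 5, two nodes carrying an explicit status, and the remaining three covered only by the shared absent-nodes status, the loops above report each reason once with its count (the numbers and reason strings here are assumed for illustration):

	// Illustrative walk-through of the aggregation above.
	reasons := map[string]int{}
	reasons["node(s) had untolerated taint"] += 2                  // explicit per-node statuses
	reasons["node(s) didn't match Pod's node affinity"] += 5 - 2   // absent-node status applied to the other 3
	// The final message then reads "2 node(s) had untolerated taint,
	// 3 node(s) didn't match Pod's node affinity" instead of repeating
	// one reason per node.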


@ -28,7 +28,6 @@ import (
"k8s.io/klog/v2/ktesting"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
)
@ -291,8 +290,8 @@ func (p *fakePlugin) Bind(context.Context, fwk.CycleState, *v1.Pod, string) *fwk
return nil
}
func newFakePlugin(name string) func(ctx context.Context, object runtime.Object, handle framework.Handle) (framework.Plugin, error) {
return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func newFakePlugin(name string) func(ctx context.Context, object runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
return func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &fakePlugin{name: name}, nil
}
}


@ -139,11 +139,11 @@ func (sched *Scheduler) ScheduleOne(ctx context.Context) {
// newFailureNominatingInfo returns the appropriate NominatingInfo for scheduling failures.
// When NominatedNodeNameForExpectation feature is enabled, it returns nil (no clearing).
// Otherwise, it returns NominatingInfo to clear the pod's nominated node.
func (sched *Scheduler) newFailureNominatingInfo() *framework.NominatingInfo {
func (sched *Scheduler) newFailureNominatingInfo() *fwk.NominatingInfo {
if sched.nominatedNodeNameForExpectationEnabled {
return nil
}
return &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: ""}
return &fwk.NominatingInfo{NominatingMode: fwk.ModeOverride, NominatedNodeName: ""}
}
// schedulingCycle tries to schedule a single Pod.
@ -193,7 +193,7 @@ func (sched *Scheduler) schedulingCycle(
logger.V(5).Info("Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", msg)
}
var nominatingInfo *framework.NominatingInfo
var nominatingInfo *fwk.NominatingInfo
if result != nil {
nominatingInfo = result.NominatingInfo
}
@ -297,9 +297,9 @@ func (sched *Scheduler) bindingCycle(
if preFlightStatus.IsSuccess() || schedFramework.WillWaitOnPermit(ctx, assumedPod) {
// Add NominatedNodeName to tell the external components (e.g., the cluster autoscaler) that the pod is about to be bound to the node.
// We only do this when any of WaitOnPermit or PreBind will work because otherwise the pod will be soon bound anyway.
if err := updatePod(ctx, sched.client, schedFramework.APICacher(), assumedPod, nil, &framework.NominatingInfo{
if err := updatePod(ctx, sched.client, schedFramework.APICacher(), assumedPod, nil, &fwk.NominatingInfo{
NominatedNodeName: scheduleResult.SuggestedHost,
NominatingMode: framework.ModeOverride,
NominatingMode: fwk.ModeOverride,
}); err != nil {
logger.Error(err, "Failed to update the nominated node name in the binding cycle", "pod", klog.KObj(assumedPod), "nominatedNodeName", scheduleResult.SuggestedHost)
// We continue the processing because it's not critical enough to stop binding cycles here.
@ -550,7 +550,7 @@ func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, schedFramework
//
// Extender doesn't support any kind of requeueing feature like EnqueueExtensions in the scheduling framework.
// When Extenders reject some Nodes and the pod ends up being unschedulable,
// we put framework.ExtenderName to pInfo.UnschedulablePlugins.
// we add fwk.ExtenderName to pInfo.UnschedulablePlugins.
// This Pod will be requeued from the unschedulable pod pool to activeQ/backoffQ
// by any cluster event.
// https://github.com/kubernetes/kubernetes/issues/122019
@ -722,7 +722,7 @@ func (sched *Scheduler) numFeasibleNodesToFind(percentageOfNodesToScore *int32,
return numNodes
}
func findNodesThatPassExtenders(ctx context.Context, extenders []framework.Extender, pod *v1.Pod, feasibleNodes []fwk.NodeInfo, statuses *framework.NodeToStatus) ([]fwk.NodeInfo, error) {
func findNodesThatPassExtenders(ctx context.Context, extenders []fwk.Extender, pod *v1.Pod, feasibleNodes []fwk.NodeInfo, statuses *framework.NodeToStatus) ([]fwk.NodeInfo, error) {
logger := klog.FromContext(ctx)
// Extenders are called sequentially.
@ -775,19 +775,19 @@ func findNodesThatPassExtenders(ctx context.Context, extenders []framework.Exten
// All scores are finally combined (added) to get the total weighted scores of all nodes
func prioritizeNodes(
ctx context.Context,
extenders []framework.Extender,
extenders []fwk.Extender,
schedFramework framework.Framework,
state fwk.CycleState,
pod *v1.Pod,
nodes []fwk.NodeInfo,
) ([]framework.NodePluginScores, error) {
) ([]fwk.NodePluginScores, error) {
logger := klog.FromContext(ctx)
// If no priority configs are provided, then all nodes will have a score of one.
// This is required to generate the priority list in the required format
if len(extenders) == 0 && !schedFramework.HasScorePlugins() {
result := make([]framework.NodePluginScores, 0, len(nodes))
result := make([]fwk.NodePluginScores, 0, len(nodes))
for i := range nodes {
result = append(result, framework.NodePluginScores{
result = append(result, fwk.NodePluginScores{
Name: nodes[i].Node().Name,
TotalScore: 1,
})
@ -820,7 +820,7 @@ func prioritizeNodes(
if len(extenders) != 0 && nodes != nil {
// allNodeExtendersScores has all extenders scores for all nodes.
// It is keyed with node name.
allNodeExtendersScores := make(map[string]*framework.NodePluginScores, len(nodes))
allNodeExtendersScores := make(map[string]*fwk.NodePluginScores, len(nodes))
var mu sync.Mutex
var wg sync.WaitGroup
for i := range extenders {
@ -851,15 +851,15 @@ func prioritizeNodes(
// MaxExtenderPriority may diverge from the max priority used in the scheduler and defined by MaxNodeScore,
// therefore we need to scale the score returned by extenders to the score range used by the scheduler.
finalscore := score * weight * (framework.MaxNodeScore / extenderv1.MaxExtenderPriority)
finalscore := score * weight * (fwk.MaxNodeScore / extenderv1.MaxExtenderPriority)
if allNodeExtendersScores[nodename] == nil {
allNodeExtendersScores[nodename] = &framework.NodePluginScores{
allNodeExtendersScores[nodename] = &fwk.NodePluginScores{
Name: nodename,
Scores: make([]framework.PluginScore, 0, len(extenders)),
Scores: make([]fwk.PluginScore, 0, len(extenders)),
}
}
allNodeExtendersScores[nodename].Scores = append(allNodeExtendersScores[nodename].Scores, framework.PluginScore{
allNodeExtendersScores[nodename].Scores = append(allNodeExtendersScores[nodename].Scores, fwk.PluginScore{
Name: extenders[extIndex].Name(),
Score: finalscore,
})
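
To make the scaling concrete: in-tree, fwk.MaxNodeScore is 100 and extenderv1.MaxExtenderPriority is 10 (values assumed here; this diff does not restate them), so the multiplier is 10. An extender returning a score of 3 for a node, configured with weight 2, contributes:

	// finalscore = score * weight * (MaxNodeScore / MaxExtenderPriority)
	//            = 3 * 2 * (100 / 10)
	//            = 60   // already on the scheduler's 0..100 scale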
@ -891,7 +891,7 @@ var errEmptyPriorityList = errors.New("empty priorityList")
// in a reservoir sampling manner from the nodes that had the highest score.
// It also returns the top {count} Nodes,
// and the top of the list will be always the selected host.
func selectHost(nodeScoreList []framework.NodePluginScores, count int) (string, []framework.NodePluginScores, error) {
func selectHost(nodeScoreList []fwk.NodePluginScores, count int) (string, []fwk.NodePluginScores, error) {
if len(nodeScoreList) == 0 {
return "", nil, errEmptyPriorityList
}
@ -901,12 +901,12 @@ func selectHost(nodeScoreList []framework.NodePluginScores, count int) (string,
cntOfMaxScore := 1
selectedIndex := 0
// The top of the heap is the NodeScoreResult with the highest score.
sortedNodeScoreList := make([]framework.NodePluginScores, 0, count)
sortedNodeScoreList = append(sortedNodeScoreList, heap.Pop(&h).(framework.NodePluginScores))
sortedNodeScoreList := make([]fwk.NodePluginScores, 0, count)
sortedNodeScoreList = append(sortedNodeScoreList, heap.Pop(&h).(fwk.NodePluginScores))
// This for-loop continues until all Nodes with the highest scores have been checked for reservoir sampling,
// and sortedNodeScoreList gets (count - 1) elements.
for ns := heap.Pop(&h).(framework.NodePluginScores); ; ns = heap.Pop(&h).(framework.NodePluginScores) {
for ns := heap.Pop(&h).(fwk.NodePluginScores); ; ns = heap.Pop(&h).(fwk.NodePluginScores) {
if ns.TotalScore != sortedNodeScoreList[0].TotalScore && len(sortedNodeScoreList) == count {
break
}
@ -940,8 +940,8 @@ func selectHost(nodeScoreList []framework.NodePluginScores, count int) (string,
return sortedNodeScoreList[0].Name, sortedNodeScoreList, nil
}
// nodeScoreHeap is a heap of framework.NodePluginScores.
type nodeScoreHeap []framework.NodePluginScores
// nodeScoreHeap is a heap of fwk.NodePluginScores.
type nodeScoreHeap []fwk.NodePluginScores
// nodeScoreHeap implements heap.Interface.
var _ heap.Interface = &nodeScoreHeap{}
@ -951,7 +951,7 @@ func (h nodeScoreHeap) Less(i, j int) bool { return h[i].TotalScore > h[j].Total
func (h nodeScoreHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *nodeScoreHeap) Push(x interface{}) {
*h = append(*h, x.(framework.NodePluginScores))
*h = append(*h, x.(fwk.NodePluginScores))
}
func (h *nodeScoreHeap) Pop() interface{} {
@ -1041,7 +1041,7 @@ func getAttemptsLabel(p *framework.QueuedPodInfo) string {
// handleSchedulingFailure records an event for the pod that indicates the
// pod has failed to schedule. It also updates the pod condition and nominated node name if set.
func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *fwk.Status, nominatingInfo *framework.NominatingInfo, start time.Time) {
func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *fwk.Status, nominatingInfo *fwk.NominatingInfo, start time.Time) {
calledDone := false
defer func() {
if !calledDone {
@ -1139,7 +1139,7 @@ func truncateMessage(message string) string {
return message[:max-len(suffix)] + suffix
}
func updatePod(ctx context.Context, client clientset.Interface, apiCacher framework.APICacher, pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *framework.NominatingInfo) error {
func updatePod(ctx context.Context, client clientset.Interface, apiCacher fwk.APICacher, pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *fwk.NominatingInfo) error {
if apiCacher != nil {
// When API cacher is available, use it to patch the status.
_, err := apiCacher.PatchPodStatus(pod, condition, nominatingInfo)
@ -1158,7 +1158,7 @@ func updatePod(ctx context.Context, client clientset.Interface, apiCacher framew
podStatusCopy := pod.Status.DeepCopy()
// NominatedNodeName is updated only if we are trying to set it, and the value is
// different from the existing one.
nnnNeedsUpdate := nominatingInfo.Mode() == framework.ModeOverride && pod.Status.NominatedNodeName != nominatingInfo.NominatedNodeName
nnnNeedsUpdate := nominatingInfo.Mode() == fwk.ModeOverride && pod.Status.NominatedNodeName != nominatingInfo.NominatedNodeName
podConditionNeedsUpdate := condition != nil && podutil.UpdatePodCondition(podStatusCopy, condition)
if !podConditionNeedsUpdate && !nnnNeedsUpdate {
return nil


@ -118,7 +118,7 @@ func (f *fakeExtender) IsIgnorable() bool {
func (f *fakeExtender) ProcessPreemption(
_ *v1.Pod,
_ map[string]*extenderv1.Victims,
_ framework.NodeInfoLister,
_ fwk.NodeInfoLister,
) (map[string]*extenderv1.Victims, error) {
return nil, nil
}
@ -168,7 +168,7 @@ func (f *fakeExtender) IsFilter() bool {
type falseMapPlugin struct{}
func newFalseMapPlugin() frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
return func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &falseMapPlugin{}, nil
}
}
@ -181,14 +181,14 @@ func (pl *falseMapPlugin) Score(_ context.Context, _ fwk.CycleState, _ *v1.Pod,
return 0, fwk.AsStatus(errPrioritize)
}
func (pl *falseMapPlugin) ScoreExtensions() framework.ScoreExtensions {
func (pl *falseMapPlugin) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
type numericMapPlugin struct{}
func newNumericMapPlugin() frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
return func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &numericMapPlugin{}, nil
}
}
@ -206,12 +206,12 @@ func (pl *numericMapPlugin) Score(_ context.Context, _ fwk.CycleState, _ *v1.Pod
return int64(score), nil
}
func (pl *numericMapPlugin) ScoreExtensions() framework.ScoreExtensions {
func (pl *numericMapPlugin) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
// NewNoPodsFilterPlugin initializes a noPodsFilterPlugin and returns it.
func NewNoPodsFilterPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func NewNoPodsFilterPlugin(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &noPodsFilterPlugin{}, nil
}
@ -230,11 +230,11 @@ func (pl *reverseNumericMapPlugin) Score(_ context.Context, _ fwk.CycleState, _
return int64(score), nil
}
func (pl *reverseNumericMapPlugin) ScoreExtensions() framework.ScoreExtensions {
func (pl *reverseNumericMapPlugin) ScoreExtensions() fwk.ScoreExtensions {
return pl
}
func (pl *reverseNumericMapPlugin) NormalizeScore(_ context.Context, _ fwk.CycleState, _ *v1.Pod, nodeScores framework.NodeScoreList) *fwk.Status {
func (pl *reverseNumericMapPlugin) NormalizeScore(_ context.Context, _ fwk.CycleState, _ *v1.Pod, nodeScores fwk.NodeScoreList) *fwk.Status {
var maxScore float64
minScore := math.MaxFloat64
@ -243,7 +243,7 @@ func (pl *reverseNumericMapPlugin) NormalizeScore(_ context.Context, _ fwk.Cycle
minScore = math.Min(minScore, float64(hostPriority.Score))
}
for i, hostPriority := range nodeScores {
nodeScores[i] = framework.NodeScore{
nodeScores[i] = fwk.NodeScore{
Name: hostPriority.Name,
Score: int64(maxScore + minScore - float64(hostPriority.Score)),
}
@ -252,7 +252,7 @@ func (pl *reverseNumericMapPlugin) NormalizeScore(_ context.Context, _ fwk.Cycle
}
func newReverseNumericMapPlugin() frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
return func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &reverseNumericMapPlugin{}, nil
}
}
@ -267,11 +267,11 @@ func (pl *trueMapPlugin) Score(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _
return 1, nil
}
func (pl *trueMapPlugin) ScoreExtensions() framework.ScoreExtensions {
func (pl *trueMapPlugin) ScoreExtensions() fwk.ScoreExtensions {
return pl
}
func (pl *trueMapPlugin) NormalizeScore(_ context.Context, _ fwk.CycleState, _ *v1.Pod, nodeScores framework.NodeScoreList) *fwk.Status {
func (pl *trueMapPlugin) NormalizeScore(_ context.Context, _ fwk.CycleState, _ *v1.Pod, nodeScores fwk.NodeScoreList) *fwk.Status {
for _, host := range nodeScores {
if host.Name == "" {
return fwk.NewStatus(fwk.Error, "unexpected empty host name")
@ -281,7 +281,7 @@ func (pl *trueMapPlugin) NormalizeScore(_ context.Context, _ fwk.CycleState, _ *
}
func newTrueMapPlugin() frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
return func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &trueMapPlugin{}, nil
}
}
@ -320,7 +320,7 @@ func (s *fakeNodeSelector) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod
return nil
}
func newFakeNodeSelector(_ context.Context, args runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func newFakeNodeSelector(_ context.Context, args runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
pl := &fakeNodeSelector{}
if err := frameworkruntime.DecodeInto(args, &pl.fakeNodeSelectorArgs); err != nil {
return nil, err
@ -362,7 +362,7 @@ func (f *fakeNodeSelectorDependOnPodAnnotation) Filter(_ context.Context, _ fwk.
return nil
}
func newFakeNodeSelectorDependOnPodAnnotation(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func newFakeNodeSelectorDependOnPodAnnotation(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &fakeNodeSelectorDependOnPodAnnotation{}, nil
}
@ -370,8 +370,8 @@ type TestPlugin struct {
name string
}
var _ framework.ScorePlugin = &TestPlugin{}
var _ framework.FilterPlugin = &TestPlugin{}
var _ fwk.ScorePlugin = &TestPlugin{}
var _ fwk.FilterPlugin = &TestPlugin{}
func (t *TestPlugin) Name() string {
return t.name
@ -381,7 +381,7 @@ func (t *TestPlugin) Score(ctx context.Context, state fwk.CycleState, p *v1.Pod,
return 1, nil
}
func (t *TestPlugin) ScoreExtensions() framework.ScoreExtensions {
func (t *TestPlugin) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
@ -1000,7 +1000,7 @@ func TestSchedulerScheduleOne(t *testing.T) {
var gotForgetPod *v1.Pod
var gotAssumedPod *v1.Pod
var gotBinding *v1.Binding
var gotNominatingInfo *framework.NominatingInfo
var gotNominatingInfo *fwk.NominatingInfo
client := clientsetfake.NewClientset(item.sendPod)
informerFactory := informers.NewSharedInformerFactory(client, 0)
@ -1101,7 +1101,7 @@ func TestSchedulerScheduleOne(t *testing.T) {
sched.SchedulePod = func(ctx context.Context, fwk framework.Framework, state fwk.CycleState, pod *v1.Pod) (ScheduleResult, error) {
return item.mockScheduleResult, item.injectSchedulingError
}
sched.FailureHandler = func(ctx context.Context, fwk framework.Framework, p *framework.QueuedPodInfo, status *fwk.Status, ni *framework.NominatingInfo, start time.Time) {
sched.FailureHandler = func(ctx context.Context, fwk framework.Framework, p *framework.QueuedPodInfo, status *fwk.Status, ni *fwk.NominatingInfo, start time.Time) {
gotPod = p.Pod
gotError = status.AsError()
gotNominatingInfo = ni
@ -1157,10 +1157,10 @@ func TestSchedulerScheduleOne(t *testing.T) {
t.Errorf("Unexpected error. Wanted %v, got %v", item.expectError.Error(), gotError.Error())
}
if item.expectError != nil {
var expectedNominatingInfo *framework.NominatingInfo
var expectedNominatingInfo *fwk.NominatingInfo
// Check nominatingInfo expectation based on feature gate
if !nominatedNodeNameForExpectationEnabled {
expectedNominatingInfo = &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: ""}
expectedNominatingInfo = &fwk.NominatingInfo{NominatingMode: fwk.ModeOverride, NominatedNodeName: ""}
}
if diff := cmp.Diff(expectedNominatingInfo, gotNominatingInfo); diff != "" {
t.Errorf("Unexpected nominatingInfo (-want,+got):\n%s", diff)
@ -1455,7 +1455,7 @@ func TestScheduleOneMarksPodAsProcessedBeforePreBind(t *testing.T) {
sched.SchedulePod = func(ctx context.Context, fwk framework.Framework, state fwk.CycleState, pod *v1.Pod) (ScheduleResult, error) {
return item.mockScheduleResult, item.injectSchedulingError
}
sched.FailureHandler = func(_ context.Context, fwk framework.Framework, p *framework.QueuedPodInfo, status *fwk.Status, _ *framework.NominatingInfo, _ time.Time) {
sched.FailureHandler = func(_ context.Context, fwk framework.Framework, p *framework.QueuedPodInfo, status *fwk.Status, _ *fwk.NominatingInfo, _ time.Time) {
gotCallsToFailureHandler++
gotPodIsInFlightAtFailureHandler = podListContainsPod(queue.InFlightPods(), p.Pod)
@ -1987,14 +1987,14 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
func TestSchedulerBinding(t *testing.T) {
table := []struct {
podName string
extenders []framework.Extender
extenders []fwk.Extender
wantBinderID int
name string
}{
{
name: "the extender is not a binder",
podName: "pod0",
extenders: []framework.Extender{
extenders: []fwk.Extender{
&fakeExtender{isBinder: false, interestedPodName: "pod0"},
},
wantBinderID: -1, // default binding.
@ -2002,7 +2002,7 @@ func TestSchedulerBinding(t *testing.T) {
{
name: "one of the extenders is a binder and interested in pod",
podName: "pod0",
extenders: []framework.Extender{
extenders: []fwk.Extender{
&fakeExtender{isBinder: false, interestedPodName: "pod0"},
&fakeExtender{isBinder: true, interestedPodName: "pod0"},
},
@ -2011,7 +2011,7 @@ func TestSchedulerBinding(t *testing.T) {
{
name: "one of the extenders is a binder, but not interested in pod",
podName: "pod1",
extenders: []framework.Extender{
extenders: []fwk.Extender{
&fakeExtender{isBinder: false, interestedPodName: "pod1"},
&fakeExtender{isBinder: true, interestedPodName: "pod0"},
},
@ -2020,7 +2020,7 @@ func TestSchedulerBinding(t *testing.T) {
{
name: "ignore when extender bind failed",
podName: "pod1",
extenders: []framework.Extender{
extenders: []fwk.Extender{
&fakeExtender{isBinder: true, errBind: true, interestedPodName: "pod1", ignorable: true},
},
wantBinderID: -1, // default binding.
@ -2103,7 +2103,7 @@ func TestUpdatePod(t *testing.T) {
currentPodConditions []v1.PodCondition
newPodCondition *v1.PodCondition
currentNominatedNodeName string
newNominatingInfo *framework.NominatingInfo
newNominatingInfo *fwk.NominatingInfo
expectPatchRequest bool
expectedPatchDataPattern string
}{
@ -2233,7 +2233,7 @@ func TestUpdatePod(t *testing.T) {
Reason: "currentReason",
Message: "currentMessage",
},
newNominatingInfo: &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "node1"},
newNominatingInfo: &fwk.NominatingInfo{NominatingMode: fwk.ModeOverride, NominatedNodeName: "node1"},
expectPatchRequest: true,
expectedPatchDataPattern: `{"status":{"nominatedNodeName":"node1"}}`,
},
@ -2310,7 +2310,7 @@ func TestUpdatePod(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
var apiCacher framework.APICacher
var apiCacher fwk.APICacher
if asyncAPICallsEnabled {
apiDispatcher := apidispatcher.New(cs, 16, apicalls.Relevances)
apiDispatcher.Run(logger)
@ -2354,21 +2354,21 @@ func TestUpdatePod(t *testing.T) {
func Test_SelectHost(t *testing.T) {
tests := []struct {
name string
list []framework.NodePluginScores
list []fwk.NodePluginScores
topNodesCnt int
possibleNodes sets.Set[string]
possibleNodeLists [][]framework.NodePluginScores
possibleNodeLists [][]fwk.NodePluginScores
wantError error
}{
{
name: "unique properly ordered scores",
list: []framework.NodePluginScores{
list: []fwk.NodePluginScores{
{Name: "node1", TotalScore: 1},
{Name: "node2", TotalScore: 2},
},
topNodesCnt: 2,
possibleNodes: sets.New("node2"),
possibleNodeLists: [][]framework.NodePluginScores{
possibleNodeLists: [][]fwk.NodePluginScores{
{
{Name: "node2", TotalScore: 2},
{Name: "node1", TotalScore: 1},
@ -2377,13 +2377,13 @@ func Test_SelectHost(t *testing.T) {
},
{
name: "numberOfNodeScoresToReturn > len(list)",
list: []framework.NodePluginScores{
list: []fwk.NodePluginScores{
{Name: "node1", TotalScore: 1},
{Name: "node2", TotalScore: 2},
},
topNodesCnt: 100,
possibleNodes: sets.New("node2"),
possibleNodeLists: [][]framework.NodePluginScores{
possibleNodeLists: [][]fwk.NodePluginScores{
{
{Name: "node2", TotalScore: 2},
{Name: "node1", TotalScore: 1},
@ -2392,14 +2392,14 @@ func Test_SelectHost(t *testing.T) {
},
{
name: "equal scores",
list: []framework.NodePluginScores{
list: []fwk.NodePluginScores{
{Name: "node2.1", TotalScore: 2},
{Name: "node2.2", TotalScore: 2},
{Name: "node2.3", TotalScore: 2},
},
topNodesCnt: 2,
possibleNodes: sets.New("node2.1", "node2.2", "node2.3"),
possibleNodeLists: [][]framework.NodePluginScores{
possibleNodeLists: [][]fwk.NodePluginScores{
{
{Name: "node2.1", TotalScore: 2},
{Name: "node2.2", TotalScore: 2},
@ -2428,7 +2428,7 @@ func Test_SelectHost(t *testing.T) {
},
{
name: "out of order scores",
list: []framework.NodePluginScores{
list: []fwk.NodePluginScores{
{Name: "node3.1", TotalScore: 3},
{Name: "node2.1", TotalScore: 2},
{Name: "node1.1", TotalScore: 1},
@ -2436,7 +2436,7 @@ func Test_SelectHost(t *testing.T) {
},
topNodesCnt: 3,
possibleNodes: sets.New("node3.1", "node3.2"),
possibleNodeLists: [][]framework.NodePluginScores{
possibleNodeLists: [][]fwk.NodePluginScores{
{
{Name: "node3.1", TotalScore: 3},
{Name: "node3.2", TotalScore: 3},
@ -2451,7 +2451,7 @@ func Test_SelectHost(t *testing.T) {
},
{
name: "empty priority list",
list: []framework.NodePluginScores{},
list: []fwk.NodePluginScores{},
possibleNodes: sets.Set[string]{},
wantError: errEmptyPriorityList,
},
@ -2638,7 +2638,7 @@ func TestFindNodesThatPassExtenders(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
var extenders []framework.Extender
var extenders []fwk.Extender
for ii := range tt.extenders {
extenders = append(extenders, &tt.extenders[ii])
}
@ -3168,11 +3168,11 @@ func TestSchedulerSchedulePod(t *testing.T) {
),
tf.RegisterPreFilterPlugin(
"FakePreFilter2",
tf.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New("node2")}, nil),
tf.NewFakePreFilterPlugin("FakePreFilter2", &fwk.PreFilterResult{NodeNames: sets.New("node2")}, nil),
),
tf.RegisterPreFilterPlugin(
"FakePreFilter3",
tf.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.New("node1", "node2")}, nil),
tf.NewFakePreFilterPlugin("FakePreFilter3", &fwk.PreFilterResult{NodeNames: sets.New("node1", "node2")}, nil),
),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
@ -3196,11 +3196,11 @@ func TestSchedulerSchedulePod(t *testing.T) {
),
tf.RegisterPreFilterPlugin(
"FakePreFilter2",
tf.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New("node2")}, nil),
tf.NewFakePreFilterPlugin("FakePreFilter2", &fwk.PreFilterResult{NodeNames: sets.New("node2")}, nil),
),
tf.RegisterPreFilterPlugin(
"FakePreFilter3",
tf.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.New("node1")}, nil),
tf.NewFakePreFilterPlugin("FakePreFilter3", &fwk.PreFilterResult{NodeNames: sets.New("node1")}, nil),
),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
@ -3230,7 +3230,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
),
tf.RegisterPreFilterPlugin(
"FakePreFilter2",
tf.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New[string]()}, nil),
tf.NewFakePreFilterPlugin("FakePreFilter2", &fwk.PreFilterResult{NodeNames: sets.New[string]()}, nil),
),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
@ -3254,7 +3254,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
tf.RegisterPreFilterPlugin(
"FakePreFilter",
tf.NewFakePreFilterPlugin("FakePreFilter", &framework.PreFilterResult{NodeNames: sets.New[string]("node2")}, nil),
tf.NewFakePreFilterPlugin("FakePreFilter", &fwk.PreFilterResult{NodeNames: sets.New[string]("node2")}, nil),
),
tf.RegisterFilterPlugin(
"FakeFilter",
@ -3293,7 +3293,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
"node1": fwk.Unschedulable,
}),
),
tf.RegisterPluginAsExtensions("FakeFilter2", func(_ context.Context, configuration runtime.Object, f framework.Handle) (framework.Plugin, error) {
tf.RegisterPluginAsExtensions("FakeFilter2", func(_ context.Context, configuration runtime.Object, f fwk.Handle) (fwk.Plugin, error) {
return tf.FakePreFilterAndFilterPlugin{
FakePreFilterPlugin: &tf.FakePreFilterPlugin{
Result: nil,
@ -3360,7 +3360,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
tf.RegisterPreFilterPlugin(
"FakePreFilter",
tf.NewFakePreFilterPlugin("FakePreFilter", &framework.PreFilterResult{NodeNames: sets.New("node1", "node2")}, nil),
tf.NewFakePreFilterPlugin("FakePreFilter", &fwk.PreFilterResult{NodeNames: sets.New("node1", "node2")}, nil),
),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
@ -3380,7 +3380,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
tf.RegisterPreFilterPlugin(
"FakePreFilter",
tf.NewFakePreFilterPlugin("FakePreFilter", &framework.PreFilterResult{
tf.NewFakePreFilterPlugin("FakePreFilter", &fwk.PreFilterResult{
NodeNames: sets.New("invalid-node"),
}, nil),
),
@ -3472,7 +3472,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
_, _ = cs.CoreV1().PersistentVolumes().Create(ctx, &pv, metav1.CreateOptions{})
}
snapshot := internalcache.NewSnapshot(test.pods, nodes)
fwk, err := tf.NewFramework(
schedFramework, err := tf.NewFramework(
ctx,
test.registerPlugins, "",
frameworkruntime.WithSnapshotSharedLister(snapshot),
@ -3483,7 +3483,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
t.Fatal(err)
}
var extenders []framework.Extender
var extenders []fwk.Extender
for ii := range test.extenders {
extenders = append(extenders, &test.extenders[ii])
}
@ -3498,7 +3498,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
informerFactory.Start(ctx.Done())
informerFactory.WaitForCacheSync(ctx.Done())
result, err := sched.SchedulePod(ctx, fwk, framework.NewCycleState(), test.pod)
result, err := sched.SchedulePod(ctx, schedFramework, framework.NewCycleState(), test.pod)
if err != test.wErr {
gotFitErr, gotOK := err.(*framework.FitError)
wantFitErr, wantOK := test.wErr.(*framework.FitError)
@ -3649,7 +3649,7 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
plugin := tf.FakeFilterPlugin{}
registerFakeFilterFunc := tf.RegisterFilterPlugin(
"FakeFilter",
func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return &plugin, nil
},
)
@ -3672,7 +3672,7 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
if err := scheduler.Cache.UpdateSnapshot(logger, scheduler.nodeInfoSnapshot); err != nil {
t.Fatal(err)
}
fwk, err := tf.NewFramework(
schedFramework, err := tf.NewFramework(
ctx,
registerPlugins, "",
frameworkruntime.WithPodNominator(internalqueue.NewSchedulingQueue(nil, informerFactory)),
@ -3690,9 +3690,9 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
if err != nil {
t.Fatalf("Error adding nominated pod to podInformer: %s", err)
}
fwk.AddNominatedPod(logger, podinfo, &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "1"})
schedFramework.AddNominatedPod(logger, podinfo, &fwk.NominatingInfo{NominatingMode: fwk.ModeOverride, NominatedNodeName: "1"})
_, _, err = scheduler.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), test.pod)
_, _, err = scheduler.findNodesThatFitPod(ctx, schedFramework, framework.NewCycleState(), test.pod)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -3914,7 +3914,7 @@ func Test_prioritizeNodes(t *testing.T) {
nodes []*v1.Node
pluginRegistrations []tf.RegisterPluginFunc
extenders []tf.FakeExtender
want []framework.NodePluginScores
want []fwk.NodePluginScores
}{
{
name: "the score from all plugins should be recorded in PluginToNodeScores",
@ -3927,10 +3927,10 @@ func Test_prioritizeNodes(t *testing.T) {
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
extenders: nil,
want: []framework.NodePluginScores{
want: []fwk.NodePluginScores{
{
Name: "node1",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: "Node2Prioritizer",
Score: 10,
@ -3944,7 +3944,7 @@ func Test_prioritizeNodes(t *testing.T) {
},
{
Name: "node2",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: "Node2Prioritizer",
Score: 100,
@ -3989,10 +3989,10 @@ func Test_prioritizeNodes(t *testing.T) {
},
},
},
want: []framework.NodePluginScores{
want: []fwk.NodePluginScores{
{
Name: "node1",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: "FakeExtender1",
@ -4011,7 +4011,7 @@ func Test_prioritizeNodes(t *testing.T) {
},
{
Name: "node2",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: "FakeExtender1",
Score: 30,
@ -4044,10 +4044,10 @@ func Test_prioritizeNodes(t *testing.T) {
), "PreScore", "Score"),
},
extenders: nil,
want: []framework.NodePluginScores{
want: []fwk.NodePluginScores{
{
Name: "node1",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: "Node2Prioritizer",
Score: 10,
@ -4061,7 +4061,7 @@ func Test_prioritizeNodes(t *testing.T) {
},
{
Name: "node2",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: "Node2Prioritizer",
Score: 100,
@ -4088,9 +4088,9 @@ func Test_prioritizeNodes(t *testing.T) {
), "PreScore", "Score"),
},
extenders: nil,
want: []framework.NodePluginScores{
{Name: "node1", Scores: []framework.PluginScore{}},
{Name: "node2", Scores: []framework.PluginScore{}},
want: []fwk.NodePluginScores{
{Name: "node1", Scores: []fwk.PluginScore{}},
{Name: "node2", Scores: []fwk.PluginScore{}},
},
},
{
@ -4115,10 +4115,10 @@ func Test_prioritizeNodes(t *testing.T) {
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
extenders: nil,
want: []framework.NodePluginScores{
want: []fwk.NodePluginScores{
{
Name: "node1",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: "ImageLocality",
Score: 5,
@ -4128,7 +4128,7 @@ func Test_prioritizeNodes(t *testing.T) {
},
{
Name: "node2",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: "ImageLocality",
Score: 5,
@ -4138,7 +4138,7 @@ func Test_prioritizeNodes(t *testing.T) {
},
{
Name: "node3",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: "ImageLocality",
Score: 5,
@ -4169,10 +4169,10 @@ func Test_prioritizeNodes(t *testing.T) {
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
extenders: nil,
want: []framework.NodePluginScores{
want: []fwk.NodePluginScores{
{
Name: "node1",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: "ImageLocality",
Score: 18,
@ -4182,7 +4182,7 @@ func Test_prioritizeNodes(t *testing.T) {
},
{
Name: "node2",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: "ImageLocality",
Score: 18,
@ -4192,7 +4192,7 @@ func Test_prioritizeNodes(t *testing.T) {
},
{
Name: "node3",
Scores: []framework.PluginScore{
Scores: []fwk.PluginScore{
{
Name: "ImageLocality",
Score: 0,
@ -4220,7 +4220,7 @@ func Test_prioritizeNodes(t *testing.T) {
if err := cache.UpdateSnapshot(klog.FromContext(ctx), snapshot); err != nil {
t.Fatal(err)
}
fwk, err := tf.NewFramework(
schedFramework, err := tf.NewFramework(
ctx,
test.pluginRegistrations, "",
frameworkruntime.WithInformerFactory(informerFactory),
@ -4233,7 +4233,7 @@ func Test_prioritizeNodes(t *testing.T) {
}
state := framework.NewCycleState()
var extenders []framework.Extender
var extenders []fwk.Extender
for ii := range test.extenders {
extenders = append(extenders, &test.extenders[ii])
}
@ -4241,7 +4241,7 @@ func Test_prioritizeNodes(t *testing.T) {
if err != nil {
t.Fatalf("failed to list node from snapshot: %v", err)
}
nodesscores, err := prioritizeNodes(ctx, extenders, fwk, state, test.pod, nodeInfos)
nodesscores, err := prioritizeNodes(ctx, extenders, schedFramework, state, test.pod, nodeInfos)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -4417,7 +4417,7 @@ func TestPreferNominatedNodeFilterCallCounts(t *testing.T) {
plugin := tf.FakeFilterPlugin{FailedNodeReturnCodeMap: test.nodeReturnCodeMap}
registerFakeFilterFunc := tf.RegisterFilterPlugin(
"FakeFilter",
func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return &plugin, nil
},
)
@ -4621,7 +4621,7 @@ func setupTestScheduler(ctx context.Context, t *testing.T, client clientset.Inte
}
sched.SchedulePod = sched.schedulePod
sched.FailureHandler = func(_ context.Context, _ framework.Framework, p *framework.QueuedPodInfo, status *fwk.Status, _ *framework.NominatingInfo, _ time.Time) {
sched.FailureHandler = func(_ context.Context, _ framework.Framework, p *framework.QueuedPodInfo, status *fwk.Status, _ *fwk.NominatingInfo, _ time.Time) {
err := status.AsError()
errChan <- err
@ -4673,7 +4673,7 @@ func setupTestSchedulerWithVolumeBinding(ctx context.Context, t *testing.T, clie
fns := []tf.RegisterPluginFunc{
tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
tf.RegisterPluginAsExtensions(volumebinding.Name, func(ctx context.Context, plArgs runtime.Object, handle framework.Handle) (framework.Plugin, error) {
tf.RegisterPluginAsExtensions(volumebinding.Name, func(ctx context.Context, plArgs runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
return &volumebinding.VolumeBinding{Binder: volumeBinder, PVCLister: pvcInformer.Lister()}, nil
}, "PreFilter", "Filter", "Reserve", "PreBind"),
}


@ -74,7 +74,7 @@ type Scheduler struct {
// by NodeLister and Algorithm.
Cache internalcache.Cache
Extenders []framework.Extender
Extenders []fwk.Extender
// NextPod should be a function that blocks until the next pod
// is available. We don't use a channel for this, because scheduling
@ -160,7 +160,7 @@ type ScheduleResult struct {
// The number of nodes out of the evaluated ones that fit the pod.
FeasibleNodes int
// The nominating info for scheduling cycle.
nominatingInfo *framework.NominatingInfo
nominatingInfo *fwk.NominatingInfo
}
// WithComponentConfigVersion sets the component config version to the
@ -321,7 +321,7 @@ func New(ctx context.Context,
var resourceClaimCache *assumecache.AssumeCache
var resourceSliceTracker *resourceslicetracker.Tracker
var draManager framework.SharedDRAManager
var draManager fwk.SharedDRAManager
if feature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
resourceClaimInformer := informerFactory.Resource().V1().ResourceClaims().Informer()
resourceClaimCache = assumecache.NewAssumeCache(logger, resourceClaimInformer, "ResourceClaim", "", nil)
@ -370,12 +370,12 @@ func New(ctx context.Context,
return nil, errors.New("at least one profile is required")
}
preEnqueuePluginMap := make(map[string]map[string]framework.PreEnqueuePlugin)
preEnqueuePluginMap := make(map[string]map[string]fwk.PreEnqueuePlugin)
queueingHintsPerProfile := make(internalqueue.QueueingHintMapPerProfile)
var returnErr error
for profileName, profile := range profiles {
plugins := profile.PreEnqueuePlugins()
preEnqueuePluginMap[profileName] = make(map[string]framework.PreEnqueuePlugin, len(plugins))
preEnqueuePluginMap[profileName] = make(map[string]fwk.PreEnqueuePlugin, len(plugins))
for _, plugin := range plugins {
preEnqueuePluginMap[profileName][plugin.Name()] = plugin
}
@ -407,7 +407,7 @@ func New(ctx context.Context,
schedulerCache := internalcache.New(ctx, durationToExpireAssumedPod, apiDispatcher)
var apiCache framework.APICacher
var apiCache fwk.APICacher
if apiDispatcher != nil {
apiCache = apicache.New(podQueue, schedulerCache)
}
@ -451,7 +451,7 @@ var defaultQueueingHintFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) (fw
return fwk.Queue, nil
}
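
defaultQueueingHintFn is what a registration falls back to when a plugin supplies no hint of its own (or when queueing hints are disabled, as the tests below exercise): always requeue. A hand-written hint has the same shape; the plugin logic and label key below are hypothetical, and the usual v1/klog/fmt imports are assumed:

	// Hypothetical hint: requeue only when a Node update added the
	// label this plugin filters on.
	func nodeLabelAddedHint(_ klog.Logger, _ *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
		oldNode, okOld := oldObj.(*v1.Node)
		newNode, okNew := newObj.(*v1.Node)
		if !okOld || !okNew {
			return fwk.Queue, fmt.Errorf("unexpected object types %T, %T", oldObj, newObj)
		}
		if oldNode.Labels["example.com/ready"] == "" && newNode.Labels["example.com/ready"] != "" {
			return fwk.Queue, nil // this update could make the pod schedulable
		}
		return fwk.QueueSkip, nil
	}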
func buildQueueingHintMap(ctx context.Context, es []framework.EnqueueExtensions) (internalqueue.QueueingHintMap, error) {
func buildQueueingHintMap(ctx context.Context, es []fwk.EnqueueExtensions) (internalqueue.QueueingHintMap, error) {
queueingHintMap := make(internalqueue.QueueingHintMap)
var returnErr error
for _, e := range es {
@ -558,14 +558,14 @@ func NewInformerFactory(cs clientset.Interface, resyncPeriod time.Duration) info
return informerFactory
}
func buildExtenders(logger klog.Logger, extenders []schedulerapi.Extender, profiles []schedulerapi.KubeSchedulerProfile) ([]framework.Extender, error) {
var fExtenders []framework.Extender
func buildExtenders(logger klog.Logger, extenders []schedulerapi.Extender, profiles []schedulerapi.KubeSchedulerProfile) ([]fwk.Extender, error) {
var fExtenders []fwk.Extender
if len(extenders) == 0 {
return nil, nil
}
var ignoredExtendedResources []string
var ignorableExtenders []framework.Extender
var ignorableExtenders []fwk.Extender
for i := range extenders {
logger.V(2).Info("Creating extender", "extender", extenders[i])
extender, err := NewHTTPExtender(&extenders[i])
@ -616,7 +616,7 @@ func buildExtenders(logger klog.Logger, extenders []schedulerapi.Extender, profi
return fExtenders, nil
}
type FailureHandlerFn func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *fwk.Status, nominatingInfo *framework.NominatingInfo, start time.Time)
type FailureHandlerFn func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *fwk.Status, nominatingInfo *fwk.NominatingInfo, start time.Time)
func unionedGVKs(queueingHintsPerProfile internalqueue.QueueingHintMapPerProfile) map[fwk.EventResource]fwk.ActionType {
gvkMap := make(map[fwk.EventResource]fwk.ActionType)


@ -242,7 +242,7 @@ func TestSchedulerCreation(t *testing.T) {
t.Errorf("unexpected extenders (-want, +got):\n%s", diff)
}
// framework.Handle.Extenders()
// fwk.Handle.Extenders()
for _, p := range s.Profiles {
extenders := make([]string, 0, len(p.Extenders()))
for _, e := range p.Extenders() {
@ -525,7 +525,7 @@ func TestInitPluginsWithIndexers(t *testing.T) {
{
name: "register indexer, no conflicts",
entrypoints: map[string]frameworkruntime.PluginFactory{
"AddIndexer": func(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) {
"AddIndexer": func(ctx context.Context, obj runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
podInformer := handle.SharedInformerFactory().Core().V1().Pods()
err := podInformer.Informer().AddIndexers(cache.Indexers{
"nodeName": indexByPodSpecNodeName,
@ -538,14 +538,14 @@ func TestInitPluginsWithIndexers(t *testing.T) {
name: "register the same indexer name multiple times, conflict",
// order of registration doesn't matter
entrypoints: map[string]frameworkruntime.PluginFactory{
"AddIndexer1": func(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) {
"AddIndexer1": func(ctx context.Context, obj runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
podInformer := handle.SharedInformerFactory().Core().V1().Pods()
err := podInformer.Informer().AddIndexers(cache.Indexers{
"nodeName": indexByPodSpecNodeName,
})
return &TestPlugin{name: "AddIndexer1"}, err
},
"AddIndexer2": func(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) {
"AddIndexer2": func(ctx context.Context, obj runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
podInformer := handle.SharedInformerFactory().Core().V1().Pods()
err := podInformer.Informer().AddIndexers(cache.Indexers{
"nodeName": indexByPodAnnotationNodeName,
@@ -559,14 +559,14 @@ func TestInitPluginsWithIndexers(t *testing.T) {
name: "register the same indexer body with different names, no conflicts",
// order of registration doesn't matter
entrypoints: map[string]frameworkruntime.PluginFactory{
"AddIndexer1": func(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) {
"AddIndexer1": func(ctx context.Context, obj runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
podInformer := handle.SharedInformerFactory().Core().V1().Pods()
err := podInformer.Informer().AddIndexers(cache.Indexers{
"nodeName1": indexByPodSpecNodeName,
})
return &TestPlugin{name: "AddIndexer1"}, err
},
"AddIndexer2": func(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) {
"AddIndexer2": func(ctx context.Context, obj runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
podInformer := handle.SharedInformerFactory().Core().V1().Pods()
err := podInformer.Informer().AddIndexers(cache.Indexers{
"nodeName2": indexByPodAnnotationNodeName,
@@ -652,14 +652,14 @@ const (
func Test_buildQueueingHintMap(t *testing.T) {
tests := []struct {
name string
plugins []framework.Plugin
plugins []fwk.Plugin
want map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction
featuregateDisabled bool
wantErr error
}{
{
name: "filter without EnqueueExtensions plugin",
plugins: []framework.Plugin{&filterWithoutEnqueueExtensionsPlugin{}},
plugins: []fwk.Plugin{&filterWithoutEnqueueExtensionsPlugin{}},
want: map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction{
{Resource: fwk.Pod, ActionType: fwk.All}: {
{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
@@ -695,7 +695,7 @@ func Test_buildQueueingHintMap(t *testing.T) {
},
{
name: "node and pod plugin",
plugins: []framework.Plugin{&fakeNodePlugin{}, &fakePodPlugin{}},
plugins: []fwk.Plugin{&fakeNodePlugin{}, &fakePodPlugin{}},
want: map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction{
{Resource: fwk.Pod, ActionType: fwk.Add}: {
{PluginName: fakePod, QueueingHintFn: fakePodPluginQueueingFn},
@@ -710,7 +710,7 @@ func Test_buildQueueingHintMap(t *testing.T) {
},
{
name: "node and pod plugin (featuregate is disabled)",
plugins: []framework.Plugin{&fakeNodePlugin{}, &fakePodPlugin{}},
plugins: []fwk.Plugin{&fakeNodePlugin{}, &fakePodPlugin{}},
featuregateDisabled: true,
want: map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction{
{Resource: fwk.Pod, ActionType: fwk.Add}: {
@@ -726,12 +726,12 @@ func Test_buildQueueingHintMap(t *testing.T) {
},
{
name: "register plugin with empty event",
plugins: []framework.Plugin{&emptyEventPlugin{}},
plugins: []fwk.Plugin{&emptyEventPlugin{}},
want: map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction{},
},
{
name: "register plugins including emptyEventPlugin",
plugins: []framework.Plugin{&emptyEventPlugin{}, &fakeNodePlugin{}},
plugins: []fwk.Plugin{&emptyEventPlugin{}, &fakeNodePlugin{}},
want: map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction{
{Resource: fwk.Pod, ActionType: fwk.Add}: {
{PluginName: fakePod, QueueingHintFn: fakePodPluginQueueingFn},
@@ -746,7 +746,7 @@ func Test_buildQueueingHintMap(t *testing.T) {
},
{
name: "one EventsToRegister returns an error",
plugins: []framework.Plugin{&errorEventsToRegisterPlugin{}},
plugins: []fwk.Plugin{&errorEventsToRegisterPlugin{}},
want: map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction{},
wantErr: errors.New("mock error"),
},
@@ -767,7 +767,7 @@ func Test_buildQueueingHintMap(t *testing.T) {
plugins := append(tt.plugins, &fakebindPlugin{}, &fakeQueueSortPlugin{})
for _, pl := range plugins {
tmpPl := pl
if err := registry.Register(pl.Name(), func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
if err := registry.Register(pl.Name(), func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return tmpPl, nil
}); err != nil {
t.Fatalf("fail to register filter plugin (%s)", pl.Name())
@@ -1030,10 +1030,10 @@ func Test_UnionedGVKs(t *testing.T) {
registry := plugins.NewInTreeRegistry()
cfgPls := &schedulerapi.Plugins{MultiPoint: tt.plugins}
plugins := []framework.Plugin{&fakeNodePlugin{}, &fakePodPlugin{}, &filterWithoutEnqueueExtensionsPlugin{}, &emptyEventsToRegisterPlugin{}, &fakeQueueSortPlugin{}, &fakebindPlugin{}}
plugins := []fwk.Plugin{&fakeNodePlugin{}, &fakePodPlugin{}, &filterWithoutEnqueueExtensionsPlugin{}, &emptyEventsToRegisterPlugin{}, &fakeQueueSortPlugin{}, &fakebindPlugin{}}
for _, pl := range plugins {
tmpPl := pl
if err := registry.Register(pl.Name(), func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
if err := registry.Register(pl.Name(), func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return tmpPl, nil
}); err != nil {
t.Fatalf("fail to register filter plugin (%s)", pl.Name())
@@ -1209,8 +1209,8 @@ func TestFrameworkHandler_IterateOverWaitingPods(t *testing.T) {
utiltesting.Eventually(tCtx, func(utiltesting.TContext) sets.Set[string] {
// Ensure that all waitingPods in scheduler can be obtained from any profiles.
actualPodNamesInWaitingPods := sets.New[string]()
for _, fwk := range scheduler.Profiles {
fwk.IterateOverWaitingPods(func(pod framework.WaitingPod) {
for _, schedFramework := range scheduler.Profiles {
schedFramework.IterateOverWaitingPods(func(pod fwk.WaitingPod) {
actualPodNamesInWaitingPods.Insert(pod.GetPod().Name)
})
}
@@ -1220,7 +1220,7 @@ func TestFrameworkHandler_IterateOverWaitingPods(t *testing.T) {
}
}
var _ framework.QueueSortPlugin = &fakeQueueSortPlugin{}
var _ fwk.QueueSortPlugin = &fakeQueueSortPlugin{}
// fakeQueueSortPlugin is a no-op implementation for QueueSort extension point.
type fakeQueueSortPlugin struct{}
@@ -1233,7 +1233,7 @@ func (pl *fakeQueueSortPlugin) Less(_, _ fwk.QueuedPodInfo) bool {
return false
}
var _ framework.BindPlugin = &fakebindPlugin{}
var _ fwk.BindPlugin = &fakebindPlugin{}
// fakebindPlugin is a no-op implementation for Bind extension point.
type fakebindPlugin struct{}
@@ -1320,7 +1320,7 @@ func (*errorEventsToRegisterPlugin) EventsToRegister(_ context.Context) ([]fwk.C
return nil, errors.New("mock error")
}
// emptyEventsToRegisterPlugin implement interface framework.EnqueueExtensions, but returns nil from EventsToRegister.
// emptyEventsToRegisterPlugin implements the fwk.EnqueueExtensions interface, but returns nil from EventsToRegister.
// This can simulate a plugin registered at scheduler setup, but does nothing
// due to some disabled feature gate.
type emptyEventsToRegisterPlugin struct{}
@@ -1341,7 +1341,7 @@ type fakePermitPlugin struct {
}
func newFakePermitPlugin(eventRecorder events.EventRecorder) frameworkruntime.PluginFactory {
return func(ctx context.Context, configuration runtime.Object, f framework.Handle) (framework.Plugin, error) {
return func(ctx context.Context, configuration runtime.Object, f fwk.Handle) (fwk.Plugin, error) {
pl := &fakePermitPlugin{
eventRecorder: eventRecorder,
}
@@ -1367,4 +1367,4 @@ func (f fakePermitPlugin) Permit(ctx context.Context, state fwk.CycleState, p *v
return fwk.NewStatus(fwk.Wait), permitTimeout
}
var _ framework.PermitPlugin = &fakePermitPlugin{}
var _ fwk.PermitPlugin = &fakePermitPlugin{}

View file

@@ -36,7 +36,7 @@ import (
type FitPredicate func(pod *v1.Pod, node fwk.NodeInfo) *fwk.Status
// PriorityFunc is a function type which is used in fake extender.
type PriorityFunc func(pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.NodeScoreList, error)
type PriorityFunc func(pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.NodeScoreList, error)
// PriorityConfig is used in fake extender to perform Prioritize function.
type PriorityConfig struct {
@@ -78,34 +78,34 @@ func Node2PredicateExtender(pod *v1.Pod, node fwk.NodeInfo) *fwk.Status {
}
// ErrorPrioritizerExtender implements a PriorityFunc that always returns an error.
func ErrorPrioritizerExtender(pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.NodeScoreList, error) {
return &framework.NodeScoreList{}, fmt.Errorf("some error")
func ErrorPrioritizerExtender(pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.NodeScoreList, error) {
return &fwk.NodeScoreList{}, fmt.Errorf("some error")
}
// Node1PrioritizerExtender implements a PriorityFunc that gives score 10
// if the given node's name is "node1"; otherwise score 1.
func Node1PrioritizerExtender(pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.NodeScoreList, error) {
result := framework.NodeScoreList{}
func Node1PrioritizerExtender(pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.NodeScoreList, error) {
result := fwk.NodeScoreList{}
for _, node := range nodes {
score := 1
if node.Node().Name == "node1" {
score = 10
}
result = append(result, framework.NodeScore{Name: node.Node().Name, Score: int64(score)})
result = append(result, fwk.NodeScore{Name: node.Node().Name, Score: int64(score)})
}
return &result, nil
}
// Node2PrioritizerExtender implements a PriorityFunc that gives score 10
// if the given node's name is "node2"; otherwise score 1.
func Node2PrioritizerExtender(pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.NodeScoreList, error) {
result := framework.NodeScoreList{}
func Node2PrioritizerExtender(pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.NodeScoreList, error) {
result := fwk.NodeScoreList{}
for _, node := range nodes {
score := 1
if node.Node().Name == "node2" {
score = 10
}
result = append(result, framework.NodeScore{Name: node.Node().Name, Score: int64(score)})
result = append(result, fwk.NodeScore{Name: node.Node().Name, Score: int64(score)})
}
return &result, nil
}
@@ -114,7 +114,7 @@ type node2PrioritizerPlugin struct{}
// NewNode2PrioritizerPlugin returns a factory function to build node2PrioritizerPlugin.
func NewNode2PrioritizerPlugin() frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
return func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &node2PrioritizerPlugin{}, nil
}
}
@@ -134,7 +134,7 @@ func (pl *node2PrioritizerPlugin) Score(_ context.Context, _ fwk.CycleState, _ *
}
// ScoreExtensions returns nil.
func (pl *node2PrioritizerPlugin) ScoreExtensions() framework.ScoreExtensions {
func (pl *node2PrioritizerPlugin) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
@@ -182,7 +182,7 @@ func (f *FakeExtender) SupportsPreemption() bool {
func (f *FakeExtender) ProcessPreemption(
pod *v1.Pod,
nodeNameToVictims map[string]*extenderv1.Victims,
nodeInfos framework.NodeInfoLister,
nodeInfos fwk.NodeInfoLister,
) (map[string]*extenderv1.Victims, error) {
nodeNameToVictimsCopy := map[string]*extenderv1.Victims{}
// We don't want to change the original nodeNameToVictims
@@ -400,4 +400,4 @@ func (f *FakeExtender) IsInterested(pod *v1.Pod) bool {
return !f.UnInterested
}
var _ framework.Extender = &FakeExtender{}
var _ fwk.Extender = &FakeExtender{}

View file

@@ -25,7 +25,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
)
@@ -46,7 +45,7 @@ func (pl *FalseFilterPlugin) Filter(_ context.Context, _ fwk.CycleState, pod *v1
}
// NewFalseFilterPlugin initializes a FalseFilterPlugin and returns it.
func NewFalseFilterPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func NewFalseFilterPlugin(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &FalseFilterPlugin{}, nil
}
@@ -64,7 +63,7 @@ func (pl *TrueFilterPlugin) Filter(_ context.Context, _ fwk.CycleState, pod *v1.
}
// NewTrueFilterPlugin initializes a TrueFilterPlugin and returns it.
func NewTrueFilterPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func NewTrueFilterPlugin(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &TrueFilterPlugin{}, nil
}
@@ -103,7 +102,7 @@ func (pl *FakeFilterPlugin) Filter(_ context.Context, _ fwk.CycleState, pod *v1.
// NewFakeFilterPlugin initializes a fakeFilterPlugin and returns it.
func NewFakeFilterPlugin(failedNodeReturnCodeMap map[string]fwk.Code) frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
return func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &FakeFilterPlugin{
FailedNodeReturnCodeMap: failedNodeReturnCodeMap,
}, nil
@@ -132,13 +131,13 @@ func (pl *MatchFilterPlugin) Filter(_ context.Context, _ fwk.CycleState, pod *v1
}
// NewMatchFilterPlugin initializes a MatchFilterPlugin and returns it.
func NewMatchFilterPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func NewMatchFilterPlugin(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &MatchFilterPlugin{}, nil
}
// FakePreFilterPlugin is a test filter plugin.
type FakePreFilterPlugin struct {
Result *framework.PreFilterResult
Result *fwk.PreFilterResult
Status *fwk.Status
name string
}
@@ -149,18 +148,18 @@ func (pl *FakePreFilterPlugin) Name() string {
}
// PreFilter invoked at the PreFilter extension point.
func (pl *FakePreFilterPlugin) PreFilter(_ context.Context, _ fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pl *FakePreFilterPlugin) PreFilter(_ context.Context, _ fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
return pl.Result, pl.Status
}
// PreFilterExtensions no extensions implemented by this plugin.
func (pl *FakePreFilterPlugin) PreFilterExtensions() framework.PreFilterExtensions {
func (pl *FakePreFilterPlugin) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
// NewFakePreFilterPlugin initializes a fakePreFilterPlugin and returns it.
func NewFakePreFilterPlugin(name string, result *framework.PreFilterResult, status *fwk.Status) frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
func NewFakePreFilterPlugin(name string, result *fwk.PreFilterResult, status *fwk.Status) frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &FakePreFilterPlugin{
Result: result,
Status: status,
@@ -190,7 +189,7 @@ func (pl *FakeReservePlugin) Unreserve(_ context.Context, _ fwk.CycleState, _ *v
// NewFakeReservePlugin initializes a fakeReservePlugin and returns it.
func NewFakeReservePlugin(status *fwk.Status) frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
return func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &FakeReservePlugin{
Status: status,
}, nil
@@ -220,7 +219,7 @@ func (pl *FakePreBindPlugin) PreBind(_ context.Context, _ fwk.CycleState, _ *v1.
// NewFakePreBindPlugin initializes a fakePreBindPlugin and returns it.
func NewFakePreBindPlugin(preBindPreFlightStatus, preBindStatus *fwk.Status) frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
return func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &FakePreBindPlugin{
PreBindPreFlightStatus: preBindPreFlightStatus,
PreBindStatus: preBindStatus,
@@ -230,7 +229,7 @@ func NewFakePreBindPlugin(preBindPreFlightStatus, preBindStatus *fwk.Status) fra
// FakePermitPlugin is a test permit plugin.
type FakePermitPlugin struct {
Handle framework.Handle
Handle fwk.Handle
Status *fwk.Status
Timeout time.Duration
}
@@ -247,7 +246,7 @@ func (pl *FakePermitPlugin) Permit(_ context.Context, _ fwk.CycleState, p *v1.Po
// NewFakePermitPlugin initializes a fakePermitPlugin and returns it.
func NewFakePermitPlugin(status *fwk.Status, timeout time.Duration) frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, h framework.Handle) (framework.Plugin, error) {
return func(_ context.Context, _ runtime.Object, h fwk.Handle) (fwk.Plugin, error) {
return &FakePermitPlugin{
Status: status,
Timeout: timeout,
@@ -272,7 +271,7 @@ func (pl *FakePreScoreAndScorePlugin) Score(ctx context.Context, state fwk.Cycle
return pl.score, pl.scoreStatus
}
func (pl *FakePreScoreAndScorePlugin) ScoreExtensions() framework.ScoreExtensions {
func (pl *FakePreScoreAndScorePlugin) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
@@ -281,7 +280,7 @@ func (pl *FakePreScoreAndScorePlugin) PreScore(ctx context.Context, state fwk.Cy
}
func NewFakePreScoreAndScorePlugin(name string, score int64, preScoreStatus, scoreStatus *fwk.Status) frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
return func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &FakePreScoreAndScorePlugin{
name: name,
score: score,
@@ -293,7 +292,7 @@ func NewFakePreScoreAndScorePlugin(name string, score int64, preScoreStatus, sco
// NewEqualPrioritizerPlugin returns a factory function to build equalPrioritizerPlugin.
func NewEqualPrioritizerPlugin() frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
return func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return &FakePreScoreAndScorePlugin{
name: "EqualPrioritizerPlugin",
score: 1,

View file

@@ -219,6 +219,7 @@
- k8s.io/klog
- k8s.io/kube-scheduler
- k8s.io/utils
- k8s.io/dynamic-resource-allocation/structured
- baseImportPath: "./staging/src/k8s.io/kubelet"
allowedImports:

View file

@@ -1446,83 +1446,6 @@ rules:
dirs:
- staging/src/k8s.io/kubelet
library: true
- destination: kube-scheduler
branches:
- name: master
dependencies:
- repository: apimachinery
branch: master
- repository: component-base
branch: master
- repository: api
branch: master
- repository: client-go
branch: master
source:
branch: master
dirs:
- staging/src/k8s.io/kube-scheduler
- name: release-1.31
go: 1.23.12
dependencies:
- repository: apimachinery
branch: release-1.31
- repository: component-base
branch: release-1.31
- repository: api
branch: release-1.31
- repository: client-go
branch: release-1.31
source:
branch: release-1.31
dirs:
- staging/src/k8s.io/kube-scheduler
- name: release-1.32
go: 1.23.12
dependencies:
- repository: apimachinery
branch: release-1.32
- repository: component-base
branch: release-1.32
- repository: api
branch: release-1.32
- repository: client-go
branch: release-1.32
source:
branch: release-1.32
dirs:
- staging/src/k8s.io/kube-scheduler
- name: release-1.33
go: 1.24.6
dependencies:
- repository: apimachinery
branch: release-1.33
- repository: component-base
branch: release-1.33
- repository: api
branch: release-1.33
- repository: client-go
branch: release-1.33
source:
branch: release-1.33
dirs:
- staging/src/k8s.io/kube-scheduler
- name: release-1.34
go: 1.24.6
dependencies:
- repository: apimachinery
branch: release-1.34
- repository: component-base
branch: release-1.34
- repository: api
branch: release-1.34
- repository: client-go
branch: release-1.34
source:
branch: release-1.34
dirs:
- staging/src/k8s.io/kube-scheduler
library: true
- destination: controller-manager
branches:
- name: master
@@ -2350,6 +2273,103 @@ rules:
branch: release-1.34
dirs:
- staging/src/k8s.io/dynamic-resource-allocation
- destination: kube-scheduler
branches:
- name: master
dependencies:
- repository: apimachinery
branch: master
- repository: component-base
branch: master
- repository: api
branch: master
- repository: client-go
branch: master
- repository: apiserver
branch: master
- repository: component-helpers
branch: master
- repository: dynamic-resource-allocation
branch: master
- repository: kms
branch: master
- repository: kubelet
branch: master
source:
branch: master
dirs:
- staging/src/k8s.io/kube-scheduler
- name: release-1.31
go: 1.23.12
dependencies:
- repository: apimachinery
branch: release-1.31
- repository: component-base
branch: release-1.31
- repository: api
branch: release-1.31
- repository: client-go
branch: release-1.31
source:
branch: release-1.31
dirs:
- staging/src/k8s.io/kube-scheduler
- name: release-1.32
go: 1.23.12
dependencies:
- repository: apimachinery
branch: release-1.32
- repository: component-base
branch: release-1.32
- repository: api
branch: release-1.32
- repository: client-go
branch: release-1.32
source:
branch: release-1.32
dirs:
- staging/src/k8s.io/kube-scheduler
- name: release-1.33
go: 1.24.6
dependencies:
- repository: apimachinery
branch: release-1.33
- repository: component-base
branch: release-1.33
- repository: api
branch: release-1.33
- repository: client-go
branch: release-1.33
- repository: apiserver
branch: release-1.33
- repository: component-helpers
branch: release-1.33
- repository: dynamic-resource-allocation
branch: release-1.33
- repository: kms
branch: release-1.33
- repository: kubelet
branch: release-1.33
source:
branch: release-1.33
dirs:
- staging/src/k8s.io/kube-scheduler
- name: release-1.34
go: 1.24.6
dependencies:
- repository: apimachinery
branch: release-1.34
- repository: component-base
branch: release-1.34
- repository: api
branch: release-1.34
- repository: client-go
branch: release-1.34
source:
branch: release-1.34
dirs:
- staging/src/k8s.io/kube-scheduler
library: true
- destination: endpointslice
branches:
- name: master

View file

@@ -0,0 +1,57 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
v1 "k8s.io/api/core/v1"
)
// APICacher defines methods that send API calls through the scheduler's cache
// before they are executed asynchronously by the APIDispatcher.
// This ensures the scheduler's internal state is updated optimistically,
// reflecting the intended outcome of the call.
// These methods should be used only if the SchedulerAsyncAPICalls feature gate is enabled.
type APICacher interface {
// PatchPodStatus sends a patch request for a Pod's status.
// The patch could be first applied to the cached Pod object and then the API call is executed asynchronously.
// It returns a channel that can be used to wait for the call's completion.
PatchPodStatus(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *NominatingInfo) (<-chan error, error)
// BindPod sends a binding request. The binding could be first applied to the cached Pod object
// and then the API call is executed asynchronously.
// It returns a channel that can be used to wait for the call's completion.
BindPod(binding *v1.Binding) (<-chan error, error)
// WaitOnFinish blocks until the result of an API call is sent to the given onFinish channel
// (returned by methods BindPod or PreemptPod).
//
// It returns the error received from the channel.
// It also returns nil if the call was skipped or overwritten,
// as these are considered successful lifecycle outcomes.
// A direct read of the onFinish channel can be used to access these results.
WaitOnFinish(ctx context.Context, onFinish <-chan error) error
}
// APICallImplementations defines constructors for each APICall that is used by the scheduler internally.
type APICallImplementations[T, K APICall] struct {
// PodStatusPatch is a constructor used to create APICall object for pod status patch.
PodStatusPatch func(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *NominatingInfo) T
// PodBinding is a constructor used to create APICall object for pod binding.
PodBinding func(binding *v1.Binding) K
}
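To make the intended call flow concrete, here is a minimal sketch (not part of this commit; the helper bindAndWait and its package are made up) of driving an asynchronous binding through APICacher:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
)

// bindAndWait issues a binding through the APICacher and blocks until the
// asynchronous API call completes.
func bindAndWait(ctx context.Context, c fwk.APICacher, binding *v1.Binding) error {
	// The binding may first be applied to the cached Pod object.
	onFinish, err := c.BindPod(binding)
	if err != nil {
		return err
	}
	// WaitOnFinish also returns nil when the call was skipped or overwritten.
	return c.WaitOnFinish(ctx, onFinish)
}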

View file

@@ -19,7 +19,6 @@ package framework
import (
v1 "k8s.io/api/core/v1"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
fwk "k8s.io/kube-scheduler/framework"
)
// Extender is an interface for external processes to influence scheduling
@@ -34,12 +33,12 @@ type Extender interface {
// The failedNodes and failedAndUnresolvableNodes optionally contain the lists
// of failed nodes and failure reasons; nodes in the latter are
// unresolvable.
Filter(pod *v1.Pod, nodes []fwk.NodeInfo) (filteredNodes []fwk.NodeInfo, failedNodesMap extenderv1.FailedNodesMap, failedAndUnresolvable extenderv1.FailedNodesMap, err error)
Filter(pod *v1.Pod, nodes []NodeInfo) (filteredNodes []NodeInfo, failedNodesMap extenderv1.FailedNodesMap, failedAndUnresolvable extenderv1.FailedNodesMap, err error)
// Prioritize based on extender-implemented priority functions. The returned scores & weight
// are used to compute the weighted score for an extender. The weighted scores are added to
// the scores computed by Kubernetes scheduler. The total scores are used to do the host selection.
Prioritize(pod *v1.Pod, nodes []fwk.NodeInfo) (hostPriorities *extenderv1.HostPriorityList, weight int64, err error)
Prioritize(pod *v1.Pod, nodes []NodeInfo) (hostPriorities *extenderv1.HostPriorityList, weight int64, err error)
// Bind delegates the action of binding a pod to a node to the extender.
Bind(binding *v1.Binding) error

View file

@@ -19,11 +19,23 @@ limitations under the License.
package framework
import (
"context"
"errors"
"math"
"strings"
"time"
"github.com/google/go-cmp/cmp" //nolint:depguard
"github.com/google/go-cmp/cmp/cmpopts" //nolint:depguard
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/events"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
)
// Code is the Status code/type which is returned from plugins.
@@ -233,3 +245,507 @@ func AsStatus(err error) *Status {
err: err,
}
}
// NodeToStatusReader is a read-only interface of NodeToStatus passed to each PostFilter plugin.
type NodeToStatusReader interface {
// Get returns the status for given nodeName.
// If the node is not in the map, the AbsentNodesStatus is returned.
Get(nodeName string) *Status
// NodesForStatusCode returns a list of NodeInfos for the nodes that have a given status code.
// It also returns the NodeInfos of the nodes covered by AbsentNodesStatus when that status matches the code.
NodesForStatusCode(nodeLister NodeInfoLister, code Code) ([]NodeInfo, error)
}
// NodeScoreList declares a list of nodes and their scores.
type NodeScoreList []NodeScore
// NodeScore is a struct with node name and score.
type NodeScore struct {
Name string
Score int64
}
// NodePluginScores is a struct with node name and scores for that node.
type NodePluginScores struct {
// Name is node name.
Name string
// Scores is scores from plugins and extenders.
Scores []PluginScore
// TotalScore is the total score in Scores.
TotalScore int64
}
// PluginScore is a struct with plugin/extender name and score.
type PluginScore struct {
// Name is the name of plugin or extender.
Name string
Score int64
}
const (
// MaxNodeScore is the maximum score a Score plugin is expected to return.
MaxNodeScore int64 = 100
// MinNodeScore is the minimum score a Score plugin is expected to return.
MinNodeScore int64 = 0
// MaxTotalScore is the maximum total score.
MaxTotalScore int64 = math.MaxInt64
)
type NominatingMode int
const (
ModeNoop NominatingMode = iota
ModeOverride
)
type NominatingInfo struct {
NominatedNodeName string
NominatingMode NominatingMode
}
func (ni *NominatingInfo) Mode() NominatingMode {
if ni == nil {
return ModeNoop
}
return ni.NominatingMode
}
// WaitingPod represents a pod currently waiting in the permit phase.
type WaitingPod interface {
// GetPod returns a reference to the waiting pod.
GetPod() *v1.Pod
// GetPendingPlugins returns a list of pending Permit plugins' names.
GetPendingPlugins() []string
// Allow declares the waiting pod is allowed to be scheduled by the plugin named as "pluginName".
// If this is the last remaining plugin to allow, then a success signal is delivered
// to unblock the pod.
Allow(pluginName string)
// Reject declares the waiting pod unschedulable.
Reject(pluginName, msg string)
}
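As a rough usage sketch, a component holding a Handle (defined later in this file) could release waiting pods like this; the namespace check and the plugin name "MyPermitPlugin" are hypothetical:

package example

import fwk "k8s.io/kube-scheduler/framework"

// allowBatchPods approves every pod waiting in the "batch" namespace on
// behalf of a made-up plugin named "MyPermitPlugin".
func allowBatchPods(h fwk.Handle) {
	h.IterateOverWaitingPods(func(wp fwk.WaitingPod) {
		if wp.GetPod().Namespace == "batch" { // made-up criterion
			wp.Allow("MyPermitPlugin")
		}
	})
}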
// PreFilterResult wraps needed info for scheduler framework to act upon PreFilter phase.
type PreFilterResult struct {
// The set of nodes that should be considered downstream; if nil then
// all nodes are eligible.
NodeNames sets.Set[string]
}
func (p *PreFilterResult) AllNodes() bool {
return p == nil || p.NodeNames == nil
}
func (p *PreFilterResult) Merge(in *PreFilterResult) *PreFilterResult {
if p.AllNodes() && in.AllNodes() {
return nil
}
r := PreFilterResult{}
if p.AllNodes() {
r.NodeNames = in.NodeNames.Clone()
return &r
}
if in.AllNodes() {
r.NodeNames = p.NodeNames.Clone()
return &r
}
r.NodeNames = p.NodeNames.Intersection(in.NodeNames)
return &r
}
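A small illustrative program for the intersection semantics above, where a nil result or nil NodeNames means "all nodes are eligible":

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
	fwk "k8s.io/kube-scheduler/framework"
)

func main() {
	a := &fwk.PreFilterResult{NodeNames: sets.New("node1", "node2")}
	b := &fwk.PreFilterResult{NodeNames: sets.New("node2", "node3")}
	fmt.Println(a.Merge(b).NodeNames.UnsortedList()) // [node2]

	var all *fwk.PreFilterResult // nil receiver: every node is eligible
	fmt.Println(all.Merge(b).NodeNames.UnsortedList()) // clone of b's set
}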
// PostFilterResult wraps needed info for scheduler framework to act upon PostFilter phase.
type PostFilterResult struct {
*NominatingInfo
}
// Plugin is the parent type for all the scheduling framework plugins.
type Plugin interface {
Name() string
}
// PreEnqueuePlugin is an interface that must be implemented by "PreEnqueue" plugins.
// These plugins are called prior to adding Pods to activeQ.
// Note: a PreEnqueue plugin is expected to be lightweight and efficient, so it's not expected to
// involve expensive calls like accessing external endpoints; otherwise it'd block other
// Pods' enqueuing in event handlers.
type PreEnqueuePlugin interface {
Plugin
// PreEnqueue is called prior to adding Pods to activeQ.
PreEnqueue(ctx context.Context, p *v1.Pod) *Status
}
// LessFunc is the function used to sort pod infos.
type LessFunc func(podInfo1, podInfo2 QueuedPodInfo) bool
// QueueSortPlugin is an interface that must be implemented by "QueueSort" plugins.
// These plugins are used to sort pods in the scheduling queue. Only one queue sort
// plugin may be enabled at a time.
type QueueSortPlugin interface {
Plugin
// Less are used to sort pods in the scheduling queue.
Less(QueuedPodInfo, QueuedPodInfo) bool
}
// EnqueueExtensions is an optional interface that plugins can implement to efficiently
// move unschedulable Pods in internal scheduling queues.
// In the scheduler, Pods can be unschedulable by PreEnqueue, PreFilter, Filter, Reserve, and Permit plugins,
// and Pods rejected by these plugins are requeued based on this extension point.
// Failures from other extension points are regarded as transient errors (e.g., a network failure),
// and the scheduler requeues Pods without consulting this extension point - it always requeues them to activeQ after backoff.
// This is because such transient errors cannot be resolved by specific cluster events,
// and we have no choice but to keep retrying scheduling until the failure is resolved.
//
// Plugins that make pod unschedulable (PreEnqueue, PreFilter, Filter, Reserve, and Permit plugins) must implement this interface,
// otherwise the default implementation will be used, which is less efficient in requeueing Pods rejected by the plugin.
//
// Also, if EventsToRegister returns an empty list, that means the Pods failed by the plugin are not requeued by any events,
// which doesn't make sense in most cases (and is very likely a misuse)
// since the pods rejected by the plugin could be stuck in the unschedulable pod pool forever.
//
// If a plugin at an extension point other than the ones above implements this interface, it is simply ignored.
type EnqueueExtensions interface {
Plugin
// EventsToRegister returns a series of possible events that may cause a Pod
// failed by this plugin schedulable. Each event has a callback function that
// filters out events to reduce useless retry of Pod's scheduling.
// The events will be registered when instantiating the internal scheduling queue,
// and leveraged to build event handlers dynamically.
// When it returns an error, the scheduler fails to start.
// Note: the returned list needs to be determined at startup,
// and the scheduler evaluates it only once during startup.
// Do not change the result at runtime, for example based on the cluster's state.
//
// Appropriate implementation of this function will make Pod's re-scheduling accurate and performant.
EventsToRegister(context.Context) ([]ClusterEventWithHint, error)
}
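For illustration, a hypothetical plugin (nodeWatcher, not part of this commit) that asks to be re-evaluated whenever a Node is added; the hint callback here unconditionally requeues, whereas a real plugin would inspect the added Node first:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	fwk "k8s.io/kube-scheduler/framework"
)

// nodeWatcher is a hypothetical plugin whose rejected Pods should be retried
// when a new Node appears.
type nodeWatcher struct{}

func (p *nodeWatcher) Name() string { return "NodeWatcher" }

func (p *nodeWatcher) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
	return []fwk.ClusterEventWithHint{
		{
			Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.Add},
			// The hint filters events so only potentially helpful ones requeue the Pod.
			QueueingHintFn: func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) (fwk.QueueingHint, error) {
				return fwk.Queue, nil // a real plugin would inspect the added Node
			},
		},
	}, nil
}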
// PreFilterExtensions is an interface that plugins can include to provide
// callbacks for making incremental updates to their pre-calculated
// state.
type PreFilterExtensions interface {
// AddPod is called by the framework while trying to evaluate the impact
// of adding podToAdd to the node while scheduling podToSchedule.
AddPod(ctx context.Context, state CycleState, podToSchedule *v1.Pod, podInfoToAdd PodInfo, nodeInfo NodeInfo) *Status
// RemovePod is called by the framework while trying to evaluate the impact
// of removing podToRemove from the node while scheduling podToSchedule.
RemovePod(ctx context.Context, state CycleState, podToSchedule *v1.Pod, podInfoToRemove PodInfo, nodeInfo NodeInfo) *Status
}
// PreFilterPlugin is an interface that must be implemented by "PreFilter" plugins.
// These plugins are called at the beginning of the scheduling cycle.
type PreFilterPlugin interface {
Plugin
// PreFilter is called at the beginning of the scheduling cycle. All PreFilter
// plugins must return success or the pod will be rejected. PreFilter could optionally
// return a PreFilterResult to influence which nodes to evaluate downstream. This is useful
// for cases where it is possible to determine the subset of nodes to process in O(1) time.
// When PreFilterResult filters out some Nodes, the framework treats those filtered-out Nodes as having returned "UnschedulableAndUnresolvable",
// i.e., those Nodes are excluded from the preemption candidates.
//
// When it returns Skip status, returned PreFilterResult and other fields in status are just ignored,
// and coupled Filter plugin/PreFilterExtensions() will be skipped in this scheduling cycle.
PreFilter(ctx context.Context, state CycleState, p *v1.Pod, nodes []NodeInfo) (*PreFilterResult, *Status)
// PreFilterExtensions returns a PreFilterExtensions interface if the plugin implements one,
// or nil if it does not. A Pre-filter plugin can provide extensions to incrementally
// modify its pre-processed info. The framework guarantees that the extensions
// AddPod/RemovePod will only be called after PreFilter, possibly on a cloned
// CycleState, and may call those functions more than once before calling
// Filter again on a specific node.
PreFilterExtensions() PreFilterExtensions
}
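A minimal sketch of the O(1) narrowing described above; the plugin, its name, and the example.com/pinned-node annotation are all made up:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	fwk "k8s.io/kube-scheduler/framework"
)

// pinnedNode restricts evaluation to the node named in a made-up annotation.
type pinnedNode struct{}

func (p *pinnedNode) Name() string { return "PinnedNode" }

func (p *pinnedNode) PreFilter(_ context.Context, _ fwk.CycleState, pod *v1.Pod, _ []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
	name, ok := pod.Annotations["example.com/pinned-node"]
	if !ok {
		return nil, nil // nil result: all nodes remain eligible
	}
	// O(1) narrowing; nodes filtered out here are treated as UnschedulableAndUnresolvable.
	return &fwk.PreFilterResult{NodeNames: sets.New(name)}, nil
}

func (p *pinnedNode) PreFilterExtensions() fwk.PreFilterExtensions { return nil }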
// FilterPlugin is an interface for Filter plugins. These plugins are called at the
// filter extension point for filtering out hosts that cannot run a pod.
// This concept used to be called 'predicate' in the original scheduler.
// These plugins should return "Success", "Unschedulable" or "Error" in Status.code.
// However, the scheduler accepts other valid codes as well.
// Anything other than "Success" will lead to exclusion of the given host from
// running the pod.
type FilterPlugin interface {
Plugin
// Filter is called by the scheduling framework.
// All FilterPlugins should return "Success" to declare that
// the given node fits the pod. If Filter doesn't return "Success",
// it will return "Unschedulable", "UnschedulableAndUnresolvable" or "Error".
//
// "Error" aborts pod scheduling and puts the pod into the backoff queue.
//
// For the node being evaluated, Filter plugins should look at the passed
// nodeInfo reference for this particular node's information (e.g., pods
// considered to be running on the node) instead of looking it up in the
// NodeInfoSnapshot because we don't guarantee that they will be the same.
// For example, during preemption, we may pass a copy of the original
// nodeInfo object that has some pods removed from it to evaluate the
// possibility of preempting them to schedule the target pod.
//
// Plugins are encouraged to check the context for cancellation.
// Once canceled, they should return as soon as possible with
// an UnschedulableAndUnresolvable status that includes the
// `context.Cause(ctx)` error explanation. For example, the
// context gets canceled when a sufficient number of suitable
// nodes have been found and searching for more isn't necessary
// anymore.
Filter(ctx context.Context, state CycleState, pod *v1.Pod, nodeInfo NodeInfo) *Status
}
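Continuing the hypothetical pinnedNode sketch (same package and imports as above), a matching Filter that also honors context cancellation as recommended:

// Filter double-checks the pinning and returns early when the scheduling
// cycle has already been canceled.
func (p *pinnedNode) Filter(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
	select {
	case <-ctx.Done():
		// E.g., enough feasible nodes have already been found.
		return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, context.Cause(ctx).Error())
	default:
	}
	if pinned, ok := pod.Annotations["example.com/pinned-node"]; ok && nodeInfo.Node().Name != pinned {
		return fwk.NewStatus(fwk.Unschedulable, "node is not the pinned node")
	}
	return nil // Success
}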
// PostFilterPlugin is an interface for "PostFilter" plugins. These plugins are called
// after a pod cannot be scheduled.
type PostFilterPlugin interface {
Plugin
// PostFilter is called by the scheduling framework
// when the scheduling cycle failed at PreFilter or Filter by Unschedulable or UnschedulableAndUnresolvable.
// NodeToStatusReader has statuses that each Node got in PreFilter or Filter phase.
//
// If you're implementing a custom preemption with PostFilter, ignoring Nodes with UnschedulableAndUnresolvable is the responsibility of your plugin,
// meaning NodeToStatusReader could have Nodes with UnschedulableAndUnresolvable
// and the scheduling framework does call PostFilter plugins even when all Nodes in NodeToStatusReader are UnschedulableAndUnresolvable.
//
// A PostFilter plugin should return one of the following statuses:
// - Unschedulable: the plugin gets executed successfully but the pod cannot be made schedulable.
// - Success: the plugin gets executed successfully and the pod can be made schedulable.
// - Error: the plugin aborts due to some internal error.
//
// Informational plugins should be configured ahead of other ones, and always return Unschedulable status.
// Optionally, a non-nil PostFilterResult may be returned along with a Success status. For example,
// a preemption plugin may choose to return nominatedNodeName, so that framework can reuse that to update the
// preemptor pod's .status.nominatedNodeName field.
PostFilter(ctx context.Context, state CycleState, pod *v1.Pod, filteredNodeStatusMap NodeToStatusReader) (*PostFilterResult, *Status)
}
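A heavily simplified, hypothetical PostFilter sketch: it nominates the first node that failed with plain Unschedulable, standing in for real preemption logic (victim selection and eviction are omitted):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
)

// naiveNominator nominates the first node that failed with plain Unschedulable.
type naiveNominator struct {
	handle fwk.Handle
}

func (p *naiveNominator) Name() string { return "NaiveNominator" }

func (p *naiveNominator) PostFilter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, m fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status) {
	nodes, err := m.NodesForStatusCode(p.handle.SnapshotSharedLister().NodeInfos(), fwk.Unschedulable)
	if err != nil {
		return nil, fwk.AsStatus(err)
	}
	if len(nodes) == 0 {
		return nil, fwk.NewStatus(fwk.Unschedulable)
	}
	return &fwk.PostFilterResult{NominatingInfo: &fwk.NominatingInfo{
		NominatedNodeName: nodes[0].Node().Name,
		NominatingMode:    fwk.ModeOverride,
	}}, fwk.NewStatus(fwk.Success)
}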
// PreScorePlugin is an interface for "PreScore" plugin. PreScore is an
// informational extension point. Plugins will be called with a list of nodes
// that passed the filtering phase. A plugin may use this data to update internal
// state or to generate logs/metrics.
type PreScorePlugin interface {
Plugin
// PreScore is called by the scheduling framework after a list of nodes
// passed the filtering phase. All PreScore plugins must return success or
// the pod will be rejected.
// When it returns Skip status, other fields in status are just ignored,
// and coupled Score plugin will be skipped in this scheduling cycle.
PreScore(ctx context.Context, state CycleState, pod *v1.Pod, nodes []NodeInfo) *Status
}
// ScoreExtensions is an interface for Score extended functionality.
type ScoreExtensions interface {
// NormalizeScore is called for all node scores produced by the same plugin's "Score"
// method. A successful run of NormalizeScore will update the scores list and return
// a success status.
NormalizeScore(ctx context.Context, state CycleState, p *v1.Pod, scores NodeScoreList) *Status
}
// ScorePlugin is an interface that must be implemented by "Score" plugins to rank
// nodes that passed the filtering phase.
type ScorePlugin interface {
Plugin
// Score is called on each filtered node. It must return success and an integer
// indicating the rank of the node. All scoring plugins must return success or
// the pod will be rejected.
Score(ctx context.Context, state CycleState, p *v1.Pod, nodeInfo NodeInfo) (int64, *Status)
// ScoreExtensions returns a ScoreExtensions interface if it implements one, or nil if it does not.
ScoreExtensions() ScoreExtensions
}
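An illustrative Score plugin with normalization; the plugin is made up, and GetPods as a NodeInfo accessor is an assumption here:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
)

// leastPods scores nodes so that emptier nodes win after normalization.
type leastPods struct{}

func (p *leastPods) Name() string { return "LeastPods" }

func (p *leastPods) Score(_ context.Context, _ fwk.CycleState, _ *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
	return int64(len(nodeInfo.GetPods())), nil // raw score: current pod count
}

func (p *leastPods) ScoreExtensions() fwk.ScoreExtensions { return p }

func (p *leastPods) NormalizeScore(_ context.Context, _ fwk.CycleState, _ *v1.Pod, scores fwk.NodeScoreList) *fwk.Status {
	var highest int64
	for _, s := range scores {
		if s.Score > highest {
			highest = s.Score
		}
	}
	for i := range scores {
		if highest == 0 {
			scores[i].Score = fwk.MaxNodeScore
			continue
		}
		// Invert so that emptier nodes land closer to MaxNodeScore.
		scores[i].Score = fwk.MaxNodeScore - scores[i].Score*fwk.MaxNodeScore/highest
	}
	return nil
}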
// ReservePlugin is an interface for plugins with Reserve and Unreserve
// methods. These are meant to update the state of the plugin. This concept
// used to be called 'assume' in the original scheduler. These plugins should
// return only Success or Error in Status.code. However, the scheduler accepts
// other valid codes as well. Anything other than Success will lead to
// rejection of the pod.
type ReservePlugin interface {
Plugin
// Reserve is called by the scheduling framework when the scheduler cache is
// updated. If this method returns a failed Status, the scheduler will call
// the Unreserve method for all enabled ReservePlugins.
Reserve(ctx context.Context, state CycleState, p *v1.Pod, nodeName string) *Status
// Unreserve is called by the scheduling framework when a reserved pod was
// rejected, an error occurred during reservation of subsequent plugins, or
// in a later phase. The Unreserve method implementation must be idempotent
// and may be called by the scheduler even if the corresponding Reserve
// method for the same plugin was not called.
Unreserve(ctx context.Context, state CycleState, p *v1.Pod, nodeName string)
}
// PreBindPlugin is an interface that must be implemented by "PreBind" plugins.
// These plugins are called before a pod is bound.
type PreBindPlugin interface {
Plugin
// PreBindPreFlight is called before PreBind, and the plugin is supposed to return Success, Skip, or Error status.
// If it returns Success, it means this PreBind plugin will handle this pod.
// If it returns Skip, it means this PreBind plugin has nothing to do with the pod, and PreBind will be skipped.
// This function should be lightweight, and shouldn't perform any actual operations, e.g., creating a volume.
PreBindPreFlight(ctx context.Context, state CycleState, p *v1.Pod, nodeName string) *Status
// PreBind is called before binding a pod. All prebind plugins must return
// success or the pod will be rejected and won't be sent for binding.
PreBind(ctx context.Context, state CycleState, p *v1.Pod, nodeName string) *Status
}
// PostBindPlugin is an interface that must be implemented by "PostBind" plugins.
// These plugins are called after a pod is successfully bound to a node.
type PostBindPlugin interface {
Plugin
// PostBind is called after a pod is successfully bound. These plugins are
// informational. A common application of this extension point is for cleaning
// up. If a plugin needs to clean-up its state after a pod is scheduled and
// bound, PostBind is the extension point that it should register.
PostBind(ctx context.Context, state CycleState, p *v1.Pod, nodeName string)
}
// PermitPlugin is an interface that must be implemented by "Permit" plugins.
// These plugins are called before a pod is bound to a node.
type PermitPlugin interface {
Plugin
// Permit is called before binding a pod (and before prebind plugins). Permit
// plugins are used to prevent or delay the binding of a Pod. A permit plugin
// must return success or wait with timeout duration, or the pod will be rejected.
// The pod will also be rejected if the wait timeout or the pod is rejected while
// waiting. Note that if the plugin returns "wait", the framework will wait only
// after running the remaining plugins given that no other plugin rejects the pod.
Permit(ctx context.Context, state CycleState, p *v1.Pod, nodeName string) (*Status, time.Duration)
}
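A minimal hypothetical Permit sketch: pods carrying a made-up example.com/gated annotation wait up to ten seconds for an external approver to call Allow or Reject on the WaitingPod:

package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
)

// gatedPermit delays binding of pods that carry a made-up gating annotation.
type gatedPermit struct{}

func (p *gatedPermit) Name() string { return "GatedPermit" }

func (p *gatedPermit) Permit(_ context.Context, _ fwk.CycleState, pod *v1.Pod, _ string) (*fwk.Status, time.Duration) {
	if _, gated := pod.Annotations["example.com/gated"]; gated {
		// An external approver must call Allow/Reject on the WaitingPod in time.
		return fwk.NewStatus(fwk.Wait), 10 * time.Second
	}
	return nil, 0 // Success: proceed to binding immediately
}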
// BindPlugin is an interface that must be implemented by "Bind" plugins. Bind
// plugins are used to bind a pod to a Node.
type BindPlugin interface {
Plugin
// Bind plugins will not be called until all pre-bind plugins have completed. Each
// bind plugin is called in the configured order. A bind plugin may choose whether
// or not to handle the given Pod. If a bind plugin chooses to handle a Pod, the
// remaining bind plugins are skipped. When a bind plugin does not handle a pod,
// it must return Skip in its Status code. If a bind plugin returns an Error, the
// pod is rejected and will not be bound.
Bind(ctx context.Context, state CycleState, p *v1.Pod, nodeName string) *Status
}
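A sketch of the handover convention described above: a hypothetical bind plugin that handles only pods addressed to "my-scheduler" and returns Skip for everything else; the client-go Bind call is the standard binding path:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	fwk "k8s.io/kube-scheduler/framework"
)

// clientBinder handles only pods addressed to "my-scheduler".
type clientBinder struct {
	client clientset.Interface
}

func (b *clientBinder) Name() string { return "ClientBinder" }

func (b *clientBinder) Bind(ctx context.Context, _ fwk.CycleState, p *v1.Pod, nodeName string) *fwk.Status {
	if p.Spec.SchedulerName != "my-scheduler" { // made-up handover criterion
		return fwk.NewStatus(fwk.Skip) // let the next bind plugin handle the pod
	}
	binding := &v1.Binding{
		ObjectMeta: metav1.ObjectMeta{Namespace: p.Namespace, Name: p.Name, UID: p.UID},
		Target:     v1.ObjectReference{Kind: "Node", Name: nodeName},
	}
	if err := b.client.CoreV1().Pods(p.Namespace).Bind(ctx, binding, metav1.CreateOptions{}); err != nil {
		return fwk.AsStatus(err)
	}
	return nil
}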
// Handle provides data and some tools that plugins can use. It is
// passed to the plugin factories at the time of plugin initialization. Plugins
// must store and use this handle to call framework functions.
type Handle interface {
// PodNominator abstracts operations to maintain nominated Pods.
PodNominator
// PluginsRunner abstracts operations to run some plugins.
PluginsRunner
// PodActivator abstracts operations in the scheduling queue.
PodActivator
// SnapshotSharedLister returns listers from the latest NodeInfo Snapshot. The snapshot
// is taken at the beginning of a scheduling cycle and remains unchanged until
// a pod finishes "Permit" point.
//
// It should be used only during scheduling cycle:
// - There is no guarantee that the information remains unchanged in the binding phase of scheduling.
// So, plugins shouldn't use it in the binding cycle (pre-bind/bind/post-bind/un-reserve plugins);
// otherwise, a concurrent read/write error might occur.
// - There is no guarantee that the information is always up-to-date.
// So, plugins shouldn't use it in QueueingHint and PreEnqueue;
// otherwise, they might make a decision based on stale information.
//
// Instead, they should use the resources obtained from Informers created via SharedInformerFactory().
SnapshotSharedLister() SharedLister
// IterateOverWaitingPods acquires a read lock and iterates over the WaitingPods map.
IterateOverWaitingPods(callback func(WaitingPod))
// GetWaitingPod returns a waiting pod given its UID.
GetWaitingPod(uid types.UID) WaitingPod
// RejectWaitingPod rejects a waiting pod given its UID.
// The return value indicates if the pod is waiting or not.
RejectWaitingPod(uid types.UID) bool
// ClientSet returns a kubernetes clientSet.
ClientSet() clientset.Interface
// KubeConfig returns the raw kube config.
KubeConfig() *restclient.Config
// EventRecorder returns an event recorder.
EventRecorder() events.EventRecorder
SharedInformerFactory() informers.SharedInformerFactory
// SharedDRAManager can be used to obtain DRA objects, and track modifications to them in-memory - mainly by the DRA plugin.
// A non-default implementation can be plugged into the framework to simulate the state of DRA objects.
SharedDRAManager() SharedDRAManager
// RunFilterPluginsWithNominatedPods runs the set of configured filter plugins for the pod on the given node, taking nominated pods into account.
RunFilterPluginsWithNominatedPods(ctx context.Context, state CycleState, pod *v1.Pod, info NodeInfo) *Status
// Extenders returns registered scheduler extenders.
Extenders() []Extender
// Parallelizer returns a parallelizer holding parallelism for scheduler.
Parallelizer() Parallelizer
// APIDispatcher returns an APIDispatcher that can be used to dispatch API calls directly.
// This is non-nil if the SchedulerAsyncAPICalls feature gate is enabled.
APIDispatcher() APIDispatcher
// APICacher returns an APICacher that coordinates API calls with the scheduler's internal cache.
// Use this to ensure the scheduler's view of the cluster remains consistent.
// This is non-nil if the SchedulerAsyncAPICalls feature gate is enabled.
APICacher() APICacher
}
// Parallelizer helps run scheduling operations in parallel chunks where possible, to improve performance and CPU utilization.
type Parallelizer interface {
// Until executes the given doWorkPiece func in parallel chunks, if applicable. The maximum number of chunks is given by the pieces parameter.
Until(ctx context.Context, pieces int, doWorkPiece workqueue.DoWorkPieceFunc, operation string)
}
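A small usage sketch: each piece writes to its own slot of a pre-sized slice, so no locking is needed; GetPods as a NodeInfo accessor is an assumption:

package example

import (
	"context"

	fwk "k8s.io/kube-scheduler/framework"
)

// countAll fans per-node work out over the parallelizer.
func countAll(ctx context.Context, h fwk.Handle, nodes []fwk.NodeInfo) []int64 {
	counts := make([]int64, len(nodes))
	h.Parallelizer().Until(ctx, len(nodes), func(i int) {
		counts[i] = int64(len(nodes[i].GetPods())) // each piece owns slot i
	}, "countAll")
	return counts
}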
// PodActivator abstracts operations in the scheduling queue.
type PodActivator interface {
// Activate moves the given pods to activeQ.
// If a pod isn't found in unschedulablePods or backoffQ and it's in-flight,
// the wildcard event is registered so that the pod will be requeued when it comes back.
// But, if a pod isn't found in unschedulablePods or backoffQ and it's not in-flight (i.e., completely unknown pod),
// Activate would ignore the pod.
Activate(logger klog.Logger, pods map[string]*v1.Pod)
}
// PodNominator abstracts operations to maintain nominated Pods.
type PodNominator interface {
// AddNominatedPod adds the given pod to the nominator or
// updates it if it already exists.
AddNominatedPod(logger klog.Logger, pod PodInfo, nominatingInfo *NominatingInfo)
// DeleteNominatedPodIfExists deletes nominatedPod from internal cache. It's a no-op if it doesn't exist.
DeleteNominatedPodIfExists(pod *v1.Pod)
// UpdateNominatedPod updates the <oldPod> with <newPod>.
UpdateNominatedPod(logger klog.Logger, oldPod *v1.Pod, newPodInfo PodInfo)
// NominatedPodsForNode returns nominatedPods on the given node.
NominatedPodsForNode(nodeName string) []PodInfo
}
// PluginsRunner abstracts operations to run some plugins.
// This is used by preemption PostFilter plugins when evaluating the feasibility of
// scheduling the pod on nodes when certain running pods get evicted.
type PluginsRunner interface {
// RunPreScorePlugins runs the set of configured PreScore plugins. If any
// of these plugins returns any status other than "Success", the given pod is rejected.
RunPreScorePlugins(context.Context, CycleState, *v1.Pod, []NodeInfo) *Status
// RunScorePlugins runs the set of configured scoring plugins.
// It returns a list that stores scores from each plugin and total score for each Node.
// It also returns *Status, which is set to non-success if any of the plugins returns
// a non-success status.
RunScorePlugins(context.Context, CycleState, *v1.Pod, []NodeInfo) ([]NodePluginScores, *Status)
// RunFilterPlugins runs the set of configured Filter plugins for pod on
// the given node. Note that for the node being evaluated, the passed nodeInfo
// reference could be different from the one in NodeInfoSnapshot map (e.g., pods
// considered to be running on the node could be different). For example, during
// preemption, we may pass a copy of the original nodeInfo object that has some pods
// removed from it to evaluate the possibility of preempting them to
// schedule the target pod.
RunFilterPlugins(context.Context, CycleState, *v1.Pod, NodeInfo) *Status
// RunPreFilterExtensionAddPod calls the AddPod interface for the set of configured
// PreFilter plugins. It returns directly if any of the plugins return any
// status other than Success.
RunPreFilterExtensionAddPod(ctx context.Context, state CycleState, podToSchedule *v1.Pod, podInfoToAdd PodInfo, nodeInfo NodeInfo) *Status
// RunPreFilterExtensionRemovePod calls the RemovePod interface for the set of configured
// PreFilter plugins. It returns directly if any of the plugins return any
// status other than Success.
RunPreFilterExtensionRemovePod(ctx context.Context, state CycleState, podToSchedule *v1.Pod, podInfoToRemove PodInfo, nodeInfo NodeInfo) *Status
}

View file

@@ -0,0 +1,262 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"errors"
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/util/sets"
)
var errorStatus = NewStatus(Error, "internal error")
var statusWithErr = AsStatus(errors.New("internal error"))
func TestStatus(t *testing.T) {
tests := []struct {
name string
status *Status
expectedCode Code
expectedMessage string
expectedIsSuccess bool
expectedIsWait bool
expectedIsSkip bool
expectedAsError error
}{
{
name: "success status",
status: NewStatus(Success, ""),
expectedCode: Success,
expectedMessage: "",
expectedIsSuccess: true,
expectedIsWait: false,
expectedIsSkip: false,
expectedAsError: nil,
},
{
name: "wait status",
status: NewStatus(Wait, ""),
expectedCode: Wait,
expectedMessage: "",
expectedIsSuccess: false,
expectedIsWait: true,
expectedIsSkip: false,
expectedAsError: nil,
},
{
name: "error status",
status: NewStatus(Error, "unknown error"),
expectedCode: Error,
expectedMessage: "unknown error",
expectedIsSuccess: false,
expectedIsWait: false,
expectedIsSkip: false,
expectedAsError: errors.New("unknown error"),
},
{
name: "skip status",
status: NewStatus(Skip, ""),
expectedCode: Skip,
expectedMessage: "",
expectedIsSuccess: false,
expectedIsWait: false,
expectedIsSkip: true,
expectedAsError: nil,
},
{
name: "nil status",
status: nil,
expectedCode: Success,
expectedMessage: "",
expectedIsSuccess: true,
expectedIsSkip: false,
expectedAsError: nil,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if test.status.Code() != test.expectedCode {
t.Errorf("expect status.Code() returns %v, but %v", test.expectedCode, test.status.Code())
}
if test.status.Message() != test.expectedMessage {
t.Errorf("expect status.Message() returns %v, but %v", test.expectedMessage, test.status.Message())
}
if test.status.IsSuccess() != test.expectedIsSuccess {
t.Errorf("expect status.IsSuccess() returns %v, but %v", test.expectedIsSuccess, test.status.IsSuccess())
}
if test.status.IsWait() != test.expectedIsWait {
t.Errorf("status.IsWait() returns %v, but want %v", test.status.IsWait(), test.expectedIsWait)
}
if test.status.IsSkip() != test.expectedIsSkip {
t.Errorf("status.IsSkip() returns %v, but want %v", test.status.IsSkip(), test.expectedIsSkip)
}
if errors.Is(test.status.AsError(), test.expectedAsError) {
return
}
if test.status.AsError().Error() != test.expectedAsError.Error() {
t.Errorf("expect status.AsError() returns %v, but %v", test.expectedAsError, test.status.AsError())
}
})
}
}
func TestPreFilterResultMerge(t *testing.T) {
tests := map[string]struct {
receiver *PreFilterResult
in *PreFilterResult
want *PreFilterResult
}{
"all nil": {},
"nil receiver empty input": {
in: &PreFilterResult{NodeNames: sets.New[string]()},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"empty receiver nil input": {
receiver: &PreFilterResult{NodeNames: sets.New[string]()},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"empty receiver empty input": {
receiver: &PreFilterResult{NodeNames: sets.New[string]()},
in: &PreFilterResult{NodeNames: sets.New[string]()},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"nil receiver populated input": {
in: &PreFilterResult{NodeNames: sets.New("node1")},
want: &PreFilterResult{NodeNames: sets.New("node1")},
},
"empty receiver populated input": {
receiver: &PreFilterResult{NodeNames: sets.New[string]()},
in: &PreFilterResult{NodeNames: sets.New("node1")},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"populated receiver nil input": {
receiver: &PreFilterResult{NodeNames: sets.New("node1")},
want: &PreFilterResult{NodeNames: sets.New("node1")},
},
"populated receiver empty input": {
receiver: &PreFilterResult{NodeNames: sets.New("node1")},
in: &PreFilterResult{NodeNames: sets.New[string]()},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"populated receiver and input": {
receiver: &PreFilterResult{NodeNames: sets.New("node1", "node2")},
in: &PreFilterResult{NodeNames: sets.New("node2", "node3")},
want: &PreFilterResult{NodeNames: sets.New("node2")},
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
got := test.receiver.Merge(test.in)
if diff := cmp.Diff(test.want, got); diff != "" {
t.Errorf("unexpected diff (-want, +got):\n%s", diff)
}
})
}
}
func TestIsStatusEqual(t *testing.T) {
tests := []struct {
name string
x, y *Status
want bool
}{
{
name: "two nil should be equal",
x: nil,
y: nil,
want: true,
},
{
name: "nil should be equal to success status",
x: nil,
y: NewStatus(Success),
want: true,
},
{
name: "nil should not be equal with status except success",
x: nil,
y: NewStatus(Error, "internal error"),
want: false,
},
{
name: "one status should be equal to itself",
x: errorStatus,
y: errorStatus,
want: true,
},
{
name: "same type statuses without reasons should be equal",
x: NewStatus(Success),
y: NewStatus(Success),
want: true,
},
{
name: "statuses with same message should be equal",
x: NewStatus(Unschedulable, "unschedulable"),
y: NewStatus(Unschedulable, "unschedulable"),
want: true,
},
{
name: "error statuses with same message should be equal",
x: NewStatus(Error, "error"),
y: NewStatus(Error, "error"),
want: true,
},
{
name: "statuses with different reasons should not be equal",
x: NewStatus(Unschedulable, "unschedulable"),
y: NewStatus(Unschedulable, "unschedulable", "injected filter status"),
want: false,
},
{
name: "statuses with different codes should not be equal",
x: NewStatus(Error, "internal error"),
y: NewStatus(Unschedulable, "internal error"),
want: false,
},
{
name: "wrap error status should be equal with original one",
x: statusWithErr,
y: AsStatus(fmt.Errorf("error: %w", statusWithErr.AsError())),
want: true,
},
{
name: "statues with different errors that have the same message shouldn't be equal",
x: AsStatus(errors.New("error")),
y: AsStatus(errors.New("error")),
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.x.Equal(tt.y); got != tt.want {
t.Errorf("cmp.Equal() = %v, want %v", got, tt.want)
}
})
}
}
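
Read together, the cases encode three rules: nil stands in for Success, code and reasons must match exactly, and errors compare by identity (so wrapping is transparent, but equal messages are not enough). A hedged standalone sketch of those rules, not the framework's actual implementation:

package main

import (
	"errors"
	"fmt"
	"slices"
)

type status struct {
	code    int // 0 plays the role of Success here
	reasons []string
	err     error
}

func equal(x, y *status) bool {
	if x == nil {
		x = &status{} // nil is shorthand for Success
	}
	if y == nil {
		y = &status{}
	}
	if x.code != y.code {
		return false
	}
	if x.err != nil || y.err != nil {
		// Identity through wrapping, not message equality.
		return errors.Is(x.err, y.err) || errors.Is(y.err, x.err)
	}
	return slices.Equal(x.reasons, y.reasons)
}

func main() {
	base := errors.New("boom")
	x := &status{code: 1, err: base}
	y := &status{code: 1, err: fmt.Errorf("wrapped: %w", base)}
	fmt.Println(equal(nil, &status{}))                               // true: nil matches Success
	fmt.Println(equal(x, y))                                         // true: wrapping preserves identity
	fmt.Println(equal(x, &status{code: 1, err: errors.New("boom")})) // false: distinct error values
}
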

View file

@@ -21,19 +21,18 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/dynamic-resource-allocation/structured"
fwk "k8s.io/kube-scheduler/framework"
)
// NodeInfoLister interface represents anything that can list NodeInfo objects or get one by node name.
type NodeInfoLister interface {
// List returns the list of NodeInfos.
List() ([]fwk.NodeInfo, error)
List() ([]NodeInfo, error)
// HavePodsWithAffinityList returns the list of NodeInfos of nodes with pods with affinity terms.
HavePodsWithAffinityList() ([]fwk.NodeInfo, error)
HavePodsWithAffinityList() ([]NodeInfo, error)
// HavePodsWithRequiredAntiAffinityList returns the list of NodeInfos of nodes with pods with required anti-affinity terms.
HavePodsWithRequiredAntiAffinityList() ([]fwk.NodeInfo, error)
HavePodsWithRequiredAntiAffinityList() ([]NodeInfo, error)
// Get returns the NodeInfo of the given node name.
Get(nodeName string) (fwk.NodeInfo, error)
Get(nodeName string) (NodeInfo, error)
}
// StorageInfoLister interface represents anything that handles storage-related operations and resources.
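
The lister contract above is small enough to fake for tests. A minimal in-memory sketch covering List and Get only; NodeInfo here is a stand-in struct, not the framework's much larger interface:

package main

import "fmt"

// NodeInfo is a stand-in for the framework type of the same name.
type NodeInfo struct{ Name string }

type fakeLister struct{ nodes map[string]*NodeInfo }

// List returns every node in the snapshot, in no particular order.
func (f *fakeLister) List() ([]*NodeInfo, error) {
	out := make([]*NodeInfo, 0, len(f.nodes))
	for _, n := range f.nodes {
		out = append(out, n)
	}
	return out, nil
}

// Get looks a node up by name, mirroring the Get(nodeName string) method above.
func (f *fakeLister) Get(name string) (*NodeInfo, error) {
	n, ok := f.nodes[name]
	if !ok {
		return nil, fmt.Errorf("node %q not found", name)
	}
	return n, nil
}

func main() {
	l := &fakeLister{nodes: map[string]*NodeInfo{"node1": {Name: "node1"}}}
	n, _ := l.Get("node1")
	fmt.Println(n.Name) // node1
}
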

View file

@@ -12,11 +12,17 @@ require (
k8s.io/apimachinery v0.0.0
k8s.io/client-go v0.0.0
k8s.io/component-base v0.0.0
k8s.io/dynamic-resource-allocation v0.0.0
k8s.io/klog/v2 v2.130.1
sigs.k8s.io/yaml v1.6.0
)
require (
cel.dev/expr v0.24.0 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
@@ -25,27 +31,45 @@ require (
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/cel-go v0.26.0 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.64.0 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/spf13/cobra v1.10.0 // indirect
github.com/spf13/pflag v1.0.9 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.14.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/term v0.32.0 // indirect
golang.org/x/text v0.25.0 // indirect
golang.org/x/time v0.9.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiserver v0.0.0 // indirect
k8s.io/component-helpers v0.0.0 // indirect
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
@@ -56,6 +80,11 @@ require (
replace (
k8s.io/api => ../api
k8s.io/apimachinery => ../apimachinery
k8s.io/apiserver => ../apiserver
k8s.io/client-go => ../client-go
k8s.io/component-base => ../component-base
k8s.io/component-helpers => ../component-helpers
k8s.io/dynamic-resource-allocation => ../dynamic-resource-allocation
k8s.io/kms => ../kms
k8s.io/kubelet => ../kubelet
)
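
For readers unfamiliar with the staging layout: the v0.0.0 requires above are unresolvable on their own, and the replace block redirects them to sibling checkouts within the same repository. A minimal hypothetical go.mod showing the same pattern:

module example.com/scratch // hypothetical consumer, for illustration only

go 1.24

require k8s.io/kube-scheduler v0.0.0

// Resolve the zero version to a local checkout instead of a module proxy.
replace k8s.io/kube-scheduler => ../kube-scheduler
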

View file

@@ -1,18 +1,33 @@
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/coreos/go-oidc v2.3.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
@@ -31,8 +46,11 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
@@ -44,14 +62,23 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -59,6 +86,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -72,6 +100,7 @@ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFd
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
@@ -80,15 +109,27 @@ github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7y
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spf13/cobra v1.10.0 h1:a5/WeUlSDCvV5a45ljW2ZFtV0bTDpkfSAj3uqB6Sc+0=
github.com/spf13/cobra v1.10.0/go.mod h1:9dhySC7dnTtEiqzmqfkLj47BslqLCUPMXjG2lj/NgoE=
github.com/spf13/pflag v1.0.8/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -100,19 +141,34 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
go.etcd.io/etcd/api/v3 v3.6.4/go.mod h1:eFhhvfR8Px1P6SEuLT600v+vrhdDTdcfMzmnxVXXSbk=
go.etcd.io/etcd/client/pkg/v3 v3.6.4/go.mod h1:sbdzr2cl3HzVmxNw//PH7aLGVtY4QySjQFuaCgcRFAI=
go.etcd.io/etcd/client/v3 v3.6.4/go.mod h1:jaNNHCyg2FdALyKWnd7hxZXZxZANb0+KGY+YQaEMISo=
go.etcd.io/etcd/pkg/v3 v3.6.4/go.mod h1:kKcYWP8gHuBRcteyv6MXWSN0+bVMnfgqiHueIZnKMtE=
go.etcd.io/etcd/server/v3 v3.6.4/go.mod h1:aYCL/h43yiONOv0QIR82kH/2xZ7m+IWYjzRmyQfnCAg=
go.etcd.io/raft/v3 v3.6.0/go.mod h1:nLvLevg6+xrVtHUmVaTcTz603gQPHfh7kUAwV6YpfGo=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
@@ -124,6 +180,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
@@ -138,6 +196,7 @@ golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKl
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -162,7 +221,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
@@ -172,8 +233,11 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -184,6 +248,7 @@ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOP
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=

View file

@@ -35,7 +35,6 @@ import (
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler"
configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing"
"k8s.io/kubernetes/pkg/scheduler/framework"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
st "k8s.io/kubernetes/pkg/scheduler/testing"
schedulerutils "k8s.io/kubernetes/test/integration/scheduler"
@@ -46,7 +45,7 @@ import (
var lowPriority, mediumPriority, highPriority int32 = 100, 200, 300
var _ framework.FilterPlugin = &fooPlugin{}
var _ fwk.FilterPlugin = &fooPlugin{}
type fooPlugin struct {
}
@@ -74,8 +73,8 @@ func (pl *fooPlugin) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWith
}
// newPlugin returns a plugin factory with specified Plugin.
func newPlugin(plugin framework.Plugin) frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
func newPlugin(plugin fwk.Plugin) frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return plugin, nil
}
}
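
A consolidated sketch of how a factory with the post-move signature is registered, assuming the Registry and PluginFactory shapes this diff shows (only fwk types in the signature); the plugin name is illustrative:

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	fwk "k8s.io/kube-scheduler/framework"
	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
)

type fooPlugin struct{}

func (*fooPlugin) Name() string { return "Foo" }

func main() {
	// Registry maps plugin names to factories; the framework calls each
	// factory with its configuration and a fwk.Handle at startup.
	registry := frameworkruntime.Registry{
		"Foo": func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
			return &fooPlugin{}, nil
		},
	}
	fmt.Println(len(registry)) // 1
}
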

View file

@@ -28,7 +28,6 @@ import (
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/framework"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
schedulerutils "k8s.io/kubernetes/test/integration/scheduler"
testutils "k8s.io/kubernetes/test/integration/util"
@@ -96,7 +95,7 @@ func Test_PutNominatedNodeNameInBindingCycle(t *testing.T) {
cancel := make(chan struct{})
tests := []struct {
name string
plugin framework.Plugin
plugin fwk.Plugin
expectNominatedNodeName bool
cleanup func()
}{
@@ -136,13 +135,13 @@ func Test_PutNominatedNodeNameInBindingCycle(t *testing.T) {
defer test.cleanup()
}
pf := func(plugin framework.Plugin) frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
pf := func(plugin fwk.Plugin) frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return plugin, nil
}
}
plugins := []framework.Plugin{&NoNNNPostBindPlugin{cancel: testContext.Ctx.Done(), t: t}}
plugins := []fwk.Plugin{&NoNNNPostBindPlugin{cancel: testContext.Ctx.Done(), t: t}}
if test.plugin != nil {
plugins = append(plugins, test.plugin)
}

View file

@@ -63,14 +63,14 @@ import (
// imported from testutils
var (
initRegistryAndConfig = func(t *testing.T, plugins ...framework.Plugin) (frameworkruntime.Registry, schedulerconfig.KubeSchedulerProfile) {
initRegistryAndConfig = func(t *testing.T, plugins ...fwk.Plugin) (frameworkruntime.Registry, schedulerconfig.KubeSchedulerProfile) {
return schedulerutils.InitRegistryAndConfig(t, newPlugin, plugins...)
}
)
// newPlugin returns a plugin factory with specified Plugin.
func newPlugin(plugin framework.Plugin) frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
func newPlugin(plugin fwk.Plugin) frameworkruntime.PluginFactory {
return func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
switch pl := plugin.(type) {
case *PermitPlugin:
pl.fh = fh
@@ -159,7 +159,7 @@ func (fp *FilterPlugin) deepCopy() *FilterPlugin {
type PostFilterPlugin struct {
name string
fh framework.Handle
fh fwk.Handle
numPostFilterCalled int
failPostFilter bool
rejectPostFilter bool
@@ -269,7 +269,7 @@ type PermitPlugin struct {
waitingPod string
rejectingPod string
allowingPod string
fh framework.Handle
fh fwk.Handle
}
func (pp *PermitPlugin) deepCopy() *PermitPlugin {
@@ -307,24 +307,24 @@ const (
permitPluginName = "permit-plugin"
)
var _ framework.PreEnqueuePlugin = &PreEnqueuePlugin{}
var _ framework.PreFilterPlugin = &PreFilterPlugin{}
var _ framework.PostFilterPlugin = &PostFilterPlugin{}
var _ framework.ScorePlugin = &ScorePlugin{}
var _ framework.FilterPlugin = &FilterPlugin{}
var _ framework.EnqueueExtensions = &FilterPlugin{}
var _ framework.ScorePlugin = &ScorePlugin{}
var _ framework.ScorePlugin = &ScoreWithNormalizePlugin{}
var _ framework.EnqueueExtensions = &ScorePlugin{}
var _ framework.ReservePlugin = &ReservePlugin{}
var _ framework.PreScorePlugin = &PreScorePlugin{}
var _ framework.PreBindPlugin = &PreBindPlugin{}
var _ framework.EnqueueExtensions = &PreBindPlugin{}
var _ framework.BindPlugin = &BindPlugin{}
var _ framework.PostBindPlugin = &PostBindPlugin{}
var _ framework.PermitPlugin = &PermitPlugin{}
var _ framework.EnqueueExtensions = &PermitPlugin{}
var _ framework.QueueSortPlugin = &QueueSortPlugin{}
var _ fwk.PreEnqueuePlugin = &PreEnqueuePlugin{}
var _ fwk.PreFilterPlugin = &PreFilterPlugin{}
var _ fwk.PostFilterPlugin = &PostFilterPlugin{}
var _ fwk.ScorePlugin = &ScorePlugin{}
var _ fwk.FilterPlugin = &FilterPlugin{}
var _ fwk.EnqueueExtensions = &FilterPlugin{}
var _ fwk.ScorePlugin = &ScorePlugin{}
var _ fwk.ScorePlugin = &ScoreWithNormalizePlugin{}
var _ fwk.EnqueueExtensions = &ScorePlugin{}
var _ fwk.ReservePlugin = &ReservePlugin{}
var _ fwk.PreScorePlugin = &PreScorePlugin{}
var _ fwk.PreBindPlugin = &PreBindPlugin{}
var _ fwk.EnqueueExtensions = &PreBindPlugin{}
var _ fwk.BindPlugin = &BindPlugin{}
var _ fwk.PostBindPlugin = &PostBindPlugin{}
var _ fwk.PermitPlugin = &PermitPlugin{}
var _ fwk.EnqueueExtensions = &PermitPlugin{}
var _ fwk.QueueSortPlugin = &QueueSortPlugin{}
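
The `var _` block above is the standard compile-time assertion idiom: it costs nothing at runtime and breaks the build the moment a type stops satisfying an interface. A toy, self-contained version:

package main

type Plugin interface{ Name() string }

type foo struct{}

func (foo) Name() string { return "foo" }

// Compilation fails on this line if foo ever loses Name(); the blank
// identifier means no variable is actually allocated.
var _ Plugin = foo{}

func main() {}
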
func (ep *QueueSortPlugin) Name() string {
return queuesortPluginName
@@ -375,12 +375,12 @@ func (sp *ScorePlugin) Score(ctx context.Context, state fwk.CycleState, p *v1.Po
if sp.numScoreCalled == 1 {
// The first node is scored the highest, the rest are scored lower.
sp.highScoreNode = nodeInfo.Node().Name
score = framework.MaxNodeScore
score = fwk.MaxNodeScore
}
return score, nil
}
func (sp *ScorePlugin) ScoreExtensions() framework.ScoreExtensions {
func (sp *ScorePlugin) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
@@ -403,12 +403,12 @@ func (sp *ScoreWithNormalizePlugin) Score(ctx context.Context, state fwk.CycleSt
return score, nil
}
func (sp *ScoreWithNormalizePlugin) NormalizeScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *fwk.Status {
func (sp *ScoreWithNormalizePlugin) NormalizeScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, scores fwk.NodeScoreList) *fwk.Status {
sp.numNormalizeScoreCalled++
return nil
}
func (sp *ScoreWithNormalizePlugin) ScoreExtensions() framework.ScoreExtensions {
func (sp *ScoreWithNormalizePlugin) ScoreExtensions() fwk.ScoreExtensions {
return sp
}
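
For context on the Score/NormalizeScore split visible above: Score returns raw per-node values, and NormalizeScore then rescales them into the framework's fixed [0, MaxNodeScore] range. A hedged sketch of a typical max-based normalization (real plugins vary):

package main

import "fmt"

const maxNodeScore = 100 // stand-in for the framework's MaxNodeScore

type nodeScore struct {
	Name  string
	Score int64
}

// normalize rescales raw scores so the best node lands on maxNodeScore.
func normalize(scores []nodeScore) {
	var max int64
	for _, s := range scores {
		if s.Score > max {
			max = s.Score
		}
	}
	if max == 0 {
		return // all zero; nothing to rescale
	}
	for i := range scores {
		scores[i].Score = scores[i].Score * maxNodeScore / max
	}
}

func main() {
	s := []nodeScore{{"node1", 10}, {"node2", 40}}
	normalize(s)
	fmt.Println(s) // [{node1 25} {node2 100}]
}
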
@@ -576,12 +576,12 @@ func (pp *PreFilterPlugin) Name() string {
}
// Extensions returns the PreFilterExtensions interface.
func (pp *PreFilterPlugin) PreFilterExtensions() framework.PreFilterExtensions {
func (pp *PreFilterPlugin) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
// PreFilter is a test function that returns (true, nil) or errors for testing.
func (pp *PreFilterPlugin) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (pp *PreFilterPlugin) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
pp.numPreFilterCalled++
if pp.failPreFilter {
return nil, fwk.NewStatus(fwk.Error, fmt.Sprintf("injecting failure for pod %v", pod.Name))
@@ -590,7 +590,7 @@ func (pp *PreFilterPlugin) PreFilter(ctx context.Context, state fwk.CycleState,
return nil, fwk.NewStatus(fwk.Unschedulable, fmt.Sprintf("reject pod %v", pod.Name))
}
if len(pp.preFilterResultNodes) != 0 {
return &framework.PreFilterResult{NodeNames: pp.preFilterResultNodes}, nil
return &fwk.PreFilterResult{NodeNames: pp.preFilterResultNodes}, nil
}
return nil, nil
}
@@ -600,7 +600,7 @@ func (pp *PostFilterPlugin) Name() string {
return pp.name
}
func (pp *PostFilterPlugin) PostFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, _ framework.NodeToStatusReader) (*framework.PostFilterResult, *fwk.Status) {
func (pp *PostFilterPlugin) PostFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, _ fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status) {
pp.numPostFilterCalled++
nodeInfos, err := pp.fh.SnapshotSharedLister().NodeInfos().List()
if err != nil {
@@ -660,7 +660,7 @@ func (pp *PermitPlugin) Permit(ctx context.Context, state fwk.CycleState, pod *v
}
if pp.waitAndRejectPermit {
pp.rejectingPod = pod.Name
pp.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) {
pp.fh.IterateOverWaitingPods(func(wp fwk.WaitingPod) {
wp.Reject(pp.name, fmt.Sprintf("reject pod %v", wp.GetPod().Name))
})
return fwk.NewStatus(fwk.Unschedulable, fmt.Sprintf("reject pod %v", pod.Name)), 0
@@ -676,14 +676,14 @@ func (pp *PermitPlugin) Permit(ctx context.Context, state fwk.CycleState, pod *v
// allowAllPods allows all waiting pods.
func (pp *PermitPlugin) allowAllPods() {
pp.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { wp.Allow(pp.name) })
pp.fh.IterateOverWaitingPods(func(wp fwk.WaitingPod) { wp.Allow(pp.name) })
}
// rejectAllPods rejects all waiting pods.
func (pp *PermitPlugin) rejectAllPods() {
pp.mutex.Lock()
defer pp.mutex.Unlock()
pp.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { wp.Reject(pp.name, "rejectAllPods") })
pp.fh.IterateOverWaitingPods(func(wp fwk.WaitingPod) { wp.Reject(pp.name, "rejectAllPods") })
}
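
A toy sketch of the waiting-pod pattern these helpers drive: Permit parks a pod with a pending decision, and Allow/Reject complete it. The types and channel plumbing here are illustrative, not the framework's implementation:

package main

import "fmt"

type decision struct {
	allowed bool
	reason  string
}

type waitingPod struct {
	name string
	done chan decision // buffered so Allow/Reject never block
}

func (w *waitingPod) Allow() { w.done <- decision{allowed: true} }

func (w *waitingPod) Reject(reason string) { w.done <- decision{reason: reason} }

func main() {
	wp := &waitingPod{name: "pod-1", done: make(chan decision, 1)}
	wp.Reject("rejectAllPods")
	d := <-wp.done
	fmt.Println(wp.name, d.allowed, d.reason) // pod-1 false rejectAllPods
}
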
func (pp *PermitPlugin) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
@@ -1419,7 +1419,7 @@ func TestUnReserveReservePlugins(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var pls []framework.Plugin
var pls []fwk.Plugin
for _, pl := range test.plugins {
pls = append(pls, pl)
}
@@ -1517,7 +1517,7 @@ func TestUnReservePermitPlugins(t *testing.T) {
name: "reservePlugin",
failReserve: false,
}
registry, profile := initRegistryAndConfig(t, []framework.Plugin{test.plugin, reservePlugin}...)
registry, profile := initRegistryAndConfig(t, []fwk.Plugin{test.plugin, reservePlugin}...)
testCtx, teardown := schedulerutils.InitTestSchedulerForFrameworkTest(t, testContext, 2, true,
scheduler.WithProfiles(profile),
@@ -1589,7 +1589,7 @@ func TestUnReservePreBindPlugins(t *testing.T) {
name: "reservePlugin",
failReserve: false,
}
registry, profile := initRegistryAndConfig(t, []framework.Plugin{test.plugin, reservePlugin}...)
registry, profile := initRegistryAndConfig(t, []fwk.Plugin{test.plugin, reservePlugin}...)
testCtx, teardown := schedulerutils.InitTestSchedulerForFrameworkTest(t, testContext, 2, true,
scheduler.WithProfiles(profile),
@@ -1658,7 +1658,7 @@ func TestUnReserveBindPlugins(t *testing.T) {
name: "reservePlugin",
failReserve: false,
}
registry, profile := initRegistryAndConfig(t, []framework.Plugin{test.plugin, reservePlugin}...)
registry, profile := initRegistryAndConfig(t, []fwk.Plugin{test.plugin, reservePlugin}...)
test.plugin.client = testContext.ClientSet
@@ -2093,7 +2093,7 @@ func TestMultiplePermitPlugins(t *testing.T) {
t.Errorf("Error while creating a test pod: %v", err)
}
var waitingPod framework.WaitingPod
var waitingPod fwk.WaitingPod
// Wait until the test pod is actually waiting.
wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
waitingPod = perPlugin1.fh.GetWaitingPod(pod.UID)
@@ -2145,7 +2145,7 @@ func TestPermitPluginsCancelled(t *testing.T) {
t.Errorf("Error while creating a test pod: %v", err)
}
var waitingPod framework.WaitingPod
var waitingPod fwk.WaitingPod
// Wait until the test pod is actually waiting.
wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
waitingPod = perPlugin1.fh.GetWaitingPod(pod.UID)
@@ -2554,7 +2554,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
// Wait until the waiting-pod is actually waiting.
if err := wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
w := false
permitPlugin.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { w = true })
permitPlugin.fh.IterateOverWaitingPods(func(wp fwk.WaitingPod) { w = true })
return w, nil
}); err != nil {
t.Fatalf("The waiting pod is expected to be waiting: %v", err)
@@ -2579,7 +2579,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
if w := tt.waitingPod; w != nil {
if err := wait.PollUntilContextTimeout(testCtx.Ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
w := false
permitPlugin.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { w = true })
permitPlugin.fh.IterateOverWaitingPods(func(wp fwk.WaitingPod) { w = true })
return !w, nil
}); err != nil {
t.Fatalf("Expected the waiting pod to get preempted.")
@@ -2619,8 +2619,8 @@ const (
jobPluginName = "job plugin"
)
var _ framework.PreFilterPlugin = &JobPlugin{}
var _ framework.PostBindPlugin = &PostBindPlugin{}
var _ fwk.PreFilterPlugin = &JobPlugin{}
var _ fwk.PostBindPlugin = &PostBindPlugin{}
type JobPlugin struct {
podLister listersv1.PodLister
@@ -2631,7 +2631,7 @@ func (j *JobPlugin) Name() string {
return jobPluginName
}
func (j *JobPlugin) PreFilter(_ context.Context, _ fwk.CycleState, p *v1.Pod, nodes []fwk.NodeInfo) (*framework.PreFilterResult, *fwk.Status) {
func (j *JobPlugin) PreFilter(_ context.Context, _ fwk.CycleState, p *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
labelSelector := labels.SelectorFromSet(labels.Set{"driver": ""})
driverPods, err := j.podLister.Pods(p.Namespace).List(labelSelector)
if err != nil {
@@ -2643,7 +2643,7 @@ func (j *JobPlugin) PreFilter(_ context.Context, _ fwk.CycleState, p *v1.Pod, no
return nil, nil
}
func (j *JobPlugin) PreFilterExtensions() framework.PreFilterExtensions {
func (j *JobPlugin) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
@@ -2678,7 +2678,7 @@ func (j *JobPlugin) PostBind(_ context.Context, state fwk.CycleState, p *v1.Pod,
func TestActivatePods(t *testing.T) {
var jobPlugin *JobPlugin
// Create a plugin registry for testing. Register a Job plugin.
registry := frameworkruntime.Registry{jobPluginName: func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
registry := frameworkruntime.Registry{jobPluginName: func(_ context.Context, _ runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
jobPlugin = &JobPlugin{podLister: fh.SharedInformerFactory().Core().V1().Pods().Lister()}
return jobPlugin, nil
}}
@@ -2749,10 +2749,10 @@ func TestActivatePods(t *testing.T) {
}
}
var _ framework.PreEnqueuePlugin = &SchedulingGatesPluginWithEvents{}
var _ framework.EnqueueExtensions = &SchedulingGatesPluginWithEvents{}
var _ framework.PreEnqueuePlugin = &SchedulingGatesPluginWOEvents{}
var _ framework.EnqueueExtensions = &SchedulingGatesPluginWOEvents{}
var _ fwk.PreEnqueuePlugin = &SchedulingGatesPluginWithEvents{}
var _ fwk.EnqueueExtensions = &SchedulingGatesPluginWithEvents{}
var _ fwk.PreEnqueuePlugin = &SchedulingGatesPluginWOEvents{}
var _ fwk.EnqueueExtensions = &SchedulingGatesPluginWOEvents{}
const (
schedulingGatesPluginWithEvents = "scheduling-gates-with-events"
@@ -2801,7 +2801,7 @@ func (pl *SchedulingGatesPluginWOEvents) EventsToRegister(_ context.Context) ([]
// This test verifies that registering nil events for a PreEnqueue plugin works as expected.
func TestPreEnqueuePluginEventsToRegister(t *testing.T) {
num := func(pl framework.Plugin) int {
num := func(pl fwk.Plugin) int {
switch item := pl.(type) {
case *SchedulingGatesPluginWithEvents:
return item.called
@@ -2852,7 +2852,7 @@ func TestPreEnqueuePluginEventsToRegister(t *testing.T) {
testContext := testutils.InitTestAPIServer(t, "preenqueue-plugin", nil)
// Use a new plugin instance each time to reset the call counts.
var plugin framework.PreEnqueuePlugin
var plugin fwk.PreEnqueuePlugin
if tt.withEvents {
plugin = &SchedulingGatesPluginWithEvents{SchedulingGates: schedulinggates.SchedulingGates{}}
} else {

Some files were not shown because too many files have changed in this diff.