Merge pull request #134947 from aojea/dra_status_check

Fine-grained Authorization for ResourceClaim Status Updates
This commit is contained in:
Kubernetes Prow Robot 2026-03-26 22:34:18 +05:30 committed by GitHub
commit 3fea8a2fef
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
23 changed files with 1816 additions and 34 deletions

View file

@ -432,7 +432,10 @@ func (c CompletedConfig) StorageProviders(client *kubernetes.Clientset) ([]contr
appsrest.StorageProvider{},
admissionregistrationrest.RESTStorageProvider{Authorizer: c.ControlPlane.Generic.Authorization.Authorizer, DiscoveryClient: client.Discovery()},
eventsrest.RESTStorageProvider{TTL: c.ControlPlane.EventTTL},
resourcerest.RESTStorageProvider{NamespaceClient: client.CoreV1().Namespaces()},
resourcerest.RESTStorageProvider{
NamespaceClient: client.CoreV1().Namespaces(),
Authorizer: c.ControlPlane.Generic.Authorization.Authorizer,
},
}
if AdditionalStorageProvidersForTests != nil {

View file

@ -270,6 +270,16 @@ const (
// status from DRA drivers.
DRAResourceClaimDeviceStatus featuregate.Feature = "DRAResourceClaimDeviceStatus"
// owner: @aojea
// kep: http://kep.k8s.io/4817
//
// Enables fine-grained authorization checks for ResourceClaim status updates.
// Requires separate permission on resourceclaims/binding to update
// status.allocation and status.reservedFor, and per-driver permission on
// resourceclaims/driver using associated-node / arbitrary-node verb prefixes
// to update status.devices.
DRAResourceClaimGranularStatusAuthorization featuregate.Feature = "DRAResourceClaimGranularStatusAuthorization"
// owner: @nmn3m
// kep: http://kep.k8s.io/5677
//
@ -1390,6 +1400,10 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
DRAResourceClaimGranularStatusAuthorization: {
{Version: version.MustParse("1.36"), Default: true, PreRelease: featuregate.Beta},
},
DRAResourcePoolStatus: {
{Version: version.MustParse("1.36"), Default: false, PreRelease: featuregate.Alpha},
},
@ -2438,6 +2452,8 @@ var defaultKubernetesFeatureGateDependencies = map[featuregate.Feature][]feature
DRAResourceClaimDeviceStatus: {}, // Soft dependency on DynamicResourceAllocation due to on/off-by-default conflict.
DRAResourceClaimGranularStatusAuthorization: {DynamicResourceAllocation, DRAResourceClaimDeviceStatus},
DRAResourcePoolStatus: {DynamicResourceAllocation},
DRASchedulerFilterTimeout: {DynamicResourceAllocation},

View file

@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/authentication/user"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/client-go/kubernetes/fake"
apitesting "k8s.io/kubernetes/pkg/api/testing"
@ -57,7 +58,7 @@ func testDeclarativeValidate(t *testing.T, apiVersion string) {
})
fakeClient := fake.NewClientset()
mockNSClient := fakeClient.CoreV1().Namespaces()
Strategy := NewStrategy(mockNSClient)
Strategy := NewStrategy(mockNSClient, nil)
opaqueDriverPath := field.NewPath("spec", "devices", "config").Index(0).Child("opaque", "driver")
@ -717,7 +718,7 @@ func testDeclarativeValidateUpdate(t *testing.T, apiVersion string) {
})
fakeClient := fake.NewClientset()
mockNSClient := fakeClient.CoreV1().Namespaces()
Strategy := NewStrategy(mockNSClient)
Strategy := NewStrategy(mockNSClient, nil)
validClaim := mkValidResourceClaim()
// TODO: As we accumulate more and more test cases, consider breaking this
// up into smaller tests for maintainability.
@ -835,7 +836,7 @@ func TestValidateStatusUpdateForDeclarative(t *testing.T) {
func testValidateStatusUpdateForDeclarative(t *testing.T, apiVersion string) {
fakeClient := fake.NewClientset()
mockNSClient := fakeClient.CoreV1().Namespaces()
Strategy := NewStrategy(mockNSClient)
Strategy := NewStrategy(mockNSClient, &fakeAuthorizer{true})
strategy := NewStatusStrategy(Strategy)
ctx := genericapirequest.WithRequestInfo(genericapirequest.NewDefaultContext(), &genericapirequest.RequestInfo{
@ -844,6 +845,8 @@ func testValidateStatusUpdateForDeclarative(t *testing.T, apiVersion string) {
Resource: "resourceclaims",
Subresource: "status",
})
ctx = genericapirequest.WithUser(ctx, &user.DefaultInfo{Name: "test-user"})
poolPath := field.NewPath("status", "allocation", "devices", "results").Index(0).Child("pool")
configSourcePath := field.NewPath("status", "allocation", "devices", "config").Index(0).Child("source")
driverPath := field.NewPath("status", "allocation", "devices", "results").Index(0).Child("driver")

View file

@ -22,10 +22,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/registry/generic"
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/client-go/kubernetes/typed/core/v1"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/kubernetes/pkg/printers"
printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
@ -40,11 +41,15 @@ type REST struct {
}
// NewREST returns a RESTStorage object that will work against ResourceClaims.
func NewREST(optsGetter generic.RESTOptionsGetter, nsClient v1.NamespaceInterface) (*REST, *StatusREST, error) {
func NewREST(optsGetter generic.RESTOptionsGetter, nsClient v1.NamespaceInterface, authorizer authorizer.Authorizer) (*REST, *StatusREST, error) {
if nsClient == nil {
return nil, nil, fmt.Errorf("namespace client is required")
}
strategy := resourceclaim.NewStrategy(nsClient)
if authorizer == nil {
return nil, nil, fmt.Errorf("authorizer is required")
}
strategy := resourceclaim.NewStrategy(nsClient, authorizer)
store := &genericregistry.Store{
NewFunc: func() runtime.Object { return &resource.ResourceClaim{} },
NewListFunc: func() runtime.Object { return &resource.ResourceClaimList{} },

View file

@ -17,6 +17,7 @@ limitations under the License.
package storage
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
@ -25,6 +26,8 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/generic"
genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing"
@ -36,6 +39,13 @@ import (
"k8s.io/kubernetes/pkg/registry/registrytest"
)
// fakeAuthorizer is a stub authorizer that unconditionally allows every
// request; these storage tests only exercise the authorized path.
type fakeAuthorizer struct {
}

// Authorize always returns DecisionAllow so that status updates in the
// storage tests never fail on the new fine-grained permission checks.
func (f *fakeAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
	return authorizer.DecisionAllow, "ok", nil
}
func newStorage(t *testing.T) (*REST, *StatusREST, *etcd3testing.EtcdTestServer) {
etcdStorage, server := registrytest.NewEtcdStorage(t, resource.GroupName)
restOptions := generic.RESTOptions{
@ -46,7 +56,7 @@ func newStorage(t *testing.T) (*REST, *StatusREST, *etcd3testing.EtcdTestServer)
}
fakeClient := fake.NewSimpleClientset()
mockNSClient := fakeClient.CoreV1().Namespaces()
resourceClaimStorage, statusStorage, err := NewREST(restOptions, mockNSClient)
resourceClaimStorage, statusStorage, err := NewREST(restOptions, mockNSClient, &fakeAuthorizer{})
if err != nil {
t.Fatalf("unexpected error from REST storage: %v", err)
}
@ -159,6 +169,17 @@ func TestUpdateStatus(t *testing.T) {
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
ctx := genericapirequest.NewDefaultContext()
ctx = genericapirequest.WithUser(ctx, &user.DefaultInfo{Name: "system:serviceaccount:kube-system:test"})
ctx = genericapirequest.WithRequestInfo(ctx, &genericapirequest.RequestInfo{
IsResourceRequest: true,
Verb: "update",
APIGroup: "resource.k8s.io",
APIVersion: "v1",
Resource: "resourceclaims",
Subresource: "status",
Namespace: metav1.NamespaceDefault,
Name: "foo",
})
key, _ := storage.KeyFunc(ctx, "foo")
claimStart := validNewClaim("foo", metav1.NamespaceDefault)

View file

@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/storage"
@ -50,15 +51,17 @@ import (
type resourceclaimStrategy struct {
runtime.ObjectTyper
names.NameGenerator
nsClient v1.NamespaceInterface
nsClient v1.NamespaceInterface
authorizer authorizer.Authorizer
}
// NewStrategy is the default logic that applies when creating and updating ResourceClaim objects.
func NewStrategy(nsClient v1.NamespaceInterface) *resourceclaimStrategy {
func NewStrategy(nsClient v1.NamespaceInterface, authorizer authorizer.Authorizer) *resourceclaimStrategy {
return &resourceclaimStrategy{
legacyscheme.Scheme,
names.SimpleNameGenerator,
nsClient,
authorizer,
}
}
@ -194,6 +197,17 @@ func (r *resourceclaimStatusStrategy) ValidateUpdate(ctx context.Context, obj, o
oldAllocationResult = oldClaim.Status.Allocation.Devices.Results
}
errs := resourceutils.AuthorizedForAdminStatus(ctx, newAllocationResult, oldAllocationResult, newClaim.Namespace, r.nsClient)
if utilfeature.DefaultFeatureGate.Enabled(features.DRAResourceClaimGranularStatusAuthorization) {
errs = append(errs, resourceutils.AuthorizedForBinding(ctx, field.NewPath("status", "allocation"), r.authorizer, newClaim.Status, oldClaim.Status)...)
// Only authorize driver device status if the claim is actually allocated since
// we drop all the device status when the claim is deallocated.
if utilfeature.DefaultFeatureGate.Enabled(features.DRAResourceClaimDeviceStatus) &&
newClaim.Status.Allocation != nil {
errs = append(errs, resourceutils.AuthorizedForDeviceStatus(ctx, field.NewPath("status", "devices"), r.authorizer, newClaim.Status, oldClaim.Status)...)
}
}
errs = append(errs, validation.ValidateResourceClaimStatusUpdate(newClaim, oldClaim)...)
return rest.ValidateDeclarativelyWithMigrationChecks(ctx, legacyscheme.Scheme, newClaim, oldClaim, errs, operation.Update)
}

View file

@ -17,6 +17,7 @@ limitations under the License.
package resourceclaim
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
@ -26,12 +27,15 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes/fake"
testclient "k8s.io/client-go/testing"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/kubernetes/pkg/features"
"k8s.io/utils/ptr"
@ -374,6 +378,9 @@ var fieldImmutableError = "field is immutable"
var metadataError = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters"
var deviceRequestError = "exactly one of `exactly` or `firstAvailable` is required"
var constraintError = "matchAttribute: Required value"
var bindingUpdateError = `User "test-user" cannot update resource "resourceclaims/binding" in API group "resource.k8s.io" at the cluster scope: denied`
var deviceAssociatedNodeUpdateError = `User "system:serviceaccount:kube-system:dra-driver" cannot associated-node:update resource "resourceclaims/driver" in API group "resource.k8s.io" in the namespace "default": denied`
var deviceArbitraryNodeUpdateError = `User "test-user" cannot arbitrary-node:update resource "resourceclaims/driver" in API group "resource.k8s.io" in the namespace "default": denied`
const (
req0 = "req-0"
@ -384,6 +391,7 @@ const (
testDriver = "test-driver"
testPool = "test-pool"
testDevice = "test-device"
testUser = "test-user"
)
var (
@ -397,7 +405,7 @@ var testCapacity = map[resource.QualifiedName]apiresource.Quantity{
func TestStrategy(t *testing.T) {
fakeClient := fake.NewSimpleClientset()
mockNSClient := fakeClient.CoreV1().Namespaces()
strategy := NewStrategy(mockNSClient)
strategy := NewStrategy(mockNSClient, nil)
if !strategy.NamespaceScoped() {
t.Errorf("ResourceClaim must be namespace scoped")
}
@ -629,7 +637,7 @@ func TestStrategyCreate(t *testing.T) {
features.DRAPrioritizedList: tc.prioritizedList,
features.DRAConsumableCapacity: tc.consumableCapacity,
})
strategy := NewStrategy(mockNSClient)
strategy := NewStrategy(mockNSClient, nil)
obj := tc.obj.DeepCopy()
strategy.PrepareForCreate(ctx, obj)
@ -964,7 +972,7 @@ func TestStrategyUpdate(t *testing.T) {
features.DRAConsumableCapacity: tc.consumableCapacity,
})
strategy := NewStrategy(mockNSClient)
strategy := NewStrategy(mockNSClient, nil)
oldObj := tc.oldObj.DeepCopy()
newObj := tc.newObj.DeepCopy()
@ -996,15 +1004,30 @@ func TestStrategyUpdate(t *testing.T) {
func TestStatusStrategyUpdate(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
ctx = genericapirequest.WithUser(ctx, &user.DefaultInfo{
Name: testUser,
Groups: []string{"system:authenticated"},
})
ctx = genericapirequest.WithRequestInfo(ctx, &genericapirequest.RequestInfo{
IsResourceRequest: true,
Verb: "update",
APIGroup: "resource.k8s.io",
APIVersion: "v1",
Resource: "resourceclaims",
Subresource: "status",
Namespace: metav1.NamespaceDefault,
})
testcases := map[string]struct {
oldObj *resource.ResourceClaim
newObj *resource.ResourceClaim
authz authorizer.Authorizer
ctxOverride func(context.Context) context.Context // if set, transforms the default ctx
adminAccess bool
deviceStatusFeatureGate bool
consumableCapacityFeatureGate bool
prioritizedListFeatureGate bool
bindingConditions bool
expectValidationError string
expectValidationErrors []string
expectObj *resource.ResourceClaim
verify func(*testing.T, []testclient.Action)
}{
@ -1025,7 +1048,7 @@ func TestStatusStrategyUpdate(t *testing.T) {
obj.Name += "-2"
return obj
}(),
expectValidationError: fieldImmutableError,
expectValidationErrors: []string{fieldImmutableError},
verify: func(t *testing.T, as []testclient.Action) {
if len(as) != 0 {
t.Errorf("expected no action to be taken")
@ -1082,10 +1105,10 @@ func TestStatusStrategyUpdate(t *testing.T) {
},
},
"keep-fields-admin-access-NonAdminNamespace": {
oldObj: objInNonAdminNamespace,
newObj: objWithAdminAccessStatusInNonAdminNamespace,
adminAccess: true,
expectValidationError: adminAccessError,
oldObj: objInNonAdminNamespace,
newObj: objWithAdminAccessStatusInNonAdminNamespace,
adminAccess: true,
expectValidationErrors: []string{adminAccessError},
verify: func(t *testing.T, as []testclient.Action) {
if len(as) != 1 {
t.Errorf("expected one action but got %d", len(as))
@ -1245,6 +1268,123 @@ func TestStatusStrategyUpdate(t *testing.T) {
}
},
},
"fail-update-fields-devices-status-without-permissions": {
oldObj: func() *resource.ResourceClaim {
obj := obj.DeepCopy()
addSpecDevicesRequest(obj, testRequest)
addStatusAllocationDevicesResults(obj, testDriver, testPool, testDevice, testRequest, nil, nil)
return obj
}(),
newObj: func() *resource.ResourceClaim { // Status is added
obj := obj.DeepCopy()
addSpecDevicesRequest(obj, testRequest)
addStatusAllocationDevicesResults(obj, testDriver, testPool, testDevice, testRequest, nil, nil)
addStatusDevices(obj, testDriver, testPool, testDevice, nil)
return obj
}(),
adminAccess: true, // Keep emulation version at 1.36 so DRAResourceClaimGranularStatusAuthorization is active
deviceStatusFeatureGate: true,
expectValidationErrors: []string{deviceArbitraryNodeUpdateError},
expectObj: func() *resource.ResourceClaim { // Status is not updated
obj := obj.DeepCopy()
addSpecDevicesRequest(obj, testRequest)
addStatusAllocationDevicesResults(obj, testDriver, testPool, testDevice, testRequest, nil, nil)
return obj
}(),
authz: &fakeAuthorizer{false},
verify: func(t *testing.T, as []testclient.Action) {
if len(as) != 0 {
t.Errorf("expected no action to be taken")
}
},
},
"fail-update-fields-devices-status-associated-node-without-permissions": {
oldObj: func() *resource.ResourceClaim {
obj := obj.DeepCopy()
addSpecDevicesRequest(obj, testRequest)
addStatusAllocationDevicesResults(obj, testDriver, testPool, testDevice, testRequest, nil, nil)
obj.Status.Allocation.NodeSelector = &core.NodeSelector{
NodeSelectorTerms: []core.NodeSelectorTerm{{
MatchFields: []core.NodeSelectorRequirement{{
Key: "metadata.name", Operator: core.NodeSelectorOpIn, Values: []string{"test-node"},
}},
}},
}
return obj
}(),
newObj: func() *resource.ResourceClaim {
obj := obj.DeepCopy()
addSpecDevicesRequest(obj, testRequest)
addStatusAllocationDevicesResults(obj, testDriver, testPool, testDevice, testRequest, nil, nil)
obj.Status.Allocation.NodeSelector = &core.NodeSelector{
NodeSelectorTerms: []core.NodeSelectorTerm{{
MatchFields: []core.NodeSelectorRequirement{{
Key: "metadata.name", Operator: core.NodeSelectorOpIn, Values: []string{"test-node"},
}},
}},
}
addStatusDevices(obj, testDriver, testPool, testDevice, nil)
return obj
}(),
ctxOverride: func(ctx context.Context) context.Context {
return genericapirequest.WithUser(ctx, &user.DefaultInfo{
Name: "system:serviceaccount:kube-system:dra-driver",
Groups: []string{"system:authenticated"},
Extra: map[string][]string{"authentication.kubernetes.io/node-name": {"test-node"}},
})
},
adminAccess: true,
deviceStatusFeatureGate: true,
authz: &fakeAuthorizer{false},
expectValidationErrors: []string{deviceAssociatedNodeUpdateError},
expectObj: func() *resource.ResourceClaim {
obj := obj.DeepCopy()
addSpecDevicesRequest(obj, testRequest)
addStatusAllocationDevicesResults(obj, testDriver, testPool, testDevice, testRequest, nil, nil)
obj.Status.Allocation.NodeSelector = &core.NodeSelector{
NodeSelectorTerms: []core.NodeSelectorTerm{{
MatchFields: []core.NodeSelectorRequirement{{
Key: "metadata.name", Operator: core.NodeSelectorOpIn, Values: []string{"test-node"},
}},
}},
}
return obj
}(),
verify: func(t *testing.T, as []testclient.Action) {
if len(as) != 0 {
t.Errorf("expected no action to be taken")
}
},
},
"fail-drop-status-deallocated-device-without-permissions": {
oldObj: func() *resource.ResourceClaim {
obj := obj.DeepCopy()
addSpecDevicesRequest(obj, testRequest)
addStatusAllocationDevicesResults(obj, testDriver, testPool, testDevice, testRequest, nil, nil)
addStatusDevices(obj, testDriver, testPool, testDevice, nil)
return obj
}(),
newObj: func() *resource.ResourceClaim { // device is deallocated
obj := obj.DeepCopy()
addSpecDevicesRequest(obj, testRequest)
addStatusDevices(obj, testDriver, testPool, testDevice, nil)
return obj
}(),
adminAccess: true, // Keep emulation version at 1.36 so DRAResourceClaimGranularStatusAuthorization is active
authz: &fakeAuthorizer{false},
deviceStatusFeatureGate: true,
expectValidationErrors: []string{bindingUpdateError},
expectObj: func() *resource.ResourceClaim { // Status is no longer there
obj := obj.DeepCopy()
addSpecDevicesRequest(obj, testRequest)
return obj
}(),
verify: func(t *testing.T, as []testclient.Action) {
if len(as) != 0 {
t.Errorf("expected no action to be taken")
}
},
},
"drop-status-deallocated-device-disable-feature-gate": {
oldObj: func() *resource.ResourceClaim {
obj := obj.DeepCopy()
@ -1562,7 +1702,11 @@ func TestStatusStrategyUpdate(t *testing.T) {
t.Run(name, func(t *testing.T) {
fakeClient := fake.NewSimpleClientset(ns1, ns2)
mockNSClient := fakeClient.CoreV1().Namespaces()
strategy := NewStrategy(mockNSClient)
authz := tc.authz
if tc.authz == nil {
authz = &fakeAuthorizer{true}
}
strategy := NewStrategy(mockNSClient, authz)
if !tc.adminAccess {
featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, utilfeature.DefaultFeatureGate, version.MustParse("1.35"))
@ -1580,20 +1724,27 @@ func TestStatusStrategyUpdate(t *testing.T) {
})
statusStrategy := NewStatusStrategy(strategy)
ctx := ctx
if tc.ctxOverride != nil {
ctx = tc.ctxOverride(ctx)
}
oldObj := tc.oldObj.DeepCopy()
newObj := tc.newObj.DeepCopy()
newObj.ResourceVersion = "4"
statusStrategy.PrepareForUpdate(ctx, newObj, oldObj)
if errs := statusStrategy.ValidateUpdate(ctx, newObj, oldObj); len(errs) != 0 {
if tc.expectValidationError == "" {
if len(tc.expectValidationErrors) == 0 {
t.Fatalf("unexpected error(s): %v", errs)
}
assert.Len(t, errs, 1, "exactly one error expected")
assert.ErrorContains(t, errs[0], tc.expectValidationError, "the error message should have contained the expected error message")
assert.Len(t, errs, len(tc.expectValidationErrors), "wrong number of validation errors")
for i, expectValidationError := range tc.expectValidationErrors {
assert.ErrorContains(t, errs[i], expectValidationError, "the error message should have contained the expected error message")
}
return
}
if tc.expectValidationError != "" {
if len(tc.expectValidationErrors) != 0 {
t.Fatal("expected validation error(s), got none")
}
if warnings := statusStrategy.WarningsOnUpdate(ctx, newObj, oldObj); len(warnings) != 0 {
@ -1661,3 +1812,14 @@ func addStatusDevices(resourceClaim *resource.ResourceClaim, driver string, pool
ShareID: (*string)(shareID),
})
}
// fakeAuthorizer is a configurable stub for the strategy tests:
// verdict=true grants every request, verdict=false denies every request.
type fakeAuthorizer struct {
	verdict bool
}

// Authorize returns a fixed decision driven solely by the verdict field.
func (f *fakeAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
	if f.verdict {
		return authorizer.DecisionAllow, "default accept", nil
	}
	return authorizer.DecisionDeny, "denied", nil
}

View file

@ -21,6 +21,7 @@ import (
resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
resourcev1beta1 "k8s.io/api/resource/v1beta1"
resourcev1beta2 "k8s.io/api/resource/v1beta2"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/rest"
genericapiserver "k8s.io/apiserver/pkg/server"
@ -42,6 +43,7 @@ import (
type RESTStorageProvider struct {
NamespaceClient v1.NamespaceInterface
Authorizer authorizer.Authorizer
}
func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, error) {
@ -88,7 +90,7 @@ func (p RESTStorageProvider) v1Storage(apiResourceConfigSource serverstorage.API
}
if resource := "resourceclaims"; apiResourceConfigSource.ResourceEnabled(resourcev1.SchemeGroupVersion.WithResource(resource)) {
resourceClaimStorage, resourceClaimStatusStorage, err := resourceclaimstore.NewREST(restOptionsGetter, nsClient)
resourceClaimStorage, resourceClaimStatusStorage, err := resourceclaimstore.NewREST(restOptionsGetter, nsClient, p.Authorizer)
if err != nil {
return nil, err
}
@ -151,7 +153,7 @@ func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorag
}
if resource := "resourceclaims"; apiResourceConfigSource.ResourceEnabled(resourcev1beta1.SchemeGroupVersion.WithResource(resource)) {
resourceClaimStorage, resourceClaimStatusStorage, err := resourceclaimstore.NewREST(restOptionsGetter, nsClient)
resourceClaimStorage, resourceClaimStatusStorage, err := resourceclaimstore.NewREST(restOptionsGetter, nsClient, p.Authorizer)
if err != nil {
return nil, err
}
@ -199,7 +201,7 @@ func (p RESTStorageProvider) v1beta2Storage(apiResourceConfigSource serverstorag
}
if resource := "resourceclaims"; apiResourceConfigSource.ResourceEnabled(resourcev1beta2.SchemeGroupVersion.WithResource(resource)) {
resourceClaimStorage, resourceClaimStatusStorage, err := resourceclaimstore.NewREST(restOptionsGetter, nsClient)
resourceClaimStorage, resourceClaimStatusStorage, err := resourceclaimstore.NewREST(restOptionsGetter, nsClient, p.Authorizer)
if err != nil {
return nil, err
}

View file

@ -20,9 +20,19 @@ import (
"context"
"fmt"
resourcev1 "k8s.io/api/resource/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/endpoints/filters"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/apis/resource"
)
@ -102,3 +112,221 @@ func adminRequested(deviceRequestResults []resource.DeviceRequestAllocationResul
}
return false, nil
}
// AuthorizedForBinding checks if the caller is authorized to update
// status.allocation and status.reservedFor by verifying permission on the
// synthetic resourceclaims/binding subresource. It returns no errors when
// neither field changed, an internal error when the request attributes
// cannot be derived from the context, and a forbidden error when the
// authorizer denies the check.
func AuthorizedForBinding(ctx context.Context, fieldPath *field.Path, authz authorizer.Authorizer, newStatus, oldStatus resource.ResourceClaimStatus) field.ErrorList {
	allocationUnchanged := equality.Semantic.DeepEqual(newStatus.Allocation, oldStatus.Allocation)
	reservedForUnchanged := equality.Semantic.DeepEqual(newStatus.ReservedFor, oldStatus.ReservedFor)
	if allocationUnchanged && reservedForUnchanged {
		// Nothing guarded by the binding permission was touched.
		return nil
	}
	requestAttrs, err := filters.GetAuthorizerAttributes(ctx)
	if err != nil {
		return field.ErrorList{field.InternalError(fieldPath, fmt.Errorf("cannot build authorizer attributes: %w", err))}
	}
	// The check is cluster-wide and name-less: permission on the binding
	// subresource is not scoped to a particular claim or namespace.
	bindingAttrs := &syntheticSubresourceAttrs{
		Attributes:  requestAttrs,
		verb:        requestAttrs.GetVerb(),        // verb is unchanged but must be specified
		subresource: resourcev1.SubresourceBinding, // the scheduler calls this "bind claim"
		namespace:   "",
		name:        "",
	}
	if authzErr := checkAuthorization(ctx, authz, bindingAttrs); authzErr != nil {
		return field.ErrorList{field.Forbidden(fieldPath, fmt.Sprintf(`changing status.allocation or status.reservedFor requires resource="resourceclaims/binding", verb="%s" permission: %s`, bindingAttrs.verb, authzErr))}
	}
	return nil
}
// AuthorizedForDeviceStatus checks if the caller is authorized to update
// status.devices by performing per-driver authorization checks using the
// associated-node / arbitrary-node verb prefix pattern on the synthetic
// resourceclaims/driver subresource. One forbidden error is produced per
// driver whose entries changed and whose check failed.
func AuthorizedForDeviceStatus(ctx context.Context, fieldPath *field.Path, a authorizer.Authorizer, newStatus, oldStatus resource.ResourceClaimStatus) field.ErrorList {
	driversToAuthz := getModifiedDrivers(newStatus, oldStatus)
	if driversToAuthz.Len() == 0 {
		// No device entries were added, removed, or modified.
		return nil
	}
	requestAttrs, err := filters.GetAuthorizerAttributes(ctx)
	if err != nil {
		return field.ErrorList{field.InternalError(fieldPath, fmt.Errorf("cannot build authorizer attributes: %w", err))}
	}
	// A service account running on the node the claim is allocated to may use
	// the narrower associated-node verb (falling back to arbitrary-node);
	// any other caller must hold the broader arbitrary-node permission.
	requestVerb := requestAttrs.GetVerb()
	verbs := []string{resourcev1.VerbPrefixArbitraryNode + requestVerb}
	if saAssociatedWithAllocatedNode(requestAttrs.GetUser(), nodeNameFromAllocation(newStatus.Allocation)) {
		verbs = []string{resourcev1.VerbPrefixAssociatedNode + requestVerb, resourcev1.VerbPrefixArbitraryNode + requestVerb}
	}
	var allErrs field.ErrorList
	// Iterate in sorted order for deterministic error output.
	for _, driverName := range sets.List(driversToAuthz) {
		if authzErr := checkDriverAuthorization(ctx, requestAttrs, verbs, driverName, a); authzErr != nil {
			allErrs = append(allErrs, field.Forbidden(fieldPath, fmt.Sprintf(`changing status.devices requires resource="resourceclaims/driver", verb="%s" permission: %s`, verbs, authzErr)))
		}
	}
	return allErrs
}
// checkDriverAuthorization authorizes an update against the synthetic
// resourceclaims/driver subresource for one driver. The verbs are ORed:
// the first one that is allowed wins; if none is allowed, the error from
// the first denial is returned.
func checkDriverAuthorization(ctx context.Context, baseAttrs authorizer.Attributes, verbs []string, driverName string, a authorizer.Authorizer) error {
	if len(verbs) == 0 {
		return fmt.Errorf("no verbs set for driver %s", driverName) // impossible for all inputs today
	}
	var firstErr error
	for _, verb := range verbs {
		driverAttrs := &syntheticSubresourceAttrs{
			Attributes:  baseAttrs,
			verb:        verb,
			subresource: resourcev1.SubresourceDriver,
			namespace:   baseAttrs.GetNamespace(),
			name:        driverName,
		}
		authzErr := checkAuthorization(ctx, a, driverAttrs)
		if authzErr == nil {
			// One permitted verb is sufficient.
			return nil
		}
		if firstErr == nil {
			firstErr = authzErr
		}
	}
	return firstErr
}
// getModifiedDrivers identifies all drivers whose device status entries were
// added, removed, or changed between the old and new ResourceClaim status.
// Each returned driver needs a separate authorization check before the
// update may proceed.
func getModifiedDrivers(newStatus, oldStatus resource.ResourceClaimStatus) sets.Set[string] {
	driversToAuthz := sets.Set[string]{}
	oldDevices := make(map[deviceKey]resource.AllocatedDeviceStatus, len(oldStatus.Devices))
	for _, d := range oldStatus.Devices {
		oldDevices[makeDeviceKey(d)] = d
	}
	// Check for new or modified device entries.
	for _, d := range newStatus.Devices {
		key := makeDeviceKey(d)
		oldDevice, ok := oldDevices[key]
		delete(oldDevices, key) // Remove from map to track processed devices.
		// If entry is new or changed, we need to authorize this driver.
		if !ok || !equality.Semantic.DeepEqual(oldDevice, d) {
			driversToAuthz.Insert(d.Driver)
		}
	}
	// Any device still left in oldDevices has no counterpart in the new
	// status, i.e. it was removed, which also requires authorization.
	for _, d := range oldDevices {
		driversToAuthz.Insert(d.Driver)
	}
	return driversToAuthz
}
// deviceKey uniquely identifies one allocated device status entry. ShareID
// is flattened to its value (empty string when nil) so the struct stays
// comparable and can be used as a map key.
type deviceKey struct {
	driver string
	pool string
	device string
	shareID string
}
// makeDeviceKey derives the comparable map key for a device status entry,
// treating a nil ShareID as the empty string.
func makeDeviceKey(d resource.AllocatedDeviceStatus) deviceKey {
	var shareID string
	if d.ShareID != nil {
		shareID = *d.ShareID
	}
	return deviceKey{
		driver:  d.Driver,
		pool:    d.Pool,
		device:  d.Device,
		shareID: shareID,
	}
}
// nodeNameFromAllocation extracts the node name from an allocation whose
// node selector pins the claim to exactly one node via a single
// `metadata.name In [<node>]` match field. Any other selector shape
// (including a missing allocation or selector) yields "".
func nodeNameFromAllocation(allocation *resource.AllocationResult) string {
	if allocation == nil || allocation.NodeSelector == nil {
		return ""
	}
	terms := allocation.NodeSelector.NodeSelectorTerms
	if len(terms) != 1 {
		return ""
	}
	if len(terms[0].MatchExpressions) != 0 || len(terms[0].MatchFields) != 1 {
		return ""
	}
	matchField := terms[0].MatchFields[0]
	if matchField.Key != "metadata.name" || matchField.Operator != core.NodeSelectorOpIn || len(matchField.Values) != 1 {
		return ""
	}
	return matchField.Values[0]
}
// saAssociatedWithAllocatedNode reports whether the caller is a service
// account whose credentials carry exactly one valid node-name extra
// attribute matching the node the claim is allocated to.
func saAssociatedWithAllocatedNode(u user.Info, allocatedNodeName string) bool {
	if allocatedNodeName == "" {
		return false
	}
	// Only service accounts carry a trustworthy node association.
	if _, _, err := serviceaccount.SplitUsername(u.GetName()); err != nil {
		return false
	}
	// Exactly one node-name extra attribute must be present.
	nodeNames := u.GetExtra()[serviceaccount.NodeNameKey]
	if len(nodeNames) != 1 {
		return false
	}
	// The claimed node name must be well-formed and match the allocated node.
	if errs := validation.ValidateNodeName(nodeNames[0], false); len(errs) != 0 {
		return false
	}
	return nodeNames[0] == allocatedNodeName
}
// checkAuthorization runs a single authorization check and converts any
// outcome other than an explicit allow into a forbidden status error whose
// message combines the authorizer's reason and error, if any.
func checkAuthorization(ctx context.Context, a authorizer.Authorizer, attributes authorizer.Attributes) error {
	decision, reason, err := a.Authorize(ctx, attributes)
	// An authorizer like RBAC could encounter evaluation errors and still
	// allow the request, so the decision is checked before the error here.
	if decision == authorizer.DecisionAllow {
		return nil
	}
	msg := reason
	if err != nil {
		if len(reason) > 0 {
			msg = fmt.Sprintf("%v: %s", err, reason)
		} else {
			msg = err.Error()
		}
	}
	return responsewriters.ForbiddenStatusError(attributes, msg)
}
// syntheticSubresourceAttrs wraps the attributes of the incoming request,
// overriding verb, subresource, namespace, and name so that authorization
// can be checked against a synthetic subresource (resourceclaims/binding or
// resourceclaims/driver) instead of the subresource actually requested.
// All other attributes (user, API group, resource, ...) pass through to the
// embedded Attributes.
type syntheticSubresourceAttrs struct {
	authorizer.Attributes
	verb string
	subresource string
	namespace string
	name string
}

func (a *syntheticSubresourceAttrs) GetVerb() string { return a.verb }
func (a *syntheticSubresourceAttrs) GetSubresource() string { return a.subresource }
func (a *syntheticSubresourceAttrs) GetNamespace() string { return a.namespace }
func (a *syntheticSubresourceAttrs) GetName() string { return a.name }

View file

@ -0,0 +1,740 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"context"
"fmt"
"net/http"
"reflect"
"testing"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/resource"
)
// TestGetModifiedDrivers contains the unit tests for the getModifiedDrivers function.
//
// A driver counts as "modified" when a device entry for it was added,
// removed, or had its content changed between oldStatus and newStatus.
func TestGetModifiedDrivers(t *testing.T) {
	// Helper to create AllocatedDeviceStatus
	devStatus := func(driver, pool, device string, network *resource.NetworkDeviceData) resource.AllocatedDeviceStatus {
		return resource.AllocatedDeviceStatus{
			Driver:      driver,
			Pool:        pool,
			Device:      device,
			NetworkData: network,
		}
	}
	// Helper to create ResourceClaimStatus
	claimStatus := func(devices ...resource.AllocatedDeviceStatus) resource.ResourceClaimStatus {
		return resource.ResourceClaimStatus{
			Devices: devices,
		}
	}
	testCases := map[string]struct {
		newStatus resource.ResourceClaimStatus
		oldStatus resource.ResourceClaimStatus
		expected  sets.Set[string]
	}{
		"no changes": {
			newStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", nil),
				devStatus("driver-b", "pool-1", "dev-2", nil),
			),
			oldStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", nil),
				devStatus("driver-b", "pool-1", "dev-2", nil),
			),
			expected: sets.Set[string]{},
		},
		"add one device": {
			newStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", nil),
				devStatus("driver-b", "pool-1", "dev-2", nil), // New
			),
			oldStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", nil),
			),
			expected: sets.New[string]("driver-b"),
		},
		"add device for existing driver": {
			newStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", nil),
				devStatus("driver-a", "pool-1", "dev-2", nil), // New
			),
			oldStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", nil),
			),
			expected: sets.New[string]("driver-a"),
		},
		"remove one device": {
			newStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", nil),
			),
			oldStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", nil),
				devStatus("driver-b", "pool-1", "dev-2", nil), // Removed
			),
			expected: sets.New[string]("driver-b"),
		},
		"remove device for driver that still has other devices": {
			newStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", nil),
			),
			oldStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", nil),
				devStatus("driver-a", "pool-1", "dev-2", nil), // Removed
			),
			expected: sets.New[string]("driver-a"),
		},
		"modify one device": {
			newStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", &resource.NetworkDeviceData{InterfaceName: "eth0", IPs: []string{"192.168.7.1/24"}}), // Modified
			),
			oldStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", &resource.NetworkDeviceData{InterfaceName: "eth0"}),
			),
			expected: sets.New[string]("driver-a"),
		},
		"modify device for driver, no change for other driver": {
			newStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", &resource.NetworkDeviceData{InterfaceName: "eth0", IPs: []string{"192.168.7.1/24"}}), // Modified
				devStatus("driver-b", "pool-1", "dev-2", nil),
			),
			oldStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", &resource.NetworkDeviceData{InterfaceName: "eth0"}),
				devStatus("driver-b", "pool-1", "dev-2", nil),
			),
			expected: sets.New[string]("driver-a"),
		},
		"complex change (add, remove, modify)": {
			newStatus: claimStatus(
				// driver-a: dev-1 modified
				devStatus("driver-a", "pool-1", "dev-1", &resource.NetworkDeviceData{InterfaceName: "eth0", IPs: []string{"192.168.7.1/24"}}), // Modified
				// driver-b: dev-2 unchanged
				devStatus("driver-b", "pool-1", "dev-2", nil),
				// driver-c: dev-3 added
				devStatus("driver-c", "pool-1", "dev-3", nil),
			),
			oldStatus: claimStatus(
				// driver-a: dev-1 old state
				devStatus("driver-a", "pool-1", "dev-1", &resource.NetworkDeviceData{InterfaceName: "eth0"}),
				// driver-b: dev-2 unchanged
				devStatus("driver-b", "pool-1", "dev-2", nil),
				// driver-d: dev-4 removed
				devStatus("driver-d", "pool-1", "dev-4", nil),
			),
			expected: sets.New[string]("driver-a", "driver-c", "driver-d"),
		},
		"empty to empty": {
			newStatus: claimStatus(),
			oldStatus: claimStatus(),
			expected:  sets.Set[string]{},
		},
		"empty to one device": {
			newStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", nil),
			),
			oldStatus: claimStatus(),
			expected:  sets.New[string]("driver-a"),
		},
		"one device to empty": {
			newStatus: claimStatus(),
			oldStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", nil),
			),
			expected: sets.New[string]("driver-a"),
		},
		"replace device with same key but different content": {
			newStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", &resource.NetworkDeviceData{InterfaceName: "eth0", IPs: []string{"192.168.7.1/24"}}),
			),
			oldStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-1", &resource.NetworkDeviceData{InterfaceName: "eth0"}),
			),
			expected: sets.New[string]("driver-a"),
		},
		"replace device with different key for same driver": {
			newStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-NEW", nil),
			),
			oldStatus: claimStatus(
				devStatus("driver-a", "pool-1", "dev-OLD", nil),
			),
			expected: sets.New[string]("driver-a"),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			result := getModifiedDrivers(tc.newStatus, tc.oldStatus)
			// reflect.DeepEqual distinguishes a nil set from an empty one, so
			// this also pins that getModifiedDrivers returns a non-nil
			// (possibly empty) set.
			if !reflect.DeepEqual(result, tc.expected) {
				t.Errorf("Expected driver set %v, but got %v", tc.expected, result)
			}
		})
	}
}
// fakeAuthorizer records authorization calls and returns preconfigured decisions.
// The key format is "verb/resource/subresource/name".
type fakeAuthorizer struct {
	rules      map[string]authorizer.Decision
	err        error
	callCounts map[string]int
}

// Authorize looks up the preconfigured decision for the attributes' key and
// counts every call. A configured err forces a denial regardless of rules.
func (f *fakeAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
	key := fmt.Sprintf("%s/%s/%s/%s", a.GetVerb(), a.GetResource(), a.GetSubresource(), a.GetName())
	// Lazily initialize the call counter so that a zero-value fake works too.
	if f.callCounts == nil {
		f.callCounts = make(map[string]int)
	}
	f.callCounts[key]++
	if f.err != nil {
		return authorizer.DecisionDeny, "forced error", f.err
	}
	decision, ok := f.rules[key]
	if !ok {
		return authorizer.DecisionDeny, "no rule matched", nil
	}
	return decision, "", nil
}
// withRequestContext builds a context carrying user info and request info,
// simulating what GetAuthorizerAttributes expects to find on the context.
func withRequestContext(ctx context.Context, u user.Info, verb string) context.Context {
	info := &genericapirequest.RequestInfo{
		IsResourceRequest: true,
		Verb:              verb,
		APIGroup:          "resource.k8s.io",
		APIVersion:        "v1",
		Resource:          "resourceclaims",
		Subresource:       "status",
		Namespace:         "default",
		Name:              "test-claim",
	}
	ctx = genericapirequest.WithUser(ctx, u)
	ctx = genericapirequest.WithRequestInfo(ctx, info)
	// GetAuthorizerAttributes also needs an http.Request in context for audit, but
	// the function doesn't fail without it — we simulate by using a dummy request.
	// NOTE(review): req.Context() returns the very ctx handed to
	// NewRequestWithContext, so this round-trip looks like a no-op — confirm
	// whether it can be dropped.
	req, _ := http.NewRequestWithContext(ctx, http.MethodPut, "/", nil)
	ctx = req.Context()
	return ctx
}
// singleNodeAllocation returns an AllocationResult whose node selector pins
// the claim to exactly one node, matched by metadata.name.
func singleNodeAllocation(nodeName string) *resource.AllocationResult {
	term := core.NodeSelectorTerm{
		MatchFields: []core.NodeSelectorRequirement{{
			Key:      "metadata.name",
			Operator: core.NodeSelectorOpIn,
			Values:   []string{nodeName},
		}},
	}
	return &resource.AllocationResult{
		NodeSelector: &core.NodeSelector{
			NodeSelectorTerms: []core.NodeSelectorTerm{term},
		},
	}
}
// TestAuthorizedForBinding verifies that AuthorizedForBinding only consults
// the authorizer when status.allocation or status.reservedFor changed, and
// that a denied check produces exactly one field error. The synthetic check
// key is "update/resourceclaims/binding/" (no per-driver name segment).
func TestAuthorizedForBinding(t *testing.T) {
	saName := "system:serviceaccount:kube-system:scheduler"
	testUser := &user.DefaultInfo{Name: saName}
	fp := field.NewPath("status")
	testcases := []struct {
		name       string
		newStatus  resource.ResourceClaimStatus
		oldStatus  resource.ResourceClaimStatus
		authz      *fakeAuthorizer
		expectErrs int
	}{
		{
			// Only status.devices differs: the binding check must be skipped,
			// so an empty rule set still yields no errors.
			name: "no allocation or reservedFor change, no check needed",
			newStatus: resource.ResourceClaimStatus{
				Devices: []resource.AllocatedDeviceStatus{{Driver: "d", Pool: "p", Device: "dev"}},
			},
			oldStatus:  resource.ResourceClaimStatus{},
			authz:      &fakeAuthorizer{rules: map[string]authorizer.Decision{}},
			expectErrs: 0,
		},
		{
			name: "allocation changed, authorized",
			newStatus: resource.ResourceClaimStatus{
				Allocation: singleNodeAllocation("node-1"),
			},
			oldStatus: resource.ResourceClaimStatus{},
			authz: &fakeAuthorizer{rules: map[string]authorizer.Decision{
				"update/resourceclaims/binding/": authorizer.DecisionAllow,
			}},
			expectErrs: 0,
		},
		{
			name: "allocation changed, not authorized",
			newStatus: resource.ResourceClaimStatus{
				Allocation: singleNodeAllocation("node-1"),
			},
			oldStatus:  resource.ResourceClaimStatus{},
			authz:      &fakeAuthorizer{rules: map[string]authorizer.Decision{}},
			expectErrs: 1,
		},
		{
			name: "reservedFor changed, authorized",
			newStatus: resource.ResourceClaimStatus{
				ReservedFor: []resource.ResourceClaimConsumerReference{{Resource: "pods", Name: "pod-1", UID: "uid-1"}},
			},
			oldStatus: resource.ResourceClaimStatus{},
			authz: &fakeAuthorizer{rules: map[string]authorizer.Decision{
				"update/resourceclaims/binding/": authorizer.DecisionAllow,
			}},
			expectErrs: 0,
		},
		{
			name: "reservedFor changed, not authorized",
			newStatus: resource.ResourceClaimStatus{
				ReservedFor: []resource.ResourceClaimConsumerReference{{Resource: "pods", Name: "pod-1", UID: "uid-1"}},
			},
			oldStatus:  resource.ResourceClaimStatus{},
			authz:      &fakeAuthorizer{rules: map[string]authorizer.Decision{}},
			expectErrs: 1,
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := withRequestContext(context.Background(), testUser, "update")
			errs := AuthorizedForBinding(ctx, fp, tc.authz, tc.newStatus, tc.oldStatus)
			if len(errs) != tc.expectErrs {
				t.Errorf("expected %d errors, got %d: %v", tc.expectErrs, len(errs), errs)
			}
		})
	}
}
// TestAuthorizedForDeviceStatus verifies the per-driver authorization of
// status.devices updates. The fake rule keys follow the pattern
// "<verb-prefix><request-verb>/resourceclaims/driver/<driver-name>":
// "associated-node:<verb>" is tried only when the requesting service
// account is associated with the claim's single allocated node, and
// "arbitrary-node:<verb>" acts as the fallback for every case.
func TestAuthorizedForDeviceStatus(t *testing.T) {
	saName := "system:serviceaccount:kube-system:dra-driver"
	nodeName := "test-node"
	driverName := "test-driver"
	fp := field.NewPath("status", "devices")
	testcases := []struct {
		name       string
		newStatus  resource.ResourceClaimStatus
		oldStatus  resource.ResourceClaimStatus
		user       user.Info
		authz      *fakeAuthorizer
		verb       string
		expectErrs int
	}{
		{
			// Identical device status on both sides: no driver was modified,
			// so no authorization check is performed at all.
			name: "no drivers modified",
			newStatus: resource.ResourceClaimStatus{
				Devices: []resource.AllocatedDeviceStatus{
					{Driver: driverName, Pool: "pool", Device: "device"},
				},
			},
			oldStatus: resource.ResourceClaimStatus{
				Devices: []resource.AllocatedDeviceStatus{
					{Driver: driverName, Pool: "pool", Device: "device"},
				},
			},
			user:       &user.DefaultInfo{Name: saName},
			authz:      &fakeAuthorizer{rules: map[string]authorizer.Decision{}},
			verb:       "update",
			expectErrs: 0,
		},
		{
			name: "associated-node: SA on same node, allowed by associated-node verb",
			newStatus: resource.ResourceClaimStatus{
				Allocation: singleNodeAllocation(nodeName),
				Devices:    []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-new"}},
			},
			oldStatus: resource.ResourceClaimStatus{
				Allocation: singleNodeAllocation(nodeName),
				Devices:    []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-old"}},
			},
			user: &user.DefaultInfo{Name: saName, Extra: map[string][]string{serviceaccount.NodeNameKey: {nodeName}}},
			authz: &fakeAuthorizer{rules: map[string]authorizer.Decision{
				fmt.Sprintf("associated-node:update/resourceclaims/driver/%s", driverName): authorizer.DecisionAllow,
			}},
			verb:       "update",
			expectErrs: 0,
		},
		{
			name: "associated-node: SA on same node, allowed by arbitrary-node fallback",
			newStatus: resource.ResourceClaimStatus{
				Allocation: singleNodeAllocation(nodeName),
				Devices:    []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-new"}},
			},
			oldStatus: resource.ResourceClaimStatus{
				Allocation: singleNodeAllocation(nodeName),
				Devices:    []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-old"}},
			},
			user: &user.DefaultInfo{Name: saName, Extra: map[string][]string{serviceaccount.NodeNameKey: {nodeName}}},
			authz: &fakeAuthorizer{rules: map[string]authorizer.Decision{
				fmt.Sprintf("arbitrary-node:update/resourceclaims/driver/%s", driverName): authorizer.DecisionAllow,
			}},
			verb:       "update",
			expectErrs: 0,
		},
		{
			name: "associated-node: SA on same node, neither verb allowed",
			newStatus: resource.ResourceClaimStatus{
				Allocation: singleNodeAllocation(nodeName),
				Devices:    []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-new"}},
			},
			oldStatus: resource.ResourceClaimStatus{
				Allocation: singleNodeAllocation(nodeName),
				Devices:    []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-old"}},
			},
			user:       &user.DefaultInfo{Name: saName, Extra: map[string][]string{serviceaccount.NodeNameKey: {nodeName}}},
			authz:      &fakeAuthorizer{rules: map[string]authorizer.Decision{}},
			verb:       "update",
			expectErrs: 1,
		},
		{
			name: "SA on different node, only arbitrary-node checked",
			newStatus: resource.ResourceClaimStatus{
				Allocation: singleNodeAllocation("other-node"),
				Devices:    []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-new"}},
			},
			oldStatus: resource.ResourceClaimStatus{
				Devices: []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-old"}},
			},
			user: &user.DefaultInfo{Name: saName, Extra: map[string][]string{serviceaccount.NodeNameKey: {nodeName}}},
			authz: &fakeAuthorizer{rules: map[string]authorizer.Decision{
				fmt.Sprintf("arbitrary-node:update/resourceclaims/driver/%s", driverName): authorizer.DecisionAllow,
			}},
			verb:       "update",
			expectErrs: 0,
		},
		{
			name: "SA on different node, associated-node not checked, denied",
			newStatus: resource.ResourceClaimStatus{
				Allocation: singleNodeAllocation("other-node"),
				Devices:    []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-new"}},
			},
			oldStatus: resource.ResourceClaimStatus{
				Devices: []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-old"}},
			},
			user: &user.DefaultInfo{Name: saName, Extra: map[string][]string{serviceaccount.NodeNameKey: {nodeName}}},
			// Only grant associated-node, which should NOT be checked since nodes differ
			authz: &fakeAuthorizer{rules: map[string]authorizer.Decision{
				fmt.Sprintf("associated-node:update/resourceclaims/driver/%s", driverName): authorizer.DecisionAllow,
			}},
			verb:       "update",
			expectErrs: 1,
		},
		{
			name: "no node association (controller), only arbitrary-node checked, allowed",
			newStatus: resource.ResourceClaimStatus{
				Allocation: singleNodeAllocation(nodeName),
				Devices:    []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-new"}},
			},
			oldStatus: resource.ResourceClaimStatus{
				Devices: []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-old"}},
			},
			user: &user.DefaultInfo{Name: saName}, // no node-name extra
			authz: &fakeAuthorizer{rules: map[string]authorizer.Decision{
				fmt.Sprintf("arbitrary-node:update/resourceclaims/driver/%s", driverName): authorizer.DecisionAllow,
			}},
			verb:       "update",
			expectErrs: 0,
		},
		{
			name: "no node association (controller), denied",
			newStatus: resource.ResourceClaimStatus{
				Allocation: singleNodeAllocation(nodeName),
				Devices:    []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-new"}},
			},
			oldStatus: resource.ResourceClaimStatus{
				Devices: []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-old"}},
			},
			user:       &user.DefaultInfo{Name: saName},
			authz:      &fakeAuthorizer{rules: map[string]authorizer.Decision{}},
			verb:       "update",
			expectErrs: 1,
		},
		{
			// A selector listing two node names does not identify a single
			// node, so the associated-node path must not apply.
			name: "multi-node claim (no single node in selector), only arbitrary-node",
			newStatus: resource.ResourceClaimStatus{
				Allocation: &resource.AllocationResult{
					NodeSelector: &core.NodeSelector{
						NodeSelectorTerms: []core.NodeSelectorTerm{
							{MatchFields: []core.NodeSelectorRequirement{
								{Key: "metadata.name", Operator: core.NodeSelectorOpIn, Values: []string{"node-a", "node-b"}},
							}},
						},
					},
				},
				Devices: []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-new"}},
			},
			oldStatus: resource.ResourceClaimStatus{
				Devices: []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-old"}},
			},
			user: &user.DefaultInfo{Name: saName, Extra: map[string][]string{serviceaccount.NodeNameKey: {"node-a"}}},
			authz: &fakeAuthorizer{rules: map[string]authorizer.Decision{
				fmt.Sprintf("arbitrary-node:update/resourceclaims/driver/%s", driverName): authorizer.DecisionAllow,
			}},
			verb:       "update",
			expectErrs: 0,
		},
		{
			// The request verb from the context ("patch") must appear in the
			// synthetic verb, not a hard-coded "update".
			name: "patch verb propagated",
			newStatus: resource.ResourceClaimStatus{
				Allocation: singleNodeAllocation(nodeName),
				Devices:    []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-new"}},
			},
			oldStatus: resource.ResourceClaimStatus{
				Devices: []resource.AllocatedDeviceStatus{{Driver: driverName, Pool: "pool", Device: "dev-old"}},
			},
			user: &user.DefaultInfo{Name: saName, Extra: map[string][]string{serviceaccount.NodeNameKey: {nodeName}}},
			authz: &fakeAuthorizer{rules: map[string]authorizer.Decision{
				fmt.Sprintf("associated-node:patch/resourceclaims/driver/%s", driverName): authorizer.DecisionAllow,
			}},
			verb:       "patch",
			expectErrs: 0,
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := withRequestContext(context.Background(), tc.user, tc.verb)
			errs := AuthorizedForDeviceStatus(ctx, fp, tc.authz, tc.newStatus, tc.oldStatus)
			if len(errs) != tc.expectErrs {
				t.Errorf("expected %d errors, got %d: %v", tc.expectErrs, len(errs), errs)
			}
		})
	}
}
// TestNodeNameFromAllocation verifies that nodeNameFromAllocation extracts a
// node name only from a selector with exactly one term containing exactly
// one matchFields requirement on "metadata.name" using the In operator with
// a single value; every other shape yields the empty string.
func TestNodeNameFromAllocation(t *testing.T) {
	testCases := []struct {
		name       string
		allocation *resource.AllocationResult
		expected   string
	}{
		{
			name:       "nil allocation",
			allocation: nil,
			expected:   "",
		},
		{
			name:       "nil node selector",
			allocation: &resource.AllocationResult{},
			expected:   "",
		},
		{
			name:       "exact single-node match",
			allocation: singleNodeAllocation("worker-1"),
			expected:   "worker-1",
		},
		{
			name: "multiple values",
			allocation: &resource.AllocationResult{
				NodeSelector: &core.NodeSelector{
					NodeSelectorTerms: []core.NodeSelectorTerm{
						{MatchFields: []core.NodeSelectorRequirement{
							{Key: "metadata.name", Operator: core.NodeSelectorOpIn, Values: []string{"node-a", "node-b"}},
						}},
					},
				},
			},
			expected: "",
		},
		{
			// Only matchFields counts: a hostname label expression is not a
			// node-name match.
			name: "match expressions instead of match fields",
			allocation: &resource.AllocationResult{
				NodeSelector: &core.NodeSelector{
					NodeSelectorTerms: []core.NodeSelectorTerm{
						{MatchExpressions: []core.NodeSelectorRequirement{
							{Key: "kubernetes.io/hostname", Operator: core.NodeSelectorOpIn, Values: []string{"node-1"}},
						}},
					},
				},
			},
			expected: "",
		},
		{
			name: "multiple terms",
			allocation: &resource.AllocationResult{
				NodeSelector: &core.NodeSelector{
					NodeSelectorTerms: []core.NodeSelectorTerm{
						{MatchFields: []core.NodeSelectorRequirement{
							{Key: "metadata.name", Operator: core.NodeSelectorOpIn, Values: []string{"node-1"}},
						}},
						{MatchFields: []core.NodeSelectorRequirement{
							{Key: "metadata.name", Operator: core.NodeSelectorOpIn, Values: []string{"node-2"}},
						}},
					},
				},
			},
			expected: "",
		},
		{
			name: "wrong key",
			allocation: &resource.AllocationResult{
				NodeSelector: &core.NodeSelector{
					NodeSelectorTerms: []core.NodeSelectorTerm{
						{MatchFields: []core.NodeSelectorRequirement{
							{Key: "metadata.namespace", Operator: core.NodeSelectorOpIn, Values: []string{"node-1"}},
						}},
					},
				},
			},
			expected: "",
		},
		{
			name: "wrong operator",
			allocation: &resource.AllocationResult{
				NodeSelector: &core.NodeSelector{
					NodeSelectorTerms: []core.NodeSelectorTerm{
						{MatchFields: []core.NodeSelectorRequirement{
							{Key: "metadata.name", Operator: core.NodeSelectorOpNotIn, Values: []string{"node-1"}},
						}},
					},
				},
			},
			expected: "",
		},
		{
			// Even two identical requirements disqualify the selector: the
			// function demands exactly one matchFields entry.
			name: "extra match fields",
			allocation: &resource.AllocationResult{
				NodeSelector: &core.NodeSelector{
					NodeSelectorTerms: []core.NodeSelectorTerm{
						{MatchFields: []core.NodeSelectorRequirement{
							{Key: "metadata.name", Operator: core.NodeSelectorOpIn, Values: []string{"node-1"}},
							{Key: "metadata.name", Operator: core.NodeSelectorOpIn, Values: []string{"node-1"}},
						}},
					},
				},
			},
			expected: "",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := nodeNameFromAllocation(tc.allocation)
			if result != tc.expected {
				t.Errorf("expected %q, got %q", tc.expected, result)
			}
		})
	}
}
// TestSAAssociatedWithAllocatedNode verifies that saAssociatedWithAllocatedNode
// returns true only for a service-account identity whose node-name extra
// attribute holds exactly one valid node name equal to the (non-empty)
// allocated node name. Non-SA users, missing/multiple/invalid node-name
// values, and empty allocated names are all rejected.
func TestSAAssociatedWithAllocatedNode(t *testing.T) {
	validSA := &user.DefaultInfo{
		Name: "system:serviceaccount:default:dra-driver-sa",
		Extra: map[string][]string{
			serviceaccount.NodeNameKey: {"worker-node-1"},
		},
	}
	testCases := []struct {
		name              string
		userInfo          user.Info
		allocatedNodeName string
		expected          bool
	}{
		{
			name:              "SA on matching node",
			userInfo:          validSA,
			allocatedNodeName: "worker-node-1",
			expected:          true,
		},
		{
			name:              "SA on different node",
			userInfo:          validSA,
			allocatedNodeName: "worker-node-2",
			expected:          false,
		},
		{
			name:              "empty allocated node name",
			userInfo:          validSA,
			allocatedNodeName: "",
			expected:          false,
		},
		{
			// A kubelet ("system:node:...") identity is not a service account
			// even when the node-name extra matches.
			name: "not a service account (kubelet identity)",
			userInfo: &user.DefaultInfo{
				Name: "system:node:worker-node-1",
				Extra: map[string][]string{
					serviceaccount.NodeNameKey: {"worker-node-1"},
				},
			},
			allocatedNodeName: "worker-node-1",
			expected:          false,
		},
		{
			name: "not a service account (regular user)",
			userInfo: &user.DefaultInfo{
				Name: "jane-doe",
				Extra: map[string][]string{
					serviceaccount.NodeNameKey: {"worker-node-1"},
				},
			},
			allocatedNodeName: "worker-node-1",
			expected:          false,
		},
		{
			name: "service account missing node name extra attribute",
			userInfo: &user.DefaultInfo{
				Name:  "system:serviceaccount:default:dra-driver-sa",
				Extra: map[string][]string{},
			},
			allocatedNodeName: "worker-node-1",
			expected:          false,
		},
		{
			name: "service account with multiple node names in extra attribute",
			userInfo: &user.DefaultInfo{
				Name: "system:serviceaccount:default:dra-driver-sa",
				Extra: map[string][]string{
					serviceaccount.NodeNameKey: {"worker-node-1", "worker-node-2"},
				},
			},
			allocatedNodeName: "worker-node-1",
			expected:          false,
		},
		{
			name: "service account with invalid node name format",
			userInfo: &user.DefaultInfo{
				Name: "system:serviceaccount:default:dra-driver-sa",
				Extra: map[string][]string{
					serviceaccount.NodeNameKey: {"invalid_node_name!"},
				},
			},
			allocatedNodeName: "invalid_node_name!",
			expected:          false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := saAssociatedWithAllocatedNode(tc.userInfo, tc.allocatedNodeName)
			if result != tc.expected {
				t.Errorf("Expected %v, got %v", tc.expected, result)
			}
		})
	}
}

View file

@ -215,6 +215,11 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
rbacv1helpers.NewRule("update", "patch").Groups(schedulingGroup).Resources("podgroups/status").RuleOrDie(),
)
}
if utilfeature.DefaultFeatureGate.Enabled(features.DRAResourceClaimGranularStatusAuthorization) {
rules = append(rules,
rbacv1helpers.NewRule("update", "patch").Groups(resourceGroup).Resources("resourceclaims/binding").RuleOrDie(),
)
}
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "resource-claim-controller"},
Rules: rules,

View file

@ -657,6 +657,11 @@ func ClusterRoles() []rbacv1.ClusterRole {
if utilfeature.DefaultFeatureGate.Enabled(features.DRADeviceTaintRules) {
kubeSchedulerRules = append(kubeSchedulerRules, rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("devicetaintrules").RuleOrDie())
}
if utilfeature.DefaultFeatureGate.Enabled(features.DRAResourceClaimGranularStatusAuthorization) {
kubeSchedulerRules = append(kubeSchedulerRules,
rbacv1helpers.NewRule("update", "patch").Groups(resourceGroup).Resources("resourceclaims/binding").RuleOrDie(),
)
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.GenericWorkload) {
kubeSchedulerRules = append(kubeSchedulerRules, rbacv1helpers.NewRule(Read...).Groups(schedulingGroup).Resources("podgroups").RuleOrDie())

View file

@ -997,6 +997,13 @@ items:
- get
- list
- watch
- apiGroups:
- resource.k8s.io
resources:
- resourceclaims/binding
verbs:
- patch
- update
- apiGroups:
- scheduling.k8s.io
resources:

View file

@ -951,6 +951,13 @@ items:
verbs:
- create
- delete
- apiGroups:
- resource.k8s.io
resources:
- resourceclaims/binding
verbs:
- patch
- update
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:

View file

@ -1309,6 +1309,13 @@ items:
- create
- patch
- update
- apiGroups:
- resource.k8s.io
resources:
- resourceclaims/binding
verbs:
- patch
- update
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:

View file

@ -47,6 +47,23 @@ const (
// in pod.Spec.Resources.Requests, in that case, a valid name has to be specified
// explicitly in device class.
ResourceDeviceClassPrefix string = "deviceclass.resource.kubernetes.io/"
// The constants below are all related to synthetic authorization checks for resourceclaims.status writes.
// SubresourceBinding is the synthetic subresource used for authorization
// of updates to status.allocation and status.reservedFor.
SubresourceBinding = "binding"
// SubresourceDriver is the synthetic subresource used for per-driver
// authorization of updates to status.devices.
SubresourceDriver = "driver"
// VerbPrefixAssociatedNode is the verb prefix for requests from a service account
// on the same node as the claim's allocation. The full verb is
// "associated-node:<request-verb>", e.g. "associated-node:update".
VerbPrefixAssociatedNode = "associated-node:"
// VerbPrefixArbitraryNode is the verb prefix for requests not associated
// with a specific node (controllers, etc.). The full verb is
// "arbitrary-node:<request-verb>", e.g. "arbitrary-node:update".
VerbPrefixArbitraryNode = "arbitrary-node:"
)
// +genclient

View file

@ -65,6 +65,7 @@
| DRAPartitionableDevices | :ballot_box_with_check:&nbsp;1.36+ | | 1.33-1.35 | 1.36 | | | DynamicResourceAllocation | [code](https://cs.k8s.io/?q=%5CbDRAPartitionableDevices%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDRAPartitionableDevices%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
| DRAPrioritizedList | :ballot_box_with_check:&nbsp;1.34+ | | 1.33 | 1.34-1.35 | 1.36 | | DynamicResourceAllocation | [code](https://cs.k8s.io/?q=%5CbDRAPrioritizedList%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDRAPrioritizedList%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
| DRAResourceClaimDeviceStatus | :ballot_box_with_check:&nbsp;1.33+ | | 1.32 | 1.33 | | | | [code](https://cs.k8s.io/?q=%5CbDRAResourceClaimDeviceStatus%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDRAResourceClaimDeviceStatus%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
| DRAResourceClaimGranularStatusAuthorization | :ballot_box_with_check:&nbsp;1.36+ | | | 1.36 | | | DRAResourceClaimDeviceStatus<br>DynamicResourceAllocation | [code](https://cs.k8s.io/?q=%5CbDRAResourceClaimGranularStatusAuthorization%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDRAResourceClaimGranularStatusAuthorization%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
| DRAResourcePoolStatus | | | 1.36 | | | | DynamicResourceAllocation | [code](https://cs.k8s.io/?q=%5CbDRAResourcePoolStatus%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDRAResourcePoolStatus%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
| DRASchedulerFilterTimeout | :ballot_box_with_check:&nbsp;1.34+ | | | 1.34 | | | DynamicResourceAllocation | [code](https://cs.k8s.io/?q=%5CbDRASchedulerFilterTimeout%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDRASchedulerFilterTimeout%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |
| DRAWorkloadResourceClaims | | | 1.36 | | | | DynamicResourceAllocation<br>GenericWorkload | [code](https://cs.k8s.io/?q=%5CbDRAWorkloadResourceClaims%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/kubernetes) [KEPs](https://cs.k8s.io/?q=%5CbDRAWorkloadResourceClaims%5Cb&i=nope&files=&excludeFiles=CHANGELOG&repos=kubernetes/enhancements) |

View file

@ -639,6 +639,12 @@
lockToDefault: false
preRelease: Beta
version: "1.33"
- name: DRAResourceClaimGranularStatusAuthorization
versionedSpecs:
- default: true
lockToDefault: false
preRelease: Beta
version: "1.36"
- name: DRAResourcePoolStatus
versionedSpecs:
- default: false

View file

@ -33,6 +33,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
resourceapi "k8s.io/api/resource/v1"
resourcealphaapi "k8s.io/api/resource/v1alpha3"
resourcev1beta1 "k8s.io/api/resource/v1beta1"
@ -44,6 +45,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
applyv1 "k8s.io/client-go/applyconfigurations/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/events"
@ -3112,7 +3114,7 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {
}).Should(gomega.MatchError(gomega.ContainSubstring("exceeded quota: object-count, requested: count/resourceclaims.resource.k8s.io=1, used: count/resourceclaims.resource.k8s.io=1, limited: count/resourceclaims.resource.k8s.io=1")), "creating second claim not allowed")
})
f.It("must be possible for the driver to update the ResourceClaim.Status.Devices once allocated", f.WithFeatureGate(features.DRAResourceClaimDeviceStatus), func(ctx context.Context) {
f.It("must be impossible for a node ServiceAccount to update the non-node ResourceClaim.Status.Devices once allocated", f.WithFeatureGate(features.DRAResourceClaimDeviceStatus), func(ctx context.Context) {
tCtx := f.TContext(ctx)
claim := b.ExternalClaim()
pod := b.PodExternal(claim.Name)
@ -3154,17 +3156,163 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {
if !ok {
framework.Failf("pod got scheduled to node %s without a plugin", scheduledPod.Spec.NodeName)
}
updatedResourceClaim, err := plugin.UpdateStatus(ctx, allocatedResourceClaim)
_, err = plugin.UpdateStatus(ctx, allocatedResourceClaim)
gomega.Expect(apierrors.IsInvalid(err)).To(gomega.BeTrueBecause("expected invalid error: %v", err))
gomega.Expect(err.Error()).To(gomega.ContainSubstring("cannot arbitrary-node:update"))
})
f.It("must be possible for a control-plane ServiceAccount to update the ResourceClaim.Status.Devices once allocated", f.WithFeatureGate(features.DRAResourceClaimDeviceStatus), func(ctx context.Context) {
tCtx := f.TContext(ctx)
claim := b.ExternalClaim()
pod := b.PodExternal(claim.Name)
b.Create(tCtx, claim, pod)
// Waits for the ResourceClaim to be allocated and the pod to be scheduled.
b.TestPod(tCtx, pod)
ginkgo.By("Creating a ServiceAccount and Role to update ResourceClaim status")
saName := "claim-status-updater-sa"
roleName := "claim-status-updater-role"
bindingName := "claim-status-updater-binding"
sa := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{Name: saName, Namespace: f.Namespace.Name},
}
_, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(ctx, sa, metav1.CreateOptions{})
framework.ExpectNoError(err)
// Create ClusterRole with 'update' permission on 'resourceclaims/status'
role := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: roleName, Namespace: f.Namespace.Name},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{resourceapi.SchemeGroupVersion.Group},
Resources: []string{"resourceclaims/status"},
Verbs: []string{"update", "patch"},
},
{
// Also need 'get' to fetch the claim for the update operation
APIGroups: []string{resourceapi.SchemeGroupVersion.Group},
Resources: []string{"resourceclaims"},
Verbs: []string{"get"},
},
},
}
_, err = f.ClientSet.RbacV1().ClusterRoles().Create(ctx, role, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.DeferCleanup(func(ctx context.Context) {
err := f.ClientSet.RbacV1().ClusterRoles().Delete(ctx, roleName, metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
framework.ExpectNoError(err)
}
})
// Create ClusterRoleBinding
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{Name: bindingName, Namespace: f.Namespace.Name},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: saName,
Namespace: f.Namespace.Name,
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: roleName,
APIGroup: rbacv1.GroupName,
},
}
_, err = f.ClientSet.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.DeferCleanup(func(ctx context.Context) {
err := f.ClientSet.RbacV1().ClusterRoleBindings().Delete(ctx, bindingName, metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
framework.ExpectNoError(err)
}
})
// Create a new clientset impersonating the ServiceAccount
saClientConfig := rest.CopyConfig(f.ClientConfig())
saClientConfig.Impersonate = rest.ImpersonationConfig{
UserName: fmt.Sprintf("system:serviceaccount:%s:%s", f.Namespace.Name, saName),
}
saClient, err := kubernetes.NewForConfig(saClientConfig)
framework.ExpectNoError(err)
// Get the allocated claim using the admin client
allocatedResourceClaim, err := f.ClientSet.ResourceV1().ResourceClaims(f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(allocatedResourceClaim).ToNot(gomega.BeNil())
gomega.Expect(allocatedResourceClaim.Status.Allocation).ToNot(gomega.BeNil())
gomega.Expect(allocatedResourceClaim.Status.Allocation.Devices.Results).To(gomega.HaveLen(1))
ginkgo.By("Setting the device status a first time (via ServiceAccount without proper RBAC) for driver " + b.DriverName())
allocatedResourceClaim.Status.Devices = append(allocatedResourceClaim.Status.Devices,
resourceapi.AllocatedDeviceStatus{
Driver: allocatedResourceClaim.Status.Allocation.Devices.Results[0].Driver,
Pool: allocatedResourceClaim.Status.Allocation.Devices.Results[0].Pool,
Device: allocatedResourceClaim.Status.Allocation.Devices.Results[0].Device,
Conditions: []metav1.Condition{{Type: "a", Status: "True", Message: "c", Reason: "d", LastTransitionTime: metav1.NewTime(time.Now().Truncate(time.Second))}},
Data: &runtime.RawExtension{Raw: []byte(`{"foo":"bar"}`)},
NetworkData: &resourceapi.NetworkDeviceData{
InterfaceName: "inf1",
IPs: []string{"10.9.8.0/24", "2001:db8::/64"},
HardwareAddress: "bc:1c:b6:3e:b8:25",
},
})
// Update the ResourceClaim status using the impersonated client
// Wait for the initial ClusterRoleBinding to propagate AND for the DRA validation to explicitly reject it.
gomega.Eventually(ctx, func(ctx context.Context) error {
_, err := saClient.ResourceV1().ResourceClaims(f.Namespace.Name).UpdateStatus(ctx, allocatedResourceClaim, metav1.UpdateOptions{})
if err == nil {
return fmt.Errorf("expected request to be rejected by DRA validation, but it succeeded")
}
// If we get a pure Forbidden (403), the basic RBAC cache hasn't updated yet. Trigger a retry.
if apierrors.IsForbidden(err) {
return fmt.Errorf("standard RBAC cache not synced yet, got pure 403 Forbidden")
}
if apierrors.IsInvalid(err) && strings.Contains(err.Error(), "is forbidden") {
return nil
}
return fmt.Errorf("unexpected error: %w", err)
}).WithTimeout(15*time.Second).WithPolling(1*time.Second).Should(gomega.Succeed(), "failed waiting for DRA validation to reject the update")
ginkgo.By("Setting the device status a first time (via ServiceAccount with RBAC with specific driver name) for driver " + b.DriverName())
newRole := role.DeepCopy()
newRole.Rules = append(newRole.Rules, rbacv1.PolicyRule{
APIGroups: []string{resourceapi.SchemeGroupVersion.Group},
Resources: []string{"resourceclaims/driver"},
Verbs: []string{"arbitrary-node:update", "arbitrary-node:patch"},
ResourceNames: []string{b.DriverName()}, // allow for the specific drivers
})
_, err = f.ClientSet.RbacV1().ClusterRoles().Update(ctx, newRole, metav1.UpdateOptions{})
framework.ExpectNoError(err)
// Use Eventually to wait for RBAC propagation
var updatedResourceClaim *resourceapi.ResourceClaim
gomega.Eventually(ctx, func(ctx context.Context) error {
var updateErr error
updatedResourceClaim, updateErr = saClient.ResourceV1().ResourceClaims(f.Namespace.Name).UpdateStatus(ctx, allocatedResourceClaim, metav1.UpdateOptions{})
// If it's a forbidden error, the RBAC cache hasn't updated yet, so we return the error to trigger a retry
return updateErr
}).WithTimeout(15*time.Second).WithPolling(1*time.Second).Should(gomega.Succeed(), "failed to update ResourceClaim status after adding RBAC rule")
gomega.Expect(updatedResourceClaim).ToNot(gomega.BeNil())
gomega.Expect(updatedResourceClaim.Status.Devices).To(gomega.Equal(allocatedResourceClaim.Status.Devices))
ginkgo.By("Updating the device status")
ginkgo.By("Updating the device status (via ServiceAccount with RBAC allowing all drivers)")
newRole = role.DeepCopy()
newRole.Rules = append(newRole.Rules, rbacv1.PolicyRule{
APIGroups: []string{resourceapi.SchemeGroupVersion.Group},
Resources: []string{"resourceclaims/driver"},
Verbs: []string{"arbitrary-node:update", "arbitrary-node:patch"},
})
_, err = f.ClientSet.RbacV1().ClusterRoles().Update(ctx, newRole, metav1.UpdateOptions{})
framework.ExpectNoError(err)
updatedResourceClaim.Status.Devices[0] = resourceapi.AllocatedDeviceStatus{
Driver: allocatedResourceClaim.Status.Allocation.Devices.Results[0].Driver,
Pool: allocatedResourceClaim.Status.Allocation.Devices.Results[0].Pool,
Device: allocatedResourceClaim.Status.Allocation.Devices.Results[0].Device,
ShareID: shareID,
Conditions: []metav1.Condition{{Type: "e", Status: "True", Message: "g", Reason: "h", LastTransitionTime: metav1.NewTime(time.Now().Truncate(time.Second))}},
Data: &runtime.RawExtension{Raw: []byte(`{"bar":"foo"}`)},
NetworkData: &resourceapi.NetworkDeviceData{
@ -3174,11 +3322,16 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {
},
}
updatedResourceClaim2, err := plugin.UpdateStatus(ctx, updatedResourceClaim)
framework.ExpectNoError(err)
var updatedResourceClaim2 *resourceapi.ResourceClaim
gomega.Eventually(ctx, func(ctx context.Context) error {
var updateErr error
updatedResourceClaim2, updateErr = saClient.ResourceV1().ResourceClaims(f.Namespace.Name).UpdateStatus(ctx, updatedResourceClaim, metav1.UpdateOptions{})
return updateErr
}).WithTimeout(15*time.Second).WithPolling(1*time.Second).Should(gomega.Succeed(), "failed to update ResourceClaim status after wildcard RBAC rule")
gomega.Expect(updatedResourceClaim2).ToNot(gomega.BeNil())
gomega.Expect(updatedResourceClaim2.Status.Devices).To(gomega.Equal(updatedResourceClaim.Status.Devices))
ginkgo.By("Verifying the final status with admin client")
getResourceClaim, err := f.ClientSet.ResourceV1().ResourceClaims(f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(getResourceClaim).ToNot(gomega.BeNil())
@ -3190,6 +3343,7 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {
nodes := drautils.NewNodes(f, 1, 4)
driver := drautils.NewDriver(f, nodes, drautils.DriverResources(1))
driver.WithKubelet = false
b := drautils.NewBuilder(f, driver)
f.It("must apply per-node permission checks", func(ctx context.Context) {
tCtx := f.TContext(ctx)
@ -3334,6 +3488,79 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {
mustFailToDelete(fictionalNodeClient, "fictional plugin", createdClusterSlice, matchVAPDeniedError(fictionalNodeName, createdClusterSlice))
mustDelete(f.ClientSet, "admin", createdClusterSlice)
})
f.It("must be possible for a node ServiceAccount to update the node ResourceClaim.Status.Devices once allocated", f.WithFeatureGate(features.DRAResourceClaimDeviceStatus), func(ctx context.Context) {
	tCtx := f.TContext(ctx)
	// Create a claim plus a pod that consumes it.
	claim := b.ExternalClaim()
	pod := b.PodExternal(claim.Name)
	b.Create(tCtx, claim, pod)
	// Waits for the ResourceClaim to be allocated and the pod to be scheduled.
	b.TestPod(tCtx, pod)
	// Fetch the claim with the admin client and sanity-check that exactly one
	// device was allocated; the rest of the test writes status for that device.
	allocatedResourceClaim, err := f.ClientSet.ResourceV1().ResourceClaims(f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	gomega.Expect(allocatedResourceClaim).ToNot(gomega.BeNil())
	gomega.Expect(allocatedResourceClaim.Status.Allocation).ToNot(gomega.BeNil())
	gomega.Expect(allocatedResourceClaim.Status.Allocation.Devices.Results).To(gomega.HaveLen(1))
	// The scheduled pod tells us which node's plugin must perform the update.
	scheduledPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	gomega.Expect(scheduledPod).ToNot(gomega.BeNil())
	shareID := (*string)(allocatedResourceClaim.Status.Allocation.Devices.Results[0].ShareID)
	ginkgo.By("Setting the device status a first time")
	// Append a status entry for the allocated device, mirroring its
	// driver/pool/device identity from the allocation result.
	allocatedResourceClaim.Status.Devices = append(allocatedResourceClaim.Status.Devices,
		resourceapi.AllocatedDeviceStatus{
			Driver: allocatedResourceClaim.Status.Allocation.Devices.Results[0].Driver,
			Pool: allocatedResourceClaim.Status.Allocation.Devices.Results[0].Pool,
			Device: allocatedResourceClaim.Status.Allocation.Devices.Results[0].Device,
			ShareID: shareID,
			Conditions: []metav1.Condition{{Type: "a", Status: "True", Message: "c", Reason: "d", LastTransitionTime: metav1.NewTime(time.Now().Truncate(time.Second))}},
			Data: &runtime.RawExtension{Raw: []byte(`{"foo":"bar"}`)},
			NetworkData: &resourceapi.NetworkDeviceData{
				InterfaceName: "inf1",
				IPs: []string{"10.9.8.0/24", "2001:db8::/64"},
				HardwareAddress: "bc:1c:b6:3e:b8:25",
			},
		})
	// Updates the ResourceClaim from the driver on the same node as the pod.
	plugin, ok := driver.Nodes[scheduledPod.Spec.NodeName]
	if !ok {
		framework.Failf("pod got scheduled to node %s without a plugin", scheduledPod.Spec.NodeName)
	}
	updatedResourceClaim, err := plugin.UpdateStatus(ctx, allocatedResourceClaim)
	framework.ExpectNoError(err)
	gomega.Expect(updatedResourceClaim).ToNot(gomega.BeNil())
	gomega.Expect(updatedResourceClaim.Status.Devices).To(gomega.Equal(allocatedResourceClaim.Status.Devices))
	ginkgo.By("Updating the device status")
	// Overwrite the entry written above to confirm that the node-local plugin
	// may modify an existing device status, not only add new entries.
	updatedResourceClaim.Status.Devices[0] = resourceapi.AllocatedDeviceStatus{
		Driver: allocatedResourceClaim.Status.Allocation.Devices.Results[0].Driver,
		Pool: allocatedResourceClaim.Status.Allocation.Devices.Results[0].Pool,
		Device: allocatedResourceClaim.Status.Allocation.Devices.Results[0].Device,
		ShareID: shareID,
		Conditions: []metav1.Condition{{Type: "e", Status: "True", Message: "g", Reason: "h", LastTransitionTime: metav1.NewTime(time.Now().Truncate(time.Second))}},
		Data: &runtime.RawExtension{Raw: []byte(`{"bar":"foo"}`)},
		NetworkData: &resourceapi.NetworkDeviceData{
			InterfaceName: "inf2",
			IPs: []string{"10.9.8.1/24", "2001:db8::1/64"},
			HardwareAddress: "bc:1c:b6:3e:b8:26",
		},
	}
	updatedResourceClaim2, err := plugin.UpdateStatus(ctx, updatedResourceClaim)
	framework.ExpectNoError(err)
	gomega.Expect(updatedResourceClaim2).ToNot(gomega.BeNil())
	gomega.Expect(updatedResourceClaim2.Status.Devices).To(gomega.Equal(updatedResourceClaim.Status.Devices))
	// Read the claim back with the admin client to verify the update was persisted.
	getResourceClaim, err := f.ClientSet.ResourceV1().ResourceClaims(f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	gomega.Expect(getResourceClaim).ToNot(gomega.BeNil())
	gomega.Expect(getResourceClaim.Status.Devices).To(gomega.Equal(updatedResourceClaim.Status.Devices))
})
})
multipleDrivers := func(nodeV1beta1, nodeV1 bool) {

View file

@ -17,7 +17,11 @@ rules:
verbs: ["get"]
- apiGroups: ["resource.k8s.io"]
resources: ["resourceclaims/status"]
verbs: ["update"]
verbs: ["patch", "update"]
- apiGroups: ["resource.k8s.io"]
resources: ["resourceclaims/driver"]
verbs: ["associated-node:patch", "associated-node:update"]
resourceNames: ["dra-kubelet-plugin-driver-name"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]

View file

@ -82,6 +82,11 @@ func (b *Builder) ClassName() string {
return b.namespace + b.Driver.NameSuffix + "-class"
}
// DriverName returns the default device driver name.
func (b *Builder) DriverName() string {
	driverName := b.Driver.Name
	return driverName
}
// Class returns the device Class that the builder's other objects
// reference.
func (b *Builder) Class() *DeviceClassWrapper {

View file

@ -474,7 +474,9 @@ func (d *Driver) SetUp(tCtx ktesting.TContext, kubeletRootDir string, nodes *Nod
// Create service account and corresponding RBAC rules.
d.serviceAccountName = "dra-kubelet-plugin-" + d.Name + d.InstanceSuffix + "-service-account"
content := example.PluginPermissions
content = strings.ReplaceAll(content, "dra-kubelet-plugin-namespace", tCtx.Namespace())
content = strings.ReplaceAll(content, "dra-kubelet-plugin-driver-name", d.Name)
content = strings.ReplaceAll(content, "dra-kubelet-plugin", "dra-kubelet-plugin-"+d.Name+d.InstanceSuffix)
d.createFromYAML(tCtx, []byte(content), tCtx.Namespace())

View file

@ -0,0 +1,295 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"context"
"fmt"
"strings"
"testing"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
resourceapi "k8s.io/api/resource/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
featuregatetesting "k8s.io/component-base/featuregate/testing"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/integration/framework"
)
// TestResourceClaimGranularStatusAuthorization exercises the fine-grained
// authorization checks on ResourceClaim status updates (KEP-4817): with the
// DRAResourceClaimGranularStatusAuthorization gate on, changing
// status.devices additionally requires permission on the
// resourceclaims/driver subresource (with an associated-node: or
// arbitrary-node: verb prefix), and changing status.allocation or
// status.reservedFor requires permission on resourceclaims/binding, on top
// of the regular resourceclaims/status update permission.
func TestResourceClaimGranularStatusAuthorization(t *testing.T) {
	// Enable Feature Gates Globally for the test run
	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DynamicResourceAllocation, true)
	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAResourceClaimDeviceStatus, true)
	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAResourceClaimGranularStatusAuthorization, true)
	const (
		ns        = "dra-authz-test"
		saName    = "dra-plugin-sa"
		claimName = "test-claim"
		nodeName  = "worker-1"
	)
	testcases := []struct {
		// name identifies the subtest.
		name string
		// preAllocate makes the admin client set a baseline allocation
		// (pinned to nodeName) before the test update is attempted.
		preAllocate bool
		// impersonateExtra is added to the impersonated ServiceAccount's
		// user info, e.g. the node-name claim consumed by the
		// associated-node authorization check.
		impersonateExtra map[string][]string
		// setupRBAC installs extra permissions beyond the baseline
		// front-door rules created for every subtest below.
		setupRBAC func(t *testing.T, adminClient clientset.Interface)
		// updateClaim mutates the freshly fetched claim's status before
		// UpdateStatus is called with the impersonated client.
		updateClaim func(c *resourceapi.ResourceClaim)
		// verifyErr asserts on the outcome of the UpdateStatus call.
		verifyErr func(t *testing.T, err error)
	}{
		{
			name:        "fails to update status.devices without driver permission",
			preAllocate: true,
			setupRBAC:   func(t *testing.T, adminClient clientset.Interface) {}, // No extra RBAC beyond front-door
			updateClaim: func(c *resourceapi.ResourceClaim) {
				c.Status.Devices = []resourceapi.AllocatedDeviceStatus{
					{Driver: "test-driver", Pool: "pool1", Device: "dev1"},
				}
			},
			verifyErr: func(t *testing.T, err error) {
				// The rejection surfaces as a validation (Invalid) error
				// whose message mentions the missing driver permission.
				if err == nil || !apierrors.IsInvalid(err) || !strings.Contains(err.Error(), "Forbidden: changing status.devices requires") {
					t.Errorf("Expected Invalid/Forbidden error, got: %v", err)
				}
			},
		},
		{
			name:        "succeeds with associated-node permission for same-node SA",
			preAllocate: true,
			// Simulate a node-bound ServiceAccount token: the node-name
			// extra must match the node the claim is allocated to.
			impersonateExtra: map[string][]string{
				"authentication.kubernetes.io/node-name": {nodeName},
			},
			setupRBAC: func(t *testing.T, adminClient clientset.Interface) {
				createRoleAndBinding(t, adminClient, ns, saName, "node-local-driver",
					[]string{"resourceclaims/driver"}, []string{"associated-node:update"})
			},
			updateClaim: func(c *resourceapi.ResourceClaim) {
				c.Status.Devices = []resourceapi.AllocatedDeviceStatus{
					{Driver: "test-driver", Pool: "pool1", Device: "dev1"},
				}
			},
			verifyErr: func(t *testing.T, err error) {
				if err != nil {
					t.Errorf("Expected success via associated-node, got: %v", err)
				}
			},
		},
		{
			name:        "fails deallocation without binding permission",
			preAllocate: true,
			setupRBAC:   func(t *testing.T, adminClient clientset.Interface) {},
			updateClaim: func(c *resourceapi.ResourceClaim) {
				// Clearing the allocation is a binding-level change.
				c.Status.Allocation = nil
			},
			verifyErr: func(t *testing.T, err error) {
				if err == nil || !apierrors.IsInvalid(err) || !strings.Contains(err.Error(), "Forbidden: changing status.allocation") {
					t.Errorf("Expected Invalid/Forbidden on unbind, got: %v", err)
				}
			},
		},
		{
			name:        "succeeds to update status.reservedFor with binding permission",
			preAllocate: true,
			setupRBAC: func(t *testing.T, adminClient clientset.Interface) {
				createClusterRoleAndBinding(t, adminClient, ns, saName, "cluster-binding-updater-reserved",
					[]string{"resourceclaims/binding"}, []string{"update"})
			},
			updateClaim: func(c *resourceapi.ResourceClaim) {
				c.Status.ReservedFor = []resourceapi.ResourceClaimConsumerReference{
					{Resource: "pods", Name: "pod-1", UID: "uid-1"},
				}
			},
			verifyErr: func(t *testing.T, err error) {
				if err != nil {
					t.Errorf("Expected success, got: %v", err)
				}
			},
		},
		{
			name:        "fails when updating both allocation and devices but missing binding permission",
			preAllocate: true,
			setupRBAC: func(t *testing.T, adminClient clientset.Interface) {
				// Has driver permission, but LACKS binding permission
				createRoleAndBinding(t, adminClient, ns, saName, "driver-only",
					[]string{"resourceclaims/driver"}, []string{"arbitrary-node:update"})
			},
			updateClaim: func(c *resourceapi.ResourceClaim) {
				// Re-allocate to a different node (requires binding)
				if c.Status.Allocation != nil && c.Status.Allocation.NodeSelector != nil {
					c.Status.Allocation.NodeSelector.NodeSelectorTerms[0].MatchFields[0].Values = []string{"worker-2"}
				}
				// Change devices (requires driver)
				c.Status.Devices = []resourceapi.AllocatedDeviceStatus{
					{Driver: "test-driver", Pool: "pool1", Device: "dev2"},
				}
			},
			verifyErr: func(t *testing.T, err error) {
				if err == nil || !apierrors.IsInvalid(err) || !strings.Contains(err.Error(), "Forbidden: changing status.allocation") {
					t.Errorf("Expected Forbidden on simultaneous update missing binding, got: %v", err)
				}
			},
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			// Each subtest gets its own apiserver with RBAC authorization enabled.
			server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{
				"--runtime-config=api/all=true",
				"--authorization-mode=RBAC",
			}, framework.SharedEtcd())
			t.Cleanup(server.TearDownFn)
			adminClient := clientset.NewForConfigOrDie(server.ClientConfig)
			// Setup Namespace and Service Account
			_, err := adminClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{})
			if err != nil {
				t.Fatal(err)
			}
			_, err = adminClient.CoreV1().ServiceAccounts(ns).Create(context.TODO(), &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: saName}}, metav1.CreateOptions{})
			if err != nil {
				t.Fatal(err)
			}
			// Create the base ResourceClaim
			claim := &resourceapi.ResourceClaim{
				ObjectMeta: metav1.ObjectMeta{Name: claimName},
				Spec: resourceapi.ResourceClaimSpec{
					Devices: resourceapi.DeviceClaim{
						Requests: []resourceapi.DeviceRequest{{
							Name: "req-1",
							FirstAvailable: []resourceapi.DeviceSubRequest{{
								Name:            "subreq-1",
								DeviceClassName: "test-class",
							}},
						}},
					},
				},
			}
			_, err = adminClient.ResourceV1().ResourceClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{})
			if err != nil {
				t.Fatal(err)
			}
			// Admin Pre-allocation (if required by test)
			if tc.preAllocate {
				c, err := adminClient.ResourceV1().ResourceClaims(ns).Get(context.TODO(), claimName, metav1.GetOptions{})
				if err != nil {
					t.Fatalf("Failed to fetch claim for pre-allocation: %v", err)
				}
				// Pin the allocation to nodeName so the associated-node
				// check has a node to compare against.
				c.Status.Allocation = &resourceapi.AllocationResult{
					NodeSelector: &corev1.NodeSelector{
						NodeSelectorTerms: []corev1.NodeSelectorTerm{{
							MatchFields: []corev1.NodeSelectorRequirement{{Key: "metadata.name", Operator: corev1.NodeSelectorOpIn, Values: []string{nodeName}}},
						}},
					},
					Devices: resourceapi.DeviceAllocationResult{
						Results: []resourceapi.DeviceRequestAllocationResult{
							{Request: "req-1", Driver: "test-driver", Pool: "pool1", Device: "dev1"},
						},
					},
				}
				_, err = adminClient.ResourceV1().ResourceClaims(ns).UpdateStatus(context.TODO(), c, metav1.UpdateOptions{})
				if err != nil {
					t.Fatalf("Admin failed to set baseline allocation: %v", err)
				}
			}
			// Setup RBAC: baseline "front-door" permissions every subtest
			// needs (read the claim, update its status), plus any
			// case-specific rules.
			createRoleAndBinding(t, adminClient, ns, saName, "base-status-updater", []string{"resourceclaims/status"}, []string{"update", "patch"})
			createRoleAndBinding(t, adminClient, ns, saName, "base-claim-reader", []string{"resourceclaims"}, []string{"get"})
			tc.setupRBAC(t, adminClient)
			// Build the Impersonated Client
			saConfig := rest.CopyConfig(server.ClientConfig)
			saConfig.Impersonate = rest.ImpersonationConfig{
				UserName: fmt.Sprintf("system:serviceaccount:%s:%s", ns, saName),
				Extra:    tc.impersonateExtra,
			}
			saClient := clientset.NewForConfigOrDie(saConfig)
			// Execute Test Update
			cToUpdate, err := adminClient.ResourceV1().ResourceClaims(ns).Get(context.TODO(), claimName, metav1.GetOptions{})
			if err != nil {
				t.Fatalf("Failed to fetch claim before test execution: %v", err)
			}
			tc.updateClaim(cToUpdate)
			_, testErr := saClient.ResourceV1().ResourceClaims(ns).UpdateStatus(context.TODO(), cToUpdate, metav1.UpdateOptions{})
			// Verify results
			tc.verifyErr(t, testErr)
		})
	}
}
// createRoleAndBinding is a quick helper to assign namespaced RBAC rules:
// it grants the given verbs on the given resource.k8s.io resources to the
// ServiceAccount saName in namespace ns via a Role plus RoleBinding named
// roleName / roleName+"-binding". Already-existing objects are tolerated so
// the helper can be invoked repeatedly across test cases; any other error
// aborts the test.
func createRoleAndBinding(t *testing.T, client clientset.Interface, ns, saName, roleName string, resources, verbs []string) {
	// Report failures at the caller's line, not inside this helper.
	t.Helper()
	role := &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{Name: roleName},
		Rules: []rbacv1.PolicyRule{{
			APIGroups: []string{"resource.k8s.io"},
			Resources: resources,
			Verbs:     verbs,
		}},
	}
	if _, err := client.RbacV1().Roles(ns).Create(context.TODO(), role, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
		t.Fatal(err)
	}
	binding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: roleName + "-binding"},
		Subjects:   []rbacv1.Subject{{Kind: "ServiceAccount", Name: saName, Namespace: ns}},
		RoleRef:    rbacv1.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "Role", Name: roleName},
	}
	if _, err := client.RbacV1().RoleBindings(ns).Create(context.TODO(), binding, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
		t.Fatal(err)
	}
}
// createClusterRoleAndBinding is a helper for cluster-scoped synthetic checks
// (like binding): it grants the given verbs on the given resource.k8s.io
// resources cluster-wide to the ServiceAccount saName in namespace ns via a
// ClusterRole plus ClusterRoleBinding named roleName / roleName+"-binding".
// ns is only used to identify the subject ServiceAccount. Already-existing
// objects are tolerated; any other error aborts the test.
func createClusterRoleAndBinding(t *testing.T, client clientset.Interface, ns, saName, roleName string, resources, verbs []string) {
	// Report failures at the caller's line, not inside this helper.
	t.Helper()
	role := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: roleName},
		Rules: []rbacv1.PolicyRule{{
			APIGroups: []string{"resource.k8s.io"},
			Resources: resources,
			Verbs:     verbs,
		}},
	}
	if _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), role, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
		t.Fatal(err)
	}
	binding := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: roleName + "-binding"},
		Subjects:   []rbacv1.Subject{{Kind: "ServiceAccount", Name: saName, Namespace: ns}},
		RoleRef:    rbacv1.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: roleName},
	}
	if _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), binding, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
		t.Fatal(err)
	}
}