Migrate svcpolicies E2E test to docker
Signed-off-by: Derek Nola <derek.nola@suse.com>
parent 380a70ac7e
commit a61e581cb3
9 changed files with 211 additions and 174 deletions
.github/workflows/e2e.yaml (vendored): 6 changed lines

@@ -160,7 +160,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        dtest: [autoimport, basics, bootstraptoken, cacerts, etcd, hardened, lazypull, skew, secretsencryption, snapshotrestore, token, upgrade]
+        dtest: [autoimport, basics, bootstraptoken, cacerts, etcd, hardened, lazypull, skew, secretsencryption, snapshotrestore, svcpoliciesandfirewall, token, upgrade]
         arch: [amd64, arm64]
         exclude:
           - dtest: autoimport
@@ -169,6 +169,8 @@ jobs:
             arch: arm64
           - dtest: snapshotrestore
             arch: arm64
+          - dtest: svcpoliciesandfirewall
+            arch: arm64
     runs-on: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }}
     env:
       CHANNEL: ${{ needs.build-go-tests.outputs.channel }}
@@ -209,7 +211,7 @@ jobs:
         cd ./tests/docker/${{ matrix.dtest }}
 
         # These tests use rancher/systemd-node and have different flags.
-        CI_TESTS="autoimport hardened secretsencryption snapshotrestore token"
+        CI_TESTS="autoimport hardened secretsencryption snapshotrestore svcpoliciesandfirewall token"
         if [ ${{ matrix.dtest }} = "upgrade" ] || [ ${{ matrix.dtest }} = "skew" ]; then
           ./${{ matrix.dtest }}.test -test.timeout=0 -test.v -ginkgo.v -k3sImage=$K3S_IMAGE -channel=$CHANNEL
         elif [[ $CI_TESTS =~ ${{ matrix.dtest }} ]]; then
@@ -62,6 +62,23 @@ func ParseNodes(kubeconfigFile string) ([]corev1.Node, error) {
 	return nodes.Items, nil
 }
 
+// Returns all internal IPs of the nodes in the cluster as map[node][ip]
+func GetInternalIPs(kubeconfigFile string) (map[string]string, error) {
+	nodes, err := ParseNodes(kubeconfigFile)
+	if err != nil {
+		return nil, err
+	}
+	ips := make(map[string]string)
+	for _, node := range nodes {
+		for _, address := range node.Status.Addresses {
+			if address.Type == corev1.NodeInternalIP {
+				ips[node.Name] = address.Address
+			}
+		}
+	}
+	return ips, nil
+}
+
 func ParsePods(kubeconfigFile string) ([]corev1.Pod, error) {
 	clientSet, err := K8sClient(kubeconfigFile)
 	if err != nil {
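Illustrative usage sketch (not part of this commit): how a caller might consume the GetInternalIPs helper added above to resolve one node's internal IP by name. The wrapper name lookupServerIP is hypothetical; GetInternalIPs and its import path are taken from the diff.

package example

import (
	"fmt"

	"github.com/k3s-io/k3s/tests"
)

// lookupServerIP resolves a node's internal IP by node name using the
// map[nodeName]internalIP returned by tests.GetInternalIPs.
func lookupServerIP(kubeconfig, nodeName string) (string, error) {
	ips, err := tests.GetInternalIPs(kubeconfig)
	if err != nil {
		return "", err
	}
	ip, ok := ips[nodeName]
	if !ok {
		return "", fmt.Errorf("no internal IP recorded for node %q", nodeName)
	}
	return ip, nil
}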
tests/docker/resources/loadbalancer-allTraffic.yaml (new file): 61 lines added

@@ -0,0 +1,61 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nginx-config
+data:
+  default.conf: |
+    server {
+        listen 80;
+        location /ip {
+            return 200 "$remote_addr\n";
+        }
+        # Default location block to serve the default "Welcome to nginx" page
+        location / {
+            root /usr/share/nginx/html;
+            index index.html;
+        }
+    }
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: test-loadbalancer
+spec:
+  selector:
+    matchLabels:
+      k8s-app: nginx-app-loadbalancer
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        k8s-app: nginx-app-loadbalancer
+    spec:
+      containers:
+      - name: nginx
+        image: ranchertest/mytestcontainer
+        ports:
+        - containerPort: 80
+        volumeMounts:
+        - name: nginx-config-volume
+          mountPath: /etc/nginx/conf.d
+      volumes:
+      - name: nginx-config-volume
+        configMap:
+          name: nginx-config
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx-loadbalancer-svc
+  labels:
+    k8s-app: nginx-app-loadbalancer
+spec:
+  type: LoadBalancer
+  ports:
+    - port: 81
+      targetPort: 80
+      protocol: TCP
+      name: http
+  selector:
+    k8s-app: nginx-app-loadbalancer
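Illustrative sketch (not part of this commit): the /ip location above echoes $remote_addr, so a test can curl the LoadBalancer external IP on port 81 and check whether the nginx pod saw a flannel (10.42.x.x) source address, i.e. whether the client traffic was masqueraded. The helper name sourceIPIsMasqueraded is hypothetical; docker.RunCommand is the helper the migrated test below relies on.

package example

import (
	"strings"

	"github.com/k3s-io/k3s/tests/docker"
)

// sourceIPIsMasqueraded curls the /ip endpoint served by the ConfigMap above and
// reports whether the pod observed a cluster (10.42.x.x) source address.
func sourceIPIsMasqueraded(externalIP string) (bool, error) {
	out, err := docker.RunCommand("curl -m 5 -s -f http://" + externalIP + ":81/ip")
	if err != nil {
		return false, err
	}
	return strings.HasPrefix(strings.TrimSpace(out), "10.42."), nil
}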
tests/docker/resources/pod_client.yaml (new file): 44 lines added

@@ -0,0 +1,44 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: client
+  name: client-deployment
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: client
+  template:
+    metadata:
+      labels:
+        app: client
+    spec:
+      containers:
+      - image: ranchertest/mytestcontainer
+        imagePullPolicy: Always
+        name: client-curl
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: app
+                operator: In
+                values:
+                - client
+            topologyKey: kubernetes.io/hostname
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: client-curl
+  labels:
+    app: client
+    service: client-curl
+spec:
+  type: ClusterIP
+  selector:
+    app: client
+  ports:
+  - port: 8080
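Illustrative sketch (not part of this commit): the anti-affinity rule above spreads the two client pods across different nodes, so the suite can exec a curl from a specific client pod and observe which backend (and which source IP) answers for a given service and port. The wrapper curlFromClientPod is hypothetical; it mirrors the kubectl exec commands built in the test below.

package example

import (
	"fmt"

	"github.com/k3s-io/k3s/tests/docker"
)

// curlFromClientPod runs curl inside the named client pod against the given
// service:port and returns whatever the /ip endpoint reports.
func curlFromClientPod(kubeconfig, podName, svcAndPort string) (string, error) {
	cmd := fmt.Sprintf("kubectl exec --kubeconfig=%s %s -- curl -m 5 -s -f http://%s/ip",
		kubeconfig, podName, svcAndPort)
	return docker.RunCommand(cmd)
}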
@@ -14,57 +14,38 @@ import (
 	"text/template"
 
 	"github.com/k3s-io/k3s/tests"
-	"github.com/k3s-io/k3s/tests/e2e"
+	"github.com/k3s-io/k3s/tests/docker"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
 
-// Valid nodeOS: bento/ubuntu-24.04, opensuse/Leap-15.6.x86_64
-var nodeOS = flag.String("nodeOS", "bento/ubuntu-24.04", "VM operating system")
 var serverCount = flag.Int("serverCount", 1, "number of server nodes")
 var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
 var ci = flag.Bool("ci", false, "running on CI")
-var local = flag.Bool("local", false, "deploy a locally built K3s binary")
 
-func Test_E2EPoliciesAndFirewall(t *testing.T) {
+func Test_DockerPoliciesAndFirewall(t *testing.T) {
 	flag.Parse()
 	RegisterFailHandler(Fail)
-	suiteConfig, reporterConfig := GinkgoConfiguration()
-	RunSpecs(t, "Services Traffic Policies and Firewall config Suite", suiteConfig, reporterConfig)
+	RunSpecs(t, "Services Traffic Policies and Firewall config Suite")
 }
 
-var tc *e2e.TestConfig
-
-var _ = ReportAfterEach(e2e.GenReport)
+var tc *docker.TestConfig
 
 var _ = Describe("Verify Services Traffic policies and firewall config", Ordered, func() {
 
-	Context("Start cluster with minimal configuration", func() {
-		It("Starts up with no issues", func() {
+	Context("Setup Cluster", func() {
+		It("should provision servers and agents", func() {
 			var err error
-			if *local {
-				tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
-			} else {
-				tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
-			}
-			Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
-			By("CLUSTER CONFIG")
-			By("OS: " + *nodeOS)
-			By(tc.Status())
-		})
-
-		It("Checks Node Status", func() {
+			tc, err = docker.NewTestConfig("rancher/systemd-node")
+			Expect(err).NotTo(HaveOccurred())
+			Expect(tc.ProvisionServers(*serverCount)).To(Succeed())
+			Expect(tc.ProvisionAgents(*agentCount)).To(Succeed())
 			Eventually(func() error {
-				return tests.NodesReady(tc.KubeconfigFile, e2e.VagrantSlice(tc.AllNodes()))
-			}, "360s", "5s").Should(Succeed())
-			e2e.DumpNodes(tc.KubeconfigFile)
-		})
-
-		It("Checks Pod Status", func() {
+				return tests.CheckDefaultDeployments(tc.KubeconfigFile)
+			}, "60s", "5s").Should(Succeed())
 			Eventually(func() error {
-				return tests.AllPodsUp(tc.KubeconfigFile)
-			}, "300s", "5s").Should(Succeed())
-			e2e.DumpPods(tc.KubeconfigFile)
+				return tests.NodesReady(tc.KubeconfigFile, tc.GetNodeNames())
+			}, "40s", "5s").Should(Succeed())
 		})
 	})
 	Context("Deploy external traffic workloads to test external traffic policies", func() {
@@ -72,7 +53,7 @@ var _ = Describe("Verify Services Traffic policies and firewall config", Ordered
 		// Verifies that the external-ip is only set to the node IP where the server runs
 		// It also verifies that the service with external traffic policy=cluster has both node IPs as externalIP
 		It("Verify external traffic policy=local gets set up correctly", func() {
-			_, err := tc.DeployWorkload("loadbalancer.yaml")
+			_, err := tc.DeployWorkload("loadbalancer-allTraffic.yaml")
 			Expect(err).NotTo(HaveOccurred(), "loadbalancer not deployed")
 			_, err = tc.DeployWorkload("loadbalancer-extTrafficPol.yaml")
 			Expect(err).NotTo(HaveOccurred(), "loadbalancer-extTrafficPol not deployed")
@@ -91,11 +72,11 @@ var _ = Describe("Verify Services Traffic policies and firewall config", Ordered
 			}, "25s", "5s").ShouldNot(BeEmpty(), "server pod not found")
 
 			var serverNodeIP string
-			nodeIPs, err := e2e.GetNodeIPs(tc.KubeconfigFile)
+			nodeIPs, err := tests.GetInternalIPs(tc.KubeconfigFile)
 			Expect(err).NotTo(HaveOccurred(), "failed to get node IPs")
-			for _, node := range nodeIPs {
-				if node.Name == serverNodeName {
-					serverNodeIP = node.IPv4
+			for name, ip := range nodeIPs {
+				if name == serverNodeName {
+					serverNodeIP = ip
 				}
 			}
 
@@ -103,11 +84,12 @@ var _ = Describe("Verify Services Traffic policies and firewall config", Ordered
 			lbSvc := "nginx-loadbalancer-svc"
 			lbSvcExt := "nginx-loadbalancer-svc-ext"
 			Eventually(func() ([]string, error) {
-				return e2e.FetchExternalIPs(tc.KubeconfigFile, lbSvc)
+				return docker.FetchExternalIPs(tc.KubeconfigFile, lbSvc)
 			}, "25s", "5s").Should(HaveLen(2), "external IP count not equal to 2")
 
 			Eventually(func(g Gomega) {
-				externalIPs, _ := e2e.FetchExternalIPs(tc.KubeconfigFile, lbSvcExt)
+				externalIPs, err := docker.FetchExternalIPs(tc.KubeconfigFile, lbSvcExt)
+				g.Expect(err).NotTo(HaveOccurred(), "failed to fetch external IPs")
 				g.Expect(externalIPs).To(HaveLen(1), "more than 1 exernalIP found")
 				g.Expect(externalIPs[0]).To(Equal(serverNodeIP), "external IP does not match servernodeIP")
 			}, "25s", "5s").Should(Succeed())
@@ -117,22 +99,22 @@ var _ = Describe("Verify Services Traffic policies and firewall config", Ordered
 		// It also verifies that the service with external traffic policy=cluster can be accessed and the source IP is MASQ
 		It("Verify connectivity in external traffic policy=local", func() {
 			lbSvc := "nginx-loadbalancer-svc"
-			lbSvcExternalIPs, _ := e2e.FetchExternalIPs(tc.KubeconfigFile, lbSvc)
+			lbSvcExternalIPs, _ := docker.FetchExternalIPs(tc.KubeconfigFile, lbSvc)
 			lbSvcExt := "nginx-loadbalancer-svc-ext"
-			lbSvcExtExternalIPs, _ := e2e.FetchExternalIPs(tc.KubeconfigFile, lbSvcExt)
+			lbSvcExtExternalIPs, _ := docker.FetchExternalIPs(tc.KubeconfigFile, lbSvcExt)
 
 			// Verify connectivity to the external IP of the lbsvc service and the IP should be the flannel interface IP because of MASQ
 			for _, externalIP := range lbSvcExternalIPs {
 				Eventually(func() (string, error) {
 					cmd := "curl -m 5 -s -f http://" + externalIP + ":81/ip"
-					return e2e.RunCommand(cmd)
+					return docker.RunCommand(cmd)
 				}, "25s", "5s").Should(ContainSubstring("10.42"))
 			}
 
 			// Verify connectivity to the external IP of the lbsvcExt service and the IP should not be the flannel interface IP
 			Eventually(func() (string, error) {
 				cmd := "curl -m 5 -s -f http://" + lbSvcExtExternalIPs[0] + ":82/ip"
-				return e2e.RunCommand(cmd)
+				return docker.RunCommand(cmd)
 			}, "25s", "5s").ShouldNot(ContainSubstring("10.42"))
 		})
@@ -145,21 +127,15 @@ var _ = Describe("Verify Services Traffic policies and firewall config", Ordered
 
 			// Check that service exists
 			Eventually(func() (string, error) {
-				clusterIP, _ := e2e.FetchClusterIP(tc.KubeconfigFile, "nginx-loadbalancer-svc-int", false)
+				cmd := "kubectl get svc nginx-loadbalancer-svc-int -o jsonpath='{.spec.clusterIP}' --kubeconfig=" + tc.KubeconfigFile
+				clusterIP, _ := docker.RunCommand(cmd)
 				return clusterIP, nil
 			}, "25s", "5s").Should(ContainSubstring("10.43"))
 
 			// Check that client pods are running
-			Eventually(func() string {
-				pods, err := tests.ParsePods(tc.KubeconfigFile)
-				Expect(err).NotTo(HaveOccurred())
-				for _, pod := range pods {
-					if strings.Contains(pod.Name, "client-deployment") {
-						return string(pod.Status.Phase)
-					}
-				}
-				return ""
-			}, "50s", "5s").Should(Equal("Running"))
+			Eventually(func() error {
+				return tests.CheckDeployments([]string{"client-deployment"}, tc.KubeconfigFile)
+			}, "50s", "5s").Should(Succeed())
 		})
 
 		// Verifies that only the client pod running in the same node as the server pod can access the service
@@ -200,16 +176,16 @@ var _ = Describe("Verify Services Traffic policies and firewall config", Ordered
 
 			var workingCmd, nonWorkingCmd string
 			if serverNodeName == clientPod1Node {
-				workingCmd = "kubectl exec " + clientPod1 + " -- curl -m 5 -s -f http://nginx-loadbalancer-svc-int:83/ip"
-				nonWorkingCmd = "kubectl exec " + clientPod2 + " -- curl -m 5 -s -f http://nginx-loadbalancer-svc-int:83/ip"
+				workingCmd = fmt.Sprintf("kubectl exec --kubeconfig=%s %s -- curl -m 5 -s -f http://nginx-loadbalancer-svc-int:83/ip", tc.KubeconfigFile, clientPod1)
+				nonWorkingCmd = fmt.Sprintf("kubectl exec --kubeconfig=%s %s -- curl -m 5 -s -f http://nginx-loadbalancer-svc-int:83/ip", tc.KubeconfigFile, clientPod2)
 			}
 			if serverNodeName == clientPod2Node {
-				workingCmd = "kubectl exec " + clientPod2 + " -- curl -m 5 -s -f http://nginx-loadbalancer-svc-int:83/ip"
-				nonWorkingCmd = "kubectl exec " + clientPod1 + " -- curl -m 5 -s -f http://nginx-loadbalancer-svc-int:83/ip"
+				workingCmd = fmt.Sprintf("kubectl exec --kubeconfig=%s %s -- curl -m 5 -s -f http://nginx-loadbalancer-svc-int:83/ip", tc.KubeconfigFile, clientPod2)
+				nonWorkingCmd = fmt.Sprintf("kubectl exec --kubeconfig=%s %s -- curl -m 5 -s -f http://nginx-loadbalancer-svc-int:83/ip", tc.KubeconfigFile, clientPod1)
 			}
 
 			Eventually(func() (string, error) {
-				out, err := e2e.RunCommand(workingCmd)
+				out, err := docker.RunCommand(workingCmd)
 				return out, err
 			}, "25s", "5s").Should(SatisfyAny(
 				ContainSubstring(clientPod1IP),
@@ -218,7 +194,7 @@ var _ = Describe("Verify Services Traffic policies and firewall config", Ordered
 
 			// Check the non working command fails because of internal traffic policy=local
 			Eventually(func() bool {
-				_, err := e2e.RunCommand(nonWorkingCmd)
+				_, err := docker.RunCommand(nonWorkingCmd)
 				if err != nil && strings.Contains(err.Error(), "exit status") {
 					// Treat exit status as a successful condition
 					return true
@@ -228,9 +204,9 @@ var _ = Describe("Verify Services Traffic policies and firewall config", Ordered
 
 			// curling a service with internal traffic policy=cluster. It should work on both pods
 			for _, pod := range []string{clientPod1, clientPod2} {
-				cmd := "kubectl exec " + pod + " -- curl -m 5 -s -f http://nginx-loadbalancer-svc:81/ip"
+				cmd := "kubectl exec " + "--kubeconfig=" + tc.KubeconfigFile + " " + pod + " -- curl -m 5 -s -f http://nginx-loadbalancer-svc:81/ip"
 				Eventually(func() (string, error) {
-					return e2e.RunCommand(cmd)
+					return docker.RunCommand(cmd)
 				}, "20s", "5s").Should(SatisfyAny(
 					ContainSubstring(clientPod1IP),
 					ContainSubstring(clientPod2IP),
@@ -258,18 +234,18 @@ spec:
   selector:
     k8s-app: nginx-app-loadbalancer-ext
 `
-			// Remove the service nginx-loadbalancer-svc-ext
-			_, err := e2e.RunCommand("kubectl --kubeconfig=" + tc.KubeconfigFile + " delete svc nginx-loadbalancer-svc-ext")
+			By("Removing the service nginx-loadbalancer-svc-ext")
+			_, err := docker.RunCommand("kubectl --kubeconfig=" + tc.KubeconfigFile + " delete svc nginx-loadbalancer-svc-ext")
 			Expect(err).NotTo(HaveOccurred(), "failed to remove service nginx-loadbalancer-svc-ext")
 
 			// Parse and execute the template with the node IP
 			tmpl, err := template.New("service").Parse(serviceManifest)
 			Expect(err).NotTo(HaveOccurred())
 
-			nodeIPs, err := e2e.GetNodeIPs(tc.KubeconfigFile)
+			nodeIPs, err := tests.GetInternalIPs(tc.KubeconfigFile)
 			Expect(err).NotTo(HaveOccurred())
 			var filledManifest strings.Builder
-			err = tmpl.Execute(&filledManifest, struct{ NodeIP string }{NodeIP: nodeIPs[0].IPv4})
+			err = tmpl.Execute(&filledManifest, struct{ NodeIP string }{NodeIP: nodeIPs[tc.Servers[0].Name]})
 			Expect(err).NotTo(HaveOccurred())
 
 			// Write the filled manifest to a temporary file
@@ -281,44 +257,33 @@ spec:
 			Expect(err).NotTo(HaveOccurred())
 			tmpFile.Close()
 
-			// Apply the manifest using kubectl
-			applyCmd := fmt.Sprintf("kubectl apply -f %s", tmpFile.Name())
-			out, err := e2e.RunCommand(applyCmd)
+			By("Applying the new manifest")
+			applyCmd := fmt.Sprintf("kubectl apply --kubeconfig=%s -f %s", tc.KubeconfigFile, tmpFile.Name())
+			out, err := docker.RunCommand(applyCmd)
 			Expect(err).NotTo(HaveOccurred(), out)
 
 			Eventually(func() (string, error) {
-				clusterIP, _ := e2e.FetchClusterIP(tc.KubeconfigFile, "nginx-loadbalancer-svc-ext-firewall", false)
+				cmd := "kubectl get svc nginx-loadbalancer-svc-ext-firewall -o jsonpath='{.spec.clusterIP}' --kubeconfig=" + tc.KubeconfigFile
+				clusterIP, _ := docker.RunCommand(cmd)
 				return clusterIP, nil
 			}, "25s", "5s").Should(ContainSubstring("10.43"))
 		})
 
 		// Verify that only the allowed node can curl. That node should be able to curl both externalIPs (i.e. node.InternalIP)
-		It("Verify firewall is working", func() {
-			nodeIPs, err := e2e.GetNodeIPs(tc.KubeconfigFile)
+		It("Verifies firewall is working, only server-0 should be able to curl both IPs", func() {
+			nodeIPs, err := tests.GetInternalIPs(tc.KubeconfigFile)
 			Expect(err).NotTo(HaveOccurred())
-
-			var firstNode e2e.VagrantNode
-			var secondNode e2e.VagrantNode
-			for _, node := range tc.AllNodes() {
-				if node.String() == nodeIPs[0].Name {
-					firstNode = node
-				} else {
-					secondNode = node
-				}
-			}
-			fmt.Println("First node: ", firstNode.String())
-			fmt.Println("Second node: ", secondNode.String())
 			for _, ip := range nodeIPs {
 				// Verify connectivity from nodes[0] works because we passed its IP to the loadBalancerSourceRanges
 				Eventually(func() (string, error) {
-					cmd := "curl -m 5 -s -f http:// " + ip.IPv4 + ":82"
-					return firstNode.RunCmdOnNode(cmd)
+					cmd := "curl -m 5 -s -f http:// " + ip + ":82"
+					return tc.Servers[0].RunCmdOnNode(cmd)
 				}, "40s", "5s").Should(ContainSubstring("Welcome to nginx"))
 
 				// Verify connectivity from nodes[1] fails because we did not pass its IP to the loadBalancerSourceRanges
 				Eventually(func(g Gomega) error {
-					cmd := "curl -m 5 -s -f http:// " + ip.IPv4 + ":82"
-					_, err := secondNode.RunCmdOnNode(cmd)
+					cmd := "curl -m 5 -s -f http:// " + ip + ":82"
+					_, err := tc.Agents[0].RunCmdOnNode(cmd)
 					return err
 				}, "40s", "5s").Should(MatchError(ContainSubstring("exit status")))
 			}
@@ -332,13 +297,7 @@ var _ = AfterEach(func() {
 })
 
 var _ = AfterSuite(func() {
-	if failed {
-		Expect(e2e.SaveJournalLogs(tc.AllNodes())).To(Succeed())
-	} else {
-		Expect(e2e.GetCoverageReport(tc.AllNodes())).To(Succeed())
-	}
-	if !failed || *ci {
-		Expect(e2e.DestroyCluster()).To(Succeed())
-		Expect(os.Remove(tc.KubeconfigFile)).To(Succeed())
+	if *ci || (tc != nil && !failed) {
+		Expect(tc.Cleanup()).To(Succeed())
 	}
 })
@@ -1,6 +1,7 @@
 package docker
 
 import (
+	"encoding/json"
 	"fmt"
 	"math/rand"
 	"net"
@@ -608,6 +609,34 @@ func (config TestConfig) DeployWorkload(workload string) (string, error) {
 	return "", nil
 }
 
+type svcExternalIP struct {
+	IP     string `json:"ip"`
+	IPMode string `json:"ipMode"`
+}
+
+// FetchExternalIPs fetches the external IPs of a service
+func FetchExternalIPs(kubeconfig string, servicename string) ([]string, error) {
+	var externalIPs []string
+	cmd := "kubectl get svc " + servicename + " -o jsonpath='{.status.loadBalancer.ingress}' --kubeconfig=" + kubeconfig
+	output, err := RunCommand(cmd)
+	if err != nil {
+		return externalIPs, err
+	}
+
+	var svcExternalIPs []svcExternalIP
+	err = json.Unmarshal([]byte(output), &svcExternalIPs)
+	if err != nil {
+		return externalIPs, fmt.Errorf("error unmarshalling JSON: %v", err)
+	}
+
+	// Iterate over externalIPs and append each IP to the ips slice
+	for _, ipEntry := range svcExternalIPs {
+		externalIPs = append(externalIPs, ipEntry.IP)
+	}
+
+	return externalIPs, nil
+}
+
 // RestartCluster restarts the k3s service on each node given
 func RestartCluster(nodes []DockerNode) error {
 	for _, node := range nodes {
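Illustrative usage sketch (not part of this commit): polling the new docker.FetchExternalIPs helper until a LoadBalancer service reports the expected number of external IPs. The wrapper waitForExternalIPs is hypothetical; the migrated test above does the equivalent with Gomega's Eventually.

package example

import (
	"fmt"
	"time"

	"github.com/k3s-io/k3s/tests/docker"
)

// waitForExternalIPs polls FetchExternalIPs until the service reports the
// expected number of external IPs or the timeout expires.
func waitForExternalIPs(kubeconfig, service string, want int, timeout time.Duration) ([]string, error) {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		ips, err := docker.FetchExternalIPs(kubeconfig, service)
		if err == nil && len(ips) == want {
			return ips, nil
		}
		time.Sleep(5 * time.Second)
	}
	return nil, fmt.Errorf("service %s did not report %d external IPs within %s", service, want, timeout)
}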
tests/e2e/svcpoliciesandfirewall/Vagrantfile (vendored): 75 lines deleted

@@ -1,75 +0,0 @@
-ENV['VAGRANT_NO_PARALLEL'] = 'no'
-NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
-  ["server-0", "agent-0" ])
-NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
-  ['bento/ubuntu-24.04', 'bento/ubuntu-24.04'])
-GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
-RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
-GOCOVER = (ENV['E2E_GOCOVER'] || "")
-NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
-NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
-NETWORK4_PREFIX = "10.10.10"
-install_type = ""
-
-def provision(vm, role, role_num, node_num)
-  vm.box = NODE_BOXES[node_num]
-  vm.hostname = role
-  node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
-  vm.network "private_network", :ip => node_ip4, :netmask => "255.255.255.0"
-
-  scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts"
-  vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
-  load vagrant_defaults
-
-  defaultOSConfigure(vm)
-  addCoverageDir(vm, role, GOCOVER)
-  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
-
-  if role.include?("server") && role_num == 0
-    vm.provision :k3s, run: 'once' do |k3s|
-      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
-      k3s.args = "server "
-      k3s.config = <<~YAML
-        node-ip: #{node_ip4}
-        token: vagrant
-      YAML
-      k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
-    end
-  end
-  if role.include?("agent")
-    vm.provision :k3s, run: 'once' do |k3s|
-      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
-      k3s.args = "agent "
-      k3s.config = <<~YAML
-        server: https://#{NETWORK4_PREFIX}.100:6443
-        token: vagrant
-        node-ip: #{node_ip4}
-      YAML
-      k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
-    end
-  end
-end
-
-Vagrant.configure("2") do |config|
-  config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload", "vagrant-libvirt", "vagrant-scp"]
-  config.vm.provider "libvirt" do |v|
-    v.cpus = NODE_CPUS
-    v.memory = NODE_MEMORY
-    # We replicate the default prefix, but add a timestamp to enable parallel runs and cleanup of old VMs
-    v.default_prefix = File.basename(Dir.getwd) + "_" + Time.now.to_i.to_s + "_"
-  end
-
-  if NODE_ROLES.kind_of?(String)
-    NODE_ROLES = NODE_ROLES.split(" ", -1)
-  end
-  if NODE_BOXES.kind_of?(String)
-    NODE_BOXES = NODE_BOXES.split(" ", -1)
-  end
-
-  NODE_ROLES.each_with_index do |role, i|
-    role_num = role.split("-", -1).pop.to_i
-    config.vm.define role do |node|
-      provision(node.vm, role, role_num, i)
-    end
-  end
-end