UDN: Add Network Policy e2e tests #29195

Open. Wants to merge 1 commit into base: master.
6 changes: 6 additions & 0 deletions test/extended/networking/network_segmentation.go
@@ -900,6 +900,12 @@ func withCommand(cmdGenerationFn func() []string) podOption {
    }
}

func withLabels(labels map[string]string) podOption {
    return func(pod *podConfiguration) {
        pod.labels = labels
    }
}

func withNetworkAttachment(networks []nadapi.NetworkSelectionElement) podOption {
    return func(pod *podConfiguration) {
        pod.attachments = networks
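Note: withLabels follows the same functional-option pattern as the existing withCommand and withNetworkAttachment helpers; the policy tests below use it to attach the labels that a NetworkPolicy podSelector later matches. A minimal sketch of the assumed pattern (podOption, podConfiguration and podConfig already exist in this suite and are not part of this diff, so their exact definitions may differ):

    // Assumed shape of the existing helpers, shown only to illustrate how withLabels composes.
    type podOption func(*podConfiguration)

    func podConfig(name string, opts ...podOption) *podConfiguration {
        pod := &podConfiguration{name: name}
        for _, opt := range opts {
            opt(pod) // each option, e.g. withLabels, mutates the configuration in place
        }
        return pod
    }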
305 changes: 305 additions & 0 deletions test/extended/networking/network_segmentation_policy.go
@@ -0,0 +1,305 @@
package networking

import (
    "context"
    "fmt"
    "net"
    "strings"

    nadclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1"
    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
    exutil "github.com/openshift/origin/test/extended/util"

    v1 "k8s.io/api/core/v1"
    knet "k8s.io/api/networking/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/rand"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
    admissionapi "k8s.io/pod-security-admission/api"
)

var _ = ginkgo.Describe("[sig-network][OCPFeatureGate:NetworkSegmentation][Feature:UserDefinedPrimaryNetworks] Network Policies", func() {
    defer ginkgo.GinkgoRecover()

    oc := exutil.NewCLIWithPodSecurityLevel("network-segmentation-policy-e2e", admissionapi.LevelPrivileged)
    f := oc.KubeFramework()
    InOVNKubernetesContext(func() {
        const (
            nadName = "tenant-red"
            userDefinedNetworkIPv4Subnet = "10.128.0.0/16"
            userDefinedNetworkIPv6Subnet = "2014:100:200::0/60"
            port = 9000
            netPrefixLengthPerNode = 24
            randomStringLength = 5
            nameSpaceYellowSuffix = "yellow"
            namespaceBlueSuffix = "blue"
        )

        var (
            cs clientset.Interface
            nadClient nadclient.K8sCniCncfIoV1Interface
            allowServerPodLabel = map[string]string{"foo": "bar"}
            denyServerPodLabel = map[string]string{"abc": "xyz"}
        )

        ginkgo.BeforeEach(func() {
            cs = f.ClientSet

            var err error
            nadClient, err = nadclient.NewForConfig(f.ClientConfig())
            gomega.Expect(err).NotTo(gomega.HaveOccurred())

            namespaceYellow := getNamespaceName(f, nameSpaceYellowSuffix)
            namespaceBlue := getNamespaceName(f, namespaceBlueSuffix)
            for _, namespace := range []string{namespaceYellow, namespaceBlue} {
                ginkgo.By("Creating namespace " + namespace)
                ns, err := cs.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{
                    ObjectMeta: metav1.ObjectMeta{
                        Name: namespace,
                    },
                }, metav1.CreateOptions{})
                gomega.Expect(err).NotTo(gomega.HaveOccurred())
                f.AddNamespacesToDelete(ns)
            }
        })

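        // First table: a client pod and a server pod share one namespace and one primary UDN.
        // Connectivity to the server endpoint is verified first; a "default deny" NetworkPolicy
        // is then created and the same endpoint must become unreachable.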
        ginkgo.DescribeTable(
            "pods within namespace should be isolated when deny policy is present",
            func(
                topology string,
                clientPodConfig podConfiguration,
                serverPodConfig podConfiguration,
            ) {
                ginkgo.By("Creating the attachment configuration")
                netConfig := newNetworkAttachmentConfig(networkAttachmentConfigParams{
                    name: nadName,
                    topology: topology,
                    cidr: correctCIDRFamily(oc, userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet),
                    role: "primary",
                })
                netConfig.namespace = f.Namespace.Name
                _, err := nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create(
                    context.Background(),
                    generateNAD(netConfig),
                    metav1.CreateOptions{},
                )
                gomega.Expect(err).NotTo(gomega.HaveOccurred())

                ginkgo.By("creating client/server pods")
                serverPodConfig.namespace = f.Namespace.Name
                clientPodConfig.namespace = f.Namespace.Name
                runUDNPod(cs, f.Namespace.Name, serverPodConfig, nil)
                runUDNPod(cs, f.Namespace.Name, clientPodConfig, nil)

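                // netConfig.cidr may hold one or two comma-separated CIDRs (single or dual stack);
                // verify the server got an address from each configured family and that the client
                // can reach it before any policy exists.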
                var serverIP string
                for i, cidr := range strings.Split(netConfig.cidr, ",") {
                    if cidr != "" {
                        ginkgo.By("asserting the server pod has an IP from the configured range")
                        serverIP, err = podIPsForUserDefinedPrimaryNetwork(
                            cs,
                            f.Namespace.Name,
                            serverPodConfig.name,
                            namespacedName(f.Namespace.Name, netConfig.name),
                            i,
                        )
                        gomega.Expect(err).NotTo(gomega.HaveOccurred())
                        const netPrefixLengthPerNode = 24
                        ginkgo.By(fmt.Sprintf("asserting the server pod IP %v is from the configured range %v/%v", serverIP, cidr, netPrefixLengthPerNode))
                        subnet, err := getNetCIDRSubnet(cidr)
                        gomega.Expect(err).NotTo(gomega.HaveOccurred())
                        gomega.Expect(inRange(subnet, serverIP)).To(gomega.Succeed())
                    }

                    ginkgo.By("asserting the *client* pod can contact the server pod exposed endpoint")
                    podShouldReach(oc, clientPodConfig.name, formatHostAndPort(net.ParseIP(serverIP), port))
                }

                ginkgo.By("creating a \"default deny\" network policy")
                _, err = makeDenyAllPolicy(f, f.Namespace.Name)
                gomega.Expect(err).NotTo(gomega.HaveOccurred())

                ginkgo.By("asserting the *client* pod can not contact the server pod exposed endpoint")
                podShouldNotReach(oc, clientPodConfig.name, formatHostAndPort(net.ParseIP(serverIP), port))

            },
            ginkgo.Entry(
                "in L2 dualstack primary UDN",
                "layer2",
                *podConfig(
                    "client-pod",
                ),
                *podConfig(
                    "server-pod",
                    withCommand(func() []string {
                        return httpServerContainerCmd(port)
                    }),
                ),
            ),
            ginkgo.Entry(
                "in L3 dualstack primary UDN",
                "layer3",
                *podConfig(
                    "client-pod",
                ),
                *podConfig(
                    "server-pod",
                    withCommand(func() []string {
                        return httpServerContainerCmd(port)
                    }),
                ),
            ),
        )

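        // Second table: the yellow and blue namespaces share one primary UDN ("green").
        // A client in blue initially reaches both servers in yellow; a default-deny policy in
        // yellow then blocks all ingress, and a namespace-scoped allow policy re-opens ingress
        // only to the pod labeled with allowServerPodLabel.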
        ginkgo.DescribeTable(
            "allow ingress traffic to one pod from a particular namespace",
            func(
                topology string,
                clientPodConfig podConfiguration,
                allowServerPodConfig podConfiguration,
                denyServerPodConfig podConfiguration,
            ) {

                namespaceYellow := getNamespaceName(f, nameSpaceYellowSuffix)
                namespaceBlue := getNamespaceName(f, namespaceBlueSuffix)

                nad := networkAttachmentConfigParams{
                    topology: topology,
                    cidr: correctCIDRFamily(oc, userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet),
                    // Both the yellow and blue namespaces are going to be served by the green network.
                    // Use a random suffix for the network name to avoid races between tests.
                    networkName: fmt.Sprintf("%s-%s", "green", rand.String(randomStringLength)),
                    role: "primary",
                }

                // Use a random suffix in the net conf name to avoid races between tests.
                netConfName := fmt.Sprintf("sharednet-%s", rand.String(randomStringLength))
                for _, namespace := range []string{namespaceYellow, namespaceBlue} {
                    ginkgo.By("creating the attachment configuration for " + netConfName + " in namespace " + namespace)
                    netConfig := newNetworkAttachmentConfig(nad)
                    netConfig.namespace = namespace
                    netConfig.name = netConfName

                    _, err := nadClient.NetworkAttachmentDefinitions(namespace).Create(
                        context.Background(),
                        generateNAD(netConfig),
                        metav1.CreateOptions{},
                    )
                    gomega.Expect(err).NotTo(gomega.HaveOccurred())
                }

                ginkgo.By("creating client/server pods")
                allowServerPodConfig.namespace = namespaceYellow
                denyServerPodConfig.namespace = namespaceYellow
                clientPodConfig.namespace = namespaceBlue
                runUDNPod(cs, namespaceYellow, allowServerPodConfig, nil)
                runUDNPod(cs, namespaceYellow, denyServerPodConfig, nil)
                runUDNPod(cs, namespaceBlue, clientPodConfig, nil)

                ginkgo.By("asserting the server pods have an IP from the configured range")
                allowServerPodIP, err := podIPsForUserDefinedPrimaryNetwork(cs, namespaceYellow, allowServerPodConfig.name,
                    namespacedName(namespaceYellow, netConfName), 0)
                gomega.Expect(err).NotTo(gomega.HaveOccurred())
                ginkgo.By(fmt.Sprintf("asserting the allow server pod IP %v is from the configured range %v/%v", allowServerPodIP,
                    userDefinedNetworkIPv4Subnet, netPrefixLengthPerNode))
                subnet, err := getNetCIDRSubnet(userDefinedNetworkIPv4Subnet)
                gomega.Expect(err).NotTo(gomega.HaveOccurred())
                gomega.Expect(inRange(subnet, allowServerPodIP)).To(gomega.Succeed())
                denyServerPodIP, err := podIPsForUserDefinedPrimaryNetwork(cs, namespaceYellow, denyServerPodConfig.name,
                    namespacedName(namespaceYellow, netConfName), 0)
                gomega.Expect(err).NotTo(gomega.HaveOccurred())
                ginkgo.By(fmt.Sprintf("asserting the deny server pod IP %v is from the configured range %v/%v", denyServerPodIP,
                    userDefinedNetworkIPv4Subnet, netPrefixLengthPerNode))
                gomega.Expect(err).NotTo(gomega.HaveOccurred())
                gomega.Expect(inRange(subnet, denyServerPodIP)).To(gomega.Succeed())

                ginkgo.By("asserting the *client* pod can contact the allow server pod exposed endpoint")
                podShouldReach(oc, clientPodConfig.name, formatHostAndPort(net.ParseIP(allowServerPodIP), port))

                ginkgo.By("asserting the *client* pod can contact the deny server pod exposed endpoint")
                podShouldReach(oc, clientPodConfig.name, formatHostAndPort(net.ParseIP(denyServerPodIP), port))

                ginkgo.By("creating a \"default deny\" network policy")
                _, err = makeDenyAllPolicy(f, namespaceYellow)
                gomega.Expect(err).NotTo(gomega.HaveOccurred())

                ginkgo.By("asserting the *client* pod can not contact the allow server pod exposed endpoint")
                podShouldNotReach(oc, clientPodConfig.name, formatHostAndPort(net.ParseIP(allowServerPodIP), port))

                ginkgo.By("asserting the *client* pod can not contact the deny server pod exposed endpoint")
                podShouldNotReach(oc, clientPodConfig.name, formatHostAndPort(net.ParseIP(denyServerPodIP), port))

                ginkgo.By("creating an \"allow-traffic-to-pod\" network policy")
                _, err = allowTrafficToPodFromNamespacePolicy(f, namespaceYellow, namespaceBlue, "allow-traffic-to-pod", allowServerPodLabel)
                gomega.Expect(err).NotTo(gomega.HaveOccurred())

                ginkgo.By("asserting the *client* pod can contact the allow server pod exposed endpoint")
                podShouldReach(oc, clientPodConfig.name, formatHostAndPort(net.ParseIP(allowServerPodIP), port))

                ginkgo.By("asserting the *client* pod can not contact the deny server pod exposed endpoint")
                podShouldNotReach(oc, clientPodConfig.name, formatHostAndPort(net.ParseIP(denyServerPodIP), port))
            },
            ginkgo.Entry(
                "in L2 primary UDN",
                "layer2",
                *podConfig(
                    "client-pod",
                ),
                *podConfig(
                    "allow-server-pod",
                    withCommand(func() []string {
                        return httpServerContainerCmd(port)
                    }),
                    withLabels(allowServerPodLabel),
                ),
                *podConfig(
                    "deny-server-pod",
                    withCommand(func() []string {
                        return httpServerContainerCmd(port)
                    }),
                    withLabels(denyServerPodLabel),
                ),
            ),
            ginkgo.Entry(
                "in L3 primary UDN",
                "layer3",
                *podConfig(
                    "client-pod",
                ),
                *podConfig(
                    "allow-server-pod",
                    withCommand(func() []string {
                        return httpServerContainerCmd(port)
                    }),
                    withLabels(allowServerPodLabel),
                ),
                *podConfig(
                    "deny-server-pod",
                    withCommand(func() []string {
                        return httpServerContainerCmd(port)
                    }),
                    withLabels(denyServerPodLabel),
                ),
            ),
        )
    })
})

func getNamespaceName(f *framework.Framework, nsSuffix string) string {
    return fmt.Sprintf("%s-%s", f.Namespace.Name, nsSuffix)
}

func allowTrafficToPodFromNamespacePolicy(f *framework.Framework, namespace, fromNamespace, policyName string, podLabel map[string]string) (*knet.NetworkPolicy, error) {
    policy := &knet.NetworkPolicy{
        ObjectMeta: metav1.ObjectMeta{
            Name: policyName,
        },
        Spec: knet.NetworkPolicySpec{
            PodSelector: metav1.LabelSelector{MatchLabels: podLabel},
            PolicyTypes: []knet.PolicyType{knet.PolicyTypeIngress},
            Ingress: []knet.NetworkPolicyIngressRule{{From: []knet.NetworkPolicyPeer{
                {NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/metadata.name": fromNamespace}}}}}},
        },
    }
    return f.ClientSet.NetworkingV1().NetworkPolicies(namespace).Create(context.TODO(), policy, metav1.CreateOptions{})
}
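
Note: makeDenyAllPolicy is an existing helper in this test package and is not part of this diff. For reference, a minimal sketch of an equivalent deny-all policy; the function name, the "deny-all" policy name, and the exact PolicyTypes below are assumptions, and the in-tree helper may differ:

    // Sketch only: an empty podSelector selects every pod in the namespace, and declaring the
    // policy types with no accompanying rules denies all ingress and egress for those pods.
    func makeDenyAllPolicySketch(f *framework.Framework, ns string) (*knet.NetworkPolicy, error) {
        policy := &knet.NetworkPolicy{
            ObjectMeta: metav1.ObjectMeta{Name: "deny-all"},
            Spec: knet.NetworkPolicySpec{
                PodSelector: metav1.LabelSelector{},
                PolicyTypes: []knet.PolicyType{knet.PolicyTypeIngress, knet.PolicyTypeEgress},
            },
        }
        return f.ClientSet.NetworkingV1().NetworkPolicies(ns).Create(context.TODO(), policy, metav1.CreateOptions{})
    }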


12 changes: 12 additions & 0 deletions zz_generated.manifests/test-reporting.yaml
@@ -177,6 +177,18 @@ spec:
UserDefinedNetwork mirrors EndpointSlices managed by the default controller
for namespaces with user defined primary networks L3 primary UDN, host-networked
pods'
- testName: '[sig-network][OCPFeatureGate:NetworkSegmentation][Feature:UserDefinedPrimaryNetworks]
Network Policies when using openshift ovn-kubernetes allow ingress traffic
to one pod from a particular namespace in L2 primary UDN'
- testName: '[sig-network][OCPFeatureGate:NetworkSegmentation][Feature:UserDefinedPrimaryNetworks]
Network Policies when using openshift ovn-kubernetes allow ingress traffic
to one pod from a particular namespace in L3 primary UDN'
- testName: '[sig-network][OCPFeatureGate:NetworkSegmentation][Feature:UserDefinedPrimaryNetworks]
Network Policies when using openshift ovn-kubernetes pods within namespace
should be isolated when deny policy is present in L2 dualstack primary UDN'
- testName: '[sig-network][OCPFeatureGate:NetworkSegmentation][Feature:UserDefinedPrimaryNetworks]
Network Policies when using openshift ovn-kubernetes pods within namespace
should be isolated when deny policy is present in L3 dualstack primary UDN'
- testName: '[sig-network][OCPFeatureGate:NetworkSegmentation][Feature:UserDefinedPrimaryNetworks]
when using openshift ovn-kubernetes UserDefinedNetwork pod connected to UserDefinedNetwork
cannot be deleted when being used'