tests: drop hardcoded kube-system CNI (#50667)
* tests: drop hardcoded kube-system CNI

This was originally set in 2021 to make the tests run on GKE and other managed clusters. The installation has long since detected the kube-system requirement automatically and installed the CNI appropriately.

As-is, this means that if you install the integration tests on a running cluster outside GKE, it ends up with two CNIs and causes havoc (the mechanism is sketched below the change summary).

* fix kube-system

* skip multi
howardjohn committed May 9, 2024
1 parent 743a49b commit 4f049fc
Showing 3 changed files with 10 additions and 5 deletions.
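What the removed override did, as a minimal runnable sketch. This assumes, as the kube.go hunk below suggests, that each AppendSet call becomes one "--set key=value" override handed to the installer; the installArgs type here is illustrative, not the framework's real builder.

package main

import (
	"fmt"
	"strings"
)

// installArgs stands in for the test framework's argument builder: each
// AppendSet call is assumed to turn into one "--set key=value" installer
// override.
type installArgs struct {
	sets []string
}

func (a *installArgs) AppendSet(key, value string) {
	a.sets = append(a.sets, fmt.Sprintf("--set %s=%s", key, value))
}

func main() {
	enableCNI := true
	args := &installArgs{}
	if enableCNI {
		// Before this commit the tests also forced
		//   args.AppendSet("components.cni.namespace", "kube-system")
		// which overrode the installer's own namespace detection and, on an
		// already-provisioned cluster, could yield a second CNI install.
		args.AppendSet("components.cni.enabled", "true")
	}
	fmt.Println(strings.Join(args.sets, " "))
	// Output: --set components.cni.enabled=true
}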
pkg/test/framework/components/istio/kube.go (1 change: 0 additions & 1 deletion)

@@ -620,7 +620,6 @@ func commonInstallArgs(ctx resource.Context, cfg Config, c cluster.Cluster, defa
 
 	// Include all user-specified values and configuration options.
 	if cfg.EnableCNI {
-		args.AppendSet("components.cni.namespace", "kube-system")
 		args.AppendSet("components.cni.enabled", "true")
 	}
 
tests/integration/pilot/cni_race_test.go (8 changes: 4 additions & 4 deletions)

@@ -86,7 +86,7 @@ func TestCNIRaceRepair(t *testing.T) {
 
 func getCNIDaemonSet(ctx framework.TestContext, c cluster.Cluster) *appsv1.DaemonSet {
 	cniDaemonSet, err := c.(istioKube.CLIClient).
-		Kube().AppsV1().DaemonSets("kube-system").
+		Kube().AppsV1().DaemonSets(i.Settings().SystemNamespace).
 		Get(context.Background(), "istio-cni-node", metav1.GetOptions{})
 	if err != nil {
 		ctx.Fatalf("failed to get CNI Daemonset %v", err)
@@ -99,15 +99,15 @@ func getCNIDaemonSet(ctx framework.TestContext, c cluster.Cluster) *appsv1.Daemo
 
 func deleteCNIDaemonset(ctx framework.TestContext, c cluster.Cluster) {
 	if err := c.(istioKube.CLIClient).
-		Kube().AppsV1().DaemonSets("kube-system").
+		Kube().AppsV1().DaemonSets(i.Settings().SystemNamespace).
 		Delete(context.Background(), "istio-cni-node", metav1.DeleteOptions{}); err != nil {
 		ctx.Fatalf("failed to delete CNI Daemonset %v", err)
 	}
 
 	// Wait until the CNI Daemonset pod cannot be fetched anymore
 	retry.UntilSuccessOrFail(ctx, func() error {
 		scopes.Framework.Infof("Checking if CNI Daemonset pods are deleted...")
-		pods, err := c.PodsForSelector(context.TODO(), "kube-system", "k8s-app=istio-cni-node")
+		pods, err := c.PodsForSelector(context.TODO(), i.Settings().SystemNamespace, "k8s-app=istio-cni-node")
 		if err != nil {
 			return err
 		}
@@ -127,7 +127,7 @@ func deployCNIDaemonset(ctx framework.TestContext, c cluster.Cluster, cniDaemonS
 		Labels:      cniDaemonSet.ObjectMeta.Labels,
 		Annotations: cniDaemonSet.ObjectMeta.Annotations,
 	}
-	_, err := c.(istioKube.CLIClient).Kube().AppsV1().DaemonSets("kube-system").
+	_, err := c.(istioKube.CLIClient).Kube().AppsV1().DaemonSets(i.Settings().SystemNamespace).
 		Create(context.Background(), &deployDaemonSet, metav1.CreateOptions{})
 	if err != nil {
 		ctx.Fatalf("failed to deploy CNI Daemonset %v", err)
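The recurring edit in this file swaps a literal "kube-system" for i.Settings().SystemNamespace. Outside the framework the same lookup is plain client-go; a minimal sketch, where the kubeconfig handling and the "istio-system" value are assumptions standing in for the suite's resolved system namespace:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed setup: load whatever kubeconfig the environment provides.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// systemNamespace plays the role of i.Settings().SystemNamespace: the CNI
	// DaemonSet now lives wherever Istio itself was installed, not in a
	// hardcoded kube-system.
	systemNamespace := "istio-system"
	ds, err := client.AppsV1().DaemonSets(systemNamespace).
		Get(context.Background(), "istio-cni-node", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %s/%s (%d pods scheduled)\n", ds.Namespace, ds.Name, ds.Status.CurrentNumberScheduled)
}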
tests/integration/pilot/multiplecontrolplanes/main_test.go (6 changes: 6 additions & 0 deletions)

@@ -58,6 +58,12 @@ func TestMain(m *testing.M) {
 		RequireMultiPrimary().
 		// Requires two CPs with specific names to be configured.
 		Label(label.CustomSetup).
+		// We are deploying two isolated environments, which CNI doesn't support.
+		// We could deploy one of the usergroups as the CNI owner, but for now we skip it.
+		SkipIf("CNI is not supported", func(ctx resource.Context) bool {
+			c, _ := istio.DefaultConfig(ctx)
+			return c.EnableCNI
+		}).
 		SetupParallel(
 			namespace.Setup(&userGroup1NS, namespace.Config{Prefix: "usergroup-1", Labels: map[string]string{"usergroup": "usergroup-1"}}),
 			namespace.Setup(&userGroup2NS, namespace.Config{Prefix: "usergroup-2", Labels: map[string]string{"usergroup": "usergroup-2"}})).
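Condensed, the suite registration this hunk lands in looks roughly like the following. The chain outside SkipIf is reconstructed from the visible context (the real file registers additional setup steps), and the predicate runs once at suite start, so the whole package is skipped rather than failing mid-run:

package multiplecontrolplanes

import (
	"testing"

	"istio.io/istio/pkg/test/framework"
	"istio.io/istio/pkg/test/framework/components/istio"
	"istio.io/istio/pkg/test/framework/label"
	"istio.io/istio/pkg/test/framework/resource"
)

func TestMain(m *testing.M) {
	framework.
		NewSuite(m).
		RequireMultiPrimary().
		Label(label.CustomSetup).
		// Two isolated control planes cannot share one cluster-wide CNI
		// install, so skip the suite entirely when the config enables CNI.
		SkipIf("CNI is not supported", func(ctx resource.Context) bool {
			c, _ := istio.DefaultConfig(ctx)
			return c.EnableCNI
		}).
		Run()
}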