Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/test-e2e.yml
Original file line number Diff line number Diff line change
Expand Up @@ -29,4 +29,4 @@ jobs:
- name: Running Test e2e
run: |
go mod tidy
APISERVER_IMAGE=kplanedev/apiserver:v0.0.2 make test-e2e
APISERVER_IMAGE=kplanedev/apiserver:v0.0.3 make test-e2e
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ make uninstall
The e2e suite uses a Kind cluster and a shared apiserver image.

```sh
APISERVER_IMAGE=kplanedev/apiserver:v0.0.2 make test-e2e
APISERVER_IMAGE=kplanedev/apiserver:v0.0.3 make test-e2e
```

Optional overrides:
Expand Down
159 changes: 3 additions & 156 deletions internal/controller/controlplane_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -504,22 +504,8 @@ func (r *ControlPlaneReconciler) clusterClientForEndpoint(endpoint string) (*kub

func (r *ControlPlaneReconciler) bootstrapVirtualCluster(ctx context.Context, clientset *kubernetes.Clientset, endpoint string, caData []byte) error {
adminNS := r.virtualAdminNamespace()
requiredNamespaces := []string{
"default",
"kube-system",
"kube-public",
"kube-node-lease",
adminNS,
}
seen := make(map[string]struct{}, len(requiredNamespaces))
for _, ns := range requiredNamespaces {
if _, ok := seen[ns]; ok {
continue
}
seen[ns] = struct{}{}
if err := ensureNamespace(ctx, clientset, ns); err != nil {
return err
}
if err := ensureNamespace(ctx, clientset, adminNS); err != nil {
return err
}

if _, err := clientset.CoreV1().ServiceAccounts(adminNS).Create(ctx, &corev1.ServiceAccount{
Expand Down Expand Up @@ -945,53 +931,9 @@ func ensureBootstrapToken(ctx context.Context, clientset *kubernetes.Clientset)
}

func ensureBootstrapRBAC(ctx context.Context, clientset *kubernetes.Clientset) error {
if err := ensurePublicInfoViewer(ctx, clientset); err != nil {
return err
}
if err := ensureKubeadmBootstrapperRole(ctx, clientset); err != nil {
return err
}
if err := ensureNodeRBAC(ctx, clientset); err != nil {
return err
}

clusterRole := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: "system:node-bootstrapper"},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{"certificates.k8s.io"},
Resources: []string{"certificatesigningrequests", "certificatesigningrequests/nodeclient"},
Verbs: []string{"create", "get", "list", "watch"},
},
},
}
_, err := clientset.RbacV1().ClusterRoles().Create(ctx, clusterRole, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
return err
}

clusterRoleBinding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{Name: "system:node-bootstrapper"},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "system:node-bootstrapper",
},
Subjects: []rbacv1.Subject{
{
Kind: "Group",
Name: "system:bootstrappers",
},
{
Kind: "Group",
Name: "system:bootstrappers:kubeadm:default-node-token",
},
},
}
_, err = clientset.RbacV1().ClusterRoleBindings().Create(ctx, clusterRoleBinding, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
return err
}

roleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Expand All @@ -1014,25 +956,7 @@ func ensureBootstrapRBAC(ctx context.Context, clientset *kubernetes.Clientset) e
},
},
}
_, err = clientset.RbacV1().RoleBindings("kube-public").Create(ctx, roleBinding, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
return err
}
return nil
}

func ensurePublicInfoViewer(ctx context.Context, clientset *kubernetes.Clientset) error {
clusterRole := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: "system:public-info-viewer"},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"configmaps"},
Verbs: []string{"get", "list", "watch"},
},
},
}
_, err := clientset.RbacV1().ClusterRoles().Create(ctx, clusterRole, metav1.CreateOptions{})
_, err := clientset.RbacV1().RoleBindings("kube-public").Create(ctx, roleBinding, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
return err
}
Expand Down Expand Up @@ -1086,83 +1010,6 @@ func ensureKubeadmBootstrapperRole(ctx context.Context, clientset *kubernetes.Cl
return nil
}

// ensureNodeRBAC creates the "system:node" ClusterRole and the
// "system:nodes" ClusterRoleBinding that grant kubelets the API access they
// need (node registration/status, pod reads/status, lease renewal, config
// reads, CSI/runtime-class discovery, and event emission).
//
// Each Create tolerates IsAlreadyExists, so the function is idempotent and
// safe to call on every reconcile. Any other API error aborts and is
// returned to the caller.
func ensureNodeRBAC(ctx context.Context, clientset *kubernetes.Clientset) error {
// ClusterRole mirroring the permissions kubelets require. NOTE(review):
// this is a hand-maintained approximation of the upstream bootstrap policy
// for "system:node" — confirm it stays in sync with the Kubernetes
// version being served.
clusterRole := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: "system:node"},
Rules: []rbacv1.PolicyRule{
{
// Core group: nodes may self-register (create) and maintain their own object.
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"create", "get", "list", "watch", "update", "patch"},
},
{
// Status subresource is updated separately from the node spec.
APIGroups: []string{""},
Resources: []string{"nodes/status"},
Verbs: []string{"update", "patch"},
},
{
// Kubelets watch pods scheduled to them.
APIGroups: []string{""},
Resources: []string{"pods"},
Verbs: []string{"get", "list", "watch"},
},
{
// Kubelets report per-pod status.
APIGroups: []string{""},
Resources: []string{"pods/status"},
Verbs: []string{"update", "patch"},
},
{
// Read-only access to workload configuration referenced by pods.
APIGroups: []string{""},
Resources: []string{"services", "endpoints", "configmaps", "secrets"},
Verbs: []string{"get", "list", "watch"},
},
{
// Node heartbeat leases (kube-node-lease) need full write access.
APIGroups: []string{"coordination.k8s.io"},
Resources: []string{"leases"},
Verbs: []string{"get", "list", "watch", "create", "update", "patch"},
},
{
// CSI driver discovery.
APIGroups: []string{"storage.k8s.io"},
Resources: []string{"csidrivers"},
Verbs: []string{"get", "list", "watch"},
},
{
// RuntimeClass resolution for pod runtime handlers.
APIGroups: []string{"node.k8s.io"},
Resources: []string{"runtimeclasses"},
Verbs: []string{"get", "list", "watch"},
},
{
// Kubelets emit events for pod/node lifecycle.
APIGroups: []string{""},
Resources: []string{"events"},
Verbs: []string{"create", "patch", "update"},
},
},
}
// Idempotent create: an existing role is left untouched (no update/patch),
// so manual drift in the role is not reconciled here.
_, err := clientset.RbacV1().ClusterRoles().Create(ctx, clusterRole, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
return err
}

// Bind the role to the "system:nodes" group, which all kubelet
// certificates belong to by convention (CN system:node:<name>,
// O system:nodes).
clusterRoleBinding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{Name: "system:nodes"},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "system:node",
},
Subjects: []rbacv1.Subject{
{
Kind: "Group",
Name: "system:nodes",
},
},
}
// Same tolerate-exists semantics as the role above.
_, err = clientset.RbacV1().ClusterRoleBindings().Create(ctx, clusterRoleBinding, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
return err
}
return nil
}

func encodeBase64(data []byte) string {
if len(data) == 0 {
return ""
Expand Down
19 changes: 13 additions & 6 deletions test/e2e/e2e_suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ var (
// with the code source changes to be tested.
projectImage = "example.com/controlplane-operator:v0.0.1"

apiserverImage = "kplanedev/apiserver:v0.0.2"
apiserverImage = "kplanedev/apiserver:v0.0.3"
apiserverRepoDir = ""
)

Expand All @@ -64,14 +64,14 @@ var _ = BeforeSuite(func() {
if image := os.Getenv("APISERVER_IMAGE"); image != "" {
apiserverImage = image
}
apiserverArch := runtime.GOARCH
if v := os.Getenv("APISERVER_ARCH"); v != "" {
apiserverArch = v
}

var cmd *exec.Cmd
if _, err := os.Stat(apiserverRepoDir); err == nil {
By("building the apiserver binary for Kind")
apiserverArch := runtime.GOARCH
if v := os.Getenv("APISERVER_ARCH"); v != "" {
apiserverArch = v
}
apiserverBinary := fmt.Sprintf(".dev/bin/apiserver-linux-%s", apiserverArch)
cmd := exec.Command("go", "build", "-o", apiserverBinary, "./cmd/apiserver")
cmd.Env = append(os.Environ(), "GOOS=linux", fmt.Sprintf("GOARCH=%s", apiserverArch))
Expand All @@ -86,9 +86,11 @@ var _ = BeforeSuite(func() {
if output, err := cmd.CombinedOutput(); err != nil {
ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build apiserver image: %s", string(output))
}
} else if isLocalImagePresent(apiserverImage) {
By("using the local apiserver image")
} else {
By("pulling the apiserver image")
cmd = exec.Command("docker", "pull", apiserverImage)
cmd = exec.Command("docker", "pull", "--platform", fmt.Sprintf("linux/%s", apiserverArch), apiserverImage)
if output, err := cmd.CombinedOutput(); err != nil {
ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to pull apiserver image: %s", string(output))
}
Expand Down Expand Up @@ -125,6 +127,11 @@ var _ = BeforeSuite(func() {
}
})

// isLocalImagePresent reports whether the given image already exists in the
// local Docker image store. It probes with `docker image inspect`, which
// exits non-zero when the image (or the docker CLI itself) is unavailable.
func isLocalImagePresent(image string) bool {
	inspect := exec.Command("docker", "image", "inspect", image)
	err := inspect.Run()
	return err == nil
}

var _ = AfterSuite(func() {
// Teardown CertManager after the suite if not skipped and if it was not already installed
if !skipCertManagerInstall && !isCertManagerAlreadyInstalled {
Expand Down
29 changes: 26 additions & 3 deletions test/utils/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -139,13 +139,36 @@ func LoadImageToKindClusterWithName(name string) error {
if v, ok := os.LookupEnv("KIND_CLUSTER"); ok {
cluster = v
}
kindOptions := []string{"load", "docker-image", name, "--name", cluster}
kindBinary := defaultKindBinary
if v, ok := os.LookupEnv("KIND"); ok {
kindBinary = v
}
cmd := exec.Command(kindBinary, kindOptions...)
_, err := Run(cmd)

tmpFile, err := os.CreateTemp("", "kplane-kind-image-*.tar")
if err != nil {
return err
}
tmpPath := tmpFile.Name()
if closeErr := tmpFile.Close(); closeErr != nil {
warnError(closeErr)
}
defer func() {
if err := os.Remove(tmpPath); err != nil {
warnError(err)
}
}()

cmd := exec.Command("docker", "save", "-o", tmpPath, name)
if _, err = Run(cmd); err == nil {
cmd = exec.Command(kindBinary, "load", "image-archive", "--name", cluster, tmpPath)
if _, err = Run(cmd); err == nil {
return nil
}
}

kindOptions := []string{"load", "docker-image", name, "--name", cluster}
cmd = exec.Command(kindBinary, kindOptions...)
_, err = Run(cmd)
return err
}

Expand Down
Loading