2

我正在研究一个运算符的早期迭代,我使用 operator-sdk 搭建了它。我已尽力遵循Operator SDK Golang TutorialKubebuilder book中的示例。我发现我可以将我的操作员部署并运行到本地集群,但我无法运行测试套件。我的测试总是产生 a panic: runtime error: invalid memory address or nil pointer dereference,我已经追查到Scheme它总是 Nil 的事实。但到目前为止,我还无法弄清楚为什么会这样。

理论上,我可以跳过测试,只在我的本地集群中测试操作符,但从长远来看,这将是非常脆弱的。我希望能够进行 TDD,更重要的是,我希望有一个测试套件与操作员一起使用,以帮助在维护模式下保持质量。

这是我的suite_test.go,我从脚手架版本中尽可能少地对其进行了修改(我所做的更改来自Kubebuilder Book):

package controllers

import (
    "path/filepath"
    "testing"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/envtest"
    "sigs.k8s.io/controller-runtime/pkg/envtest/printer"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"

    mybatch "mycorp.com/mybatch-operator/api/v1alpha1"
    // +kubebuilder:scaffold:imports
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment

func TestAPIs(t *testing.T) {
    RegisterFailHandler(Fail)

    RunSpecsWithDefaultAndCustomReporters(t,
        "Controller Suite",
        []Reporter{printer.NewlineReporter{}})
}

var _ = BeforeSuite(func(done Done) {
    logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

    By("bootstrapping test environment")
    testEnv = &envtest.Environment{
        CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
    }

    cfg, err := testEnv.Start()
    Expect(err).NotTo(HaveOccurred())
    Expect(cfg).NotTo(BeNil())

    err = mybatch.AddToScheme(scheme.Scheme)
    Expect(err).NotTo(HaveOccurred())

    // +kubebuilder:scaffold:scheme

    k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
        Scheme: scheme.Scheme,
    })
    Expect(err).ToNot(HaveOccurred())

    err = (&MyBatchReconciler{
        Client: k8sManager.GetClient(),
        Log:    ctrl.Log.WithName("controllers").WithName("MyBatch"),
    }).SetupWithManager(k8sManager)
    Expect(err).ToNot(HaveOccurred())

    go func() {
        err = k8sManager.Start(ctrl.SetupSignalHandler())
        Expect(err).ToNot(HaveOccurred())
    }()

    k8sClient = k8sManager.GetClient()
    Expect(k8sClient).ToNot(BeNil())

    close(done)
}, 60)

var _ = AfterSuite(func() {
    By("tearing down the test environment")
    err := testEnv.Stop()
    Expect(err).NotTo(HaveOccurred())
})

这是导致它失败的测试块。我有第二个Describe块(此处未显示),它测试函数之外的一些业务逻辑Reconcile,并且工作正常。

package controllers

import (
    "context"
    "time"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"

    "github.com/jarcoal/httpmock"
    mybatch "mycorp.com/mybatch-operator/api/v1alpha1"
)

var _ = Describe("BatchController", func() {
    Describe("Reconcile", func() {
        // Define utility constants for object names and testing timeouts/durations and intervals.
        const (
            BatchName      = "test-batch"
            BatchNamespace = "default"
            BatchImage     = "mycorp/mockserver:latest"

            timeout  = time.Second * 10
            duration = time.Second * 10
            interval = time.Millisecond * 250
        )

        Context("When deploying MyBatch", func() {
            It("Should create a new Batch instance", func() {
                ctx := context.Background()

                // Define stub Batch
                testCR := &mybatch.MyBatch{
                    TypeMeta: metav1.TypeMeta{
                        APIVersion: "mybatch.mycorp.com/v1alpha1",
                        Kind:       "MyBatch",
                    },
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      BatchName,
                        Namespace: BatchNamespace,
                    },
                    Spec: mybatch.MyBatchSpec{
                        Replicas: 1,
                        StatusCheck: mybatch.StatusCheck{
                            Url:         "http://mycorp.com",
                            Endpoint:    "/rest/jobs/jobexecutions/active",
                            PollSeconds: 20,
                        },
                        Image: BatchImage,
                        PodSpec: corev1.PodSpec{
                            // For simplicity, we only fill out the required fields.
                            Containers: []corev1.Container{
                                {
                                    Name:  "test-container",
                                    Image: BatchImage,
                                },
                            },
                            RestartPolicy: corev1.RestartPolicyAlways,
                        },
                    },
                }

                Expect(k8sClient.Create(ctx, testCR)).Should(Succeed())

                lookupKey := types.NamespacedName{Name: BatchName, Namespace: BatchNamespace}
                createdBatch := &mybatch.MyBatch{}

                // We'll need to retry getting this newly created Batch, given that creation may not immediately happen.
                Eventually(func() bool {
                    err := k8sClient.Get(ctx, lookupKey, createdBatch)
                    if err != nil {
                        return false
                    }
                    return true
                }, timeout, interval).Should(BeTrue())
                // Check the container name
                Expect(createdBatch.Spec.PodSpec.Containers[0].Name).Should(Equal(BatchName))
            })
        })
    })
})

我在这里遗漏了什么阻止Scheme正确初始化的东西吗?我不得不承认,我并不太了解Scheme. 如果有帮助,我很乐意展示其他代码。

4

0 回答 0