
I am following this tutorial to create my first custom resource, called PodSet, and I am currently at step 6 (of 7), testing my CR.

Here is my Operator SDK controller Go code:

package controllers

import (
    "context"
    "reflect"

    "github.com/go-logr/logr"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    appv1alpha1 "github.com/redhat/podset-operator/api/v1alpha1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// PodSetReconciler reconciles a PodSet object
type PodSetReconciler struct {
    client.Client
    Log    logr.Logger
    Scheme *runtime.Scheme
}

// +kubebuilder:rbac:groups=app.example.com,resources=podsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=app.example.com,resources=podsets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete

// Reconcile implements the core logic of the controller
func (r *PodSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
    _ = context.Background()
    _ = r.Log.WithValues("podset", req.NamespacedName)

    // Fetch the PodSet instance (the parent of the pods)
    instance := &appv1alpha1.PodSet{}
    err := r.Get(context.Background(), req.NamespacedName, instance)
    if err != nil {
        if errors.IsNotFound(err) {
            // Request object not found, could have been deleted after reconcile request.
            // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
            // Return and don't requeue
            return reconcile.Result{}, nil
        }
        // Error reading the object - requeue the request
        return reconcile.Result{}, err
    }

    // List all pods owned by this PodSet instance
    podSet := instance
    podList := &corev1.PodList{}
    labelz := map[string]string{
        "app":     podSet.Name, // the metadata.name field from user's CR PodSet YAML file
        "version": "v0.1",
    }
    labelSelector := labels.SelectorFromSet(labelz)
    listOpts := &client.ListOptions{Namespace: podSet.Namespace, LabelSelector: labelSelector}
    if err = r.List(context.Background(), podList, listOpts); err != nil {
        return reconcile.Result{}, err
    }

    // Count the pods that are pending or running and add them to available array
    var available []corev1.Pod
    for _, pod := range podList.Items {
        if pod.ObjectMeta.DeletionTimestamp != nil {
            continue
        }
        if pod.Status.Phase == corev1.PodRunning || pod.Status.Phase == corev1.PodPending {
            available = append(available, pod)
        }
    }
    numAvailable := int32(len(available))
    availableNames := []string{}
    for _, pod := range available {
        availableNames = append(availableNames, pod.ObjectMeta.Name)
    }

    // Update the status if necessary
    status := appv1alpha1.PodSetStatus{
        PodNames:          availableNames,
        AvailableReplicas: numAvailable,
    }
    if !reflect.DeepEqual(podSet.Status, status) {
        podSet.Status = status
        err = r.Status().Update(context.Background(), podSet)
        if err != nil {
            r.Log.Error(err, "Failed to update PodSet status")
            return reconcile.Result{}, err
        }
    }

    // When the number of pods in the cluster is bigger than what we want, scale down
    if numAvailable > podSet.Spec.Replicas {
        r.Log.Info("Scaling down pods", "Currently available", numAvailable, "Required replicas", podSet.Spec.Replicas)
        diff := numAvailable - podSet.Spec.Replicas
        toDeletePods := available[:diff] // Syntax help: https://play.golang.org/p/SHAMCdd12sp
        for _, toDeletePod := range toDeletePods {
            err = r.Delete(context.Background(), &toDeletePod)
            if err != nil {
                r.Log.Error(err, "Failed to delete pod", "pod.name", toDeletePod.Name)
                return reconcile.Result{}, err
            }
        }
        return reconcile.Result{Requeue: true}, nil
    }

    // When the number of pods in the cluster is smaller than what we want, scale up
    if numAvailable < podSet.Spec.Replicas {
        r.Log.Info("Scaling up pods", "Currently available", numAvailable, "Required replicas", podSet.Spec.Replicas)
        // Define a new Pod object
        pod := newPodForCR(podSet)
        // Set PodSet instance as the owner of the Pod
        if err := controllerutil.SetControllerReference(podSet, pod, r.Scheme); err != nil {
            return reconcile.Result{}, err
        }
        err = r.Create(context.Background(), pod)
        if err != nil {
            r.Log.Error(err, "Failed to create pod", "pod.name", pod.Name)
            return reconcile.Result{}, err
        }
        return reconcile.Result{Requeue: true}, nil
    }

    return ctrl.Result{}, nil
}

// newPodForCR returns a busybox pod with the same name/namespace as the cr
func newPodForCR(cr *appv1alpha1.PodSet) *corev1.Pod {
    labels := map[string]string{
        "app":     cr.Name, // the metadata.name field from user's CR PodSet YAML file
        "version": "v0.1",
    }
    return &corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: cr.Name + "-pod",
            Namespace:    cr.Namespace,
            Labels:       labels,
        },
        Spec: corev1.PodSpec{
            Containers: []corev1.Container{
                {
                    Name:    "busybox",
                    Image:   "busybox",
                    Command: []string{"sleep", "3600"},
                },
            },
        },
    }
}

// SetupWithManager defines how the controller will watch for resources
func (r *PodSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&appv1alpha1.PodSet{}).
        Owns(&corev1.Pod{}).
        Complete(r)
}

When I apply the YAML file below, I see strange behavior from the pods. They struggle for the first few seconds: some of them come up and run for a while, then quickly go into Terminating state. After I leave them alone for a few more seconds, the CR reaches the desired state.

apiVersion: app.example.com/v1alpha1
kind: PodSet
metadata:
  name: podset-sample
spec:
  replicas: 5

I captured the deployment scenario above in this video. Here are the full logs from running WATCH_NAMESPACE=podset-operator make run in my local terminal (sorry, I had to use Pastebin because SO does not allow me to paste the full logs here; they are too long).

So, my questions are:

  1. What exactly does this error mean: Failed to update PodSet status {"error": "Operation cannot be fulfilled on podsets.app.example.com \"podset-sample\": the object has been modified; please apply your changes to the latest version and try again"}?
  2. Why does it happen?
  3. What can I do to get rid of these errors?

2 Answers


You need to get the object again right before updating it; the error occurs because you are holding a stale version of the object when you attempt the update.

Edit:

// Re-fetch the latest version of the PodSet right before updating its status
podSet := &appv1alpha1.PodSet{}
err := r.Get(context.Background(), req.NamespacedName, podSet)
if err != nil {
    return reconcile.Result{}, err
}

// Update the status if necessary
status := appv1alpha1.PodSetStatus{
    PodNames:          availableNames,
    AvailableReplicas: numAvailable,
}
if !reflect.DeepEqual(podSet.Status, status) {
    podSet.Status = status
    err = r.Status().Update(context.Background(), podSet)
    if err != nil {
        r.Log.Error(err, "Failed to update PodSet status")
        return reconcile.Result{}, err
    }
}

You have to fetch the latest version of the object from Kubernetes to make sure you are updating its most recent revision.
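
If the conflict can still occur between the Get and the Update (another reconcile may modify the object in between), you can also wrap the status update in a conflict-retry loop. This is a minimal sketch using retry.RetryOnConflict from k8s.io/client-go/util/retry, assuming the status value computed earlier in Reconcile:

err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
    // Re-fetch the latest PodSet on every attempt so the update is
    // applied against the current resourceVersion.
    latest := &appv1alpha1.PodSet{}
    if err := r.Get(context.Background(), req.NamespacedName, latest); err != nil {
        return err
    }
    latest.Status = status
    return r.Status().Update(context.Background(), latest)
})
if err != nil {
    r.Log.Error(err, "Failed to update PodSet status")
    return reconcile.Result{}, err
}

This needs the k8s.io/client-go/util/retry import added to the controller.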

Answered on 2021-01-29T19:30:31.677

You can also use Patch instead of Update to avoid this problem.
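
For example, a minimal sketch of patching the status subresource instead of updating it, assuming the same podSet, availableNames and numAvailable variables from the question's Reconcile:

status := appv1alpha1.PodSetStatus{
    PodNames:          availableNames,
    AvailableReplicas: numAvailable,
}
if !reflect.DeepEqual(podSet.Status, status) {
    // Snapshot the object as the patch base before mutating it.
    patch := client.MergeFrom(podSet.DeepCopy())
    podSet.Status = status
    if err := r.Status().Patch(context.Background(), podSet, patch); err != nil {
        r.Log.Error(err, "Failed to patch PodSet status")
        return reconcile.Result{}, err
    }
}

A plain merge patch does not send a resourceVersion, so the API server applies it without the optimistic-concurrency check that produces the "object has been modified" error.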

Answered on 2021-04-21T07:47:06.210