grim/tagpull

Refactor everything to separate files

2019-12-24, Gary Kramlich
eb779bb2a572
Parents a5cfb94766b5
Children 43e9ed2e7723
  • const.go  +5 -0
  • main.go  +10 -186
  • operator.go  +202 -0
  • util.go  +16 -0
  • --- /dev/null Thu Jan 01 00:00:00 1970 +0000
    +++ b/const.go Tue Dec 24 00:56:24 2019 -0600
    @@ -0,0 +1,5 @@
    +package main
    +
    +const (
    + Annotation = "rwgrim/autoupdate"
    +)
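
    The Annotation constant is the opt-in hook for the whole operator: only deployments whose pod template carries the rwgrim/autoupdate annotation get checked, and its value is parsed with time.ParseDuration in checkDeployment further down. A minimal sketch of how a value like "5m" is interpreted — the annotation map and interval here are illustrative, not from the commit:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// hypothetical pod-template annotations; only the key comes from const.go
    	annotations := map[string]string{
    		"rwgrim/autoupdate": "5m",
    	}

    	if raw, found := annotations["rwgrim/autoupdate"]; found {
    		// any time.ParseDuration string is accepted: "30s", "5m", "1h", ...
    		interval, err := time.ParseDuration(raw)
    		if err != nil {
    			fmt.Printf("invalid duration %q: %v\n", raw, err)
    			return
    		}
    		fmt.Printf("deployment will be checked every %s\n", interval)
    	}
    }
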
    --- a/main.go Mon Dec 23 22:33:47 2019 -0600
    +++ b/main.go Tue Dec 24 00:56:24 2019 -0600
    @@ -2,204 +2,28 @@
    import (
    "os"
    - "os/user"
    - "path/filepath"
    - "strings"
    "time"
    log "github.com/sirupsen/logrus"
    - appsv1 "k8s.io/api/apps/v1"
    - corev1 "k8s.io/api/core/v1"
    - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    - "k8s.io/client-go/kubernetes"
    - "k8s.io/client-go/rest"
    - "k8s.io/client-go/tools/clientcmd"
    )
    -const (
    - Annotation = "rwgrim/autoupdate"
    -)
    -
    -func init() {
    - log.SetOutput(os.Stdout)
    - log.SetLevel(log.DebugLevel)
    -}
    -
    -func getConfig() (*rest.Config, error) {
    - config, err := rest.InClusterConfig()
    - if err != nil {
    - if kubeconfig, found := os.LookupEnv("KUBECONFIG"); found {
    - config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
    - } else {
    - user, uErr := user.Current()
    - if uErr != nil {
    - return nil, uErr
    - }
    -
    - path := filepath.Join(user.HomeDir, ".kube", "config")
    - config, err = clientcmd.BuildConfigFromFlags("", path)
    - }
    - }
    -
    - return config, err
    -}
    -
    func registryGetImageID(repository string) (string, error) {
    return "", nil
    }
    -func createLabelString(labelMap map[string]string) string {
    - labels := make([]string, len(labelMap))
    - i := 0
    - for k, v := range labelMap {
    - labels[i] = k + "=" + v
    - i++
    - }
    - return strings.Join(labels, ",")
    -}
    -
    -func checkContainer(clientset *kubernetes.Clientset, container corev1.Container, imageID string, images map[string]string) error {
    - // now that we know that the container has an ImagePullPolicy of
    - // Always, we can check its image.
    - regImageID, found := images[container.Image]
    - if !found {
    - id, err := registryGetImageID(container.Image)
    - if err != nil {
    - return err
    - }
    - images[container.Image] = id
    - regImageID = id
    - }
    -
    - if imageID != regImageID {
    - log.Warnf("image id's differ, cycling pod: %s != %s", imageID, regImageID)
    - }
    -
    - return nil
    -}
    +func main() {
    + tickrate := 1 * time.Minute
    + namespace := ""
    -func checkPod(clientset *kubernetes.Clientset, pod corev1.Pod, images map[string]string) error {
    - for _, container := range pod.Spec.Containers {
    - log.Debugf("checking %s/%s", pod.ObjectMeta.Namespace, pod.Name)
    -
    - if container.ImagePullPolicy != corev1.PullAlways {
    - log.Warnf(
    - "%s/%s ImagePullPolicy is %s not %s",
    - pod.ObjectMeta.Namespace,
    - pod.Name,
    - container.ImagePullPolicy,
    - corev1.PullAlways,
    - )
    - }
    + log.SetOutput(os.Stdout)
    + log.SetLevel(log.DebugLevel)
    - // since the container statuses are a list rather than a map, we have
    - // to iterate them to find the container we're interested in.
    - for _, status := range pod.Status.ContainerStatuses {
    - if status.Name != container.Name {
    - continue
    - }
    + operator, err := NewOperator(tickrate, namespace)
    + if err != nil {
    + log.Fatalf("%v", err)
    - if !status.Ready {
    - log.Infof("%s/%s:%s is not ready, skipping", pod.ObjectMeta.Namespace, pod.Name, container.Name)
    - } else if err := checkContainer(clientset, container, status.ImageID, images); err != nil {
    - log.Warnf(
    - "%s/%s:%s failed check: %v",
    - pod.ObjectMeta.Namespace,
    - pod.Name,
    - container.Name,
    - err,
    - )
    - }
    -
    - break
    - }
    + return
    }
    - return nil
    -}
    -
    -func checkDeployment(clientset *kubernetes.Clientset, deployment appsv1.Deployment) error {
    - // create a cache for all the images in this deployment
    - images := map[string]string{}
    -
    - namespace := deployment.Namespace
    -
    - clientset.AppsV1().Deployments(namespace).Update(&deployment)
    -
    - listOpts := metav1.ListOptions{
    - LabelSelector: createLabelString(deployment.Spec.Template.Labels),
    - }
    - pods, err := clientset.CoreV1().Pods(namespace).List(listOpts)
    - if err != nil {
    - return err
    - }
    -
    - for _, pod := range pods.Items {
    - if err := checkPod(clientset, pod, images); err != nil {
    - log.Warnf("%v", err)
    - }
    - }
    -
    - return nil
    + operator.Run()
    }
    -
    -func main() {
    - namespace := ""
    - tick := 1 * time.Minute
    -
    - // create a map keyed by namespace/deployment-name that stores the
    - // last time each deployment was checked
    - cache := map[string]time.Time{}
    -
    - config, err := getConfig()
    - if err != nil {
    - panic(err.Error())
    - }
    -
    - clientset, err := kubernetes.NewForConfig(config)
    - if err != nil {
    - panic(err.Error())
    - }
    -
    - for {
    - log.Info("checking for updates")
    -
    - ts := time.Now().UTC()
    -
    - deployments, err := clientset.AppsV1().Deployments(namespace).List(metav1.ListOptions{})
    - if err != nil {
    - log.Warningf("failed to get deployments: %s", err.Error())
    - } else {
    - for _, d := range deployments.Items {
    - if rawInterval, found := d.Spec.Template.Annotations[Annotation]; found {
    - name := d.Namespace + "/" + d.Name
    -
    - interval, err := time.ParseDuration(rawInterval)
    - if err != nil {
    - log.Warnf("%s has an invalid duration of '%s'", name, rawInterval)
    - }
    -
    - lastCheck, found := cache[name]
    - if !found {
    - log.Infof("found new deployment %s with interval %s", name, interval)
    - lastCheck = time.Time{}
    - }
    -
    - if ts.Sub(lastCheck) > interval {
    - log.Debugf("checking %s", name)
    - checkDeployment(clientset, d)
    - cache[name] = ts
    - } else {
    - log.Debugf(
    - "%s will be checked in %s",
    - name,
    - time.Until(lastCheck.Add(interval)),
    - )
    - }
    - }
    - }
    - }
    -
    - time.Sleep(tick)
    - }
    -}
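
    Note that registryGetImageID survives the refactor as a stub in main.go: it always returns an empty ID, so checkContainer currently compares every running image against "". For orientation only, here is one way it might eventually be implemented against a Docker Registry v2 API, where a HEAD request on a manifest returns the digest in the Docker-Content-Digest header; the registry host, the hardcoded latest tag, and the lack of auth handling are all assumptions, not anything this commit promises:

    package main

    import (
    	"fmt"
    	"net/http"
    )

    // Sketch only: look up an image digest via the Docker Registry v2 API.
    func registryGetImageID(repository string) (string, error) {
    	// assumes a bare repository name; real images also carry a registry and tag
    	url := fmt.Sprintf("https://registry.example.com/v2/%s/manifests/latest", repository)

    	req, err := http.NewRequest(http.MethodHead, url, nil)
    	if err != nil {
    		return "", err
    	}
    	// request the v2 manifest so the digest matches what the kubelet reports
    	req.Header.Set("Accept", "application/vnd.docker.distribution.manifest.v2+json")

    	resp, err := http.DefaultClient.Do(req)
    	if err != nil {
    		return "", err
    	}
    	defer resp.Body.Close()

    	if resp.StatusCode != http.StatusOK {
    		return "", fmt.Errorf("unexpected status %s for %s", resp.Status, repository)
    	}

    	return resp.Header.Get("Docker-Content-Digest"), nil
    }

    func main() {
    	fmt.Println(registryGetImageID("rwgrim/tagpull"))
    }
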
    --- /dev/null Thu Jan 01 00:00:00 1970 +0000
    +++ b/operator.go Tue Dec 24 00:56:24 2019 -0600
    @@ -0,0 +1,202 @@
    +package main
    +
    +import (
    + "fmt"
    + "os"
    + "os/user"
    + "path/filepath"
    + "time"
    +
    + log "github.com/sirupsen/logrus"
    + appsv1 "k8s.io/api/apps/v1"
    + corev1 "k8s.io/api/core/v1"
    + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    + "k8s.io/client-go/kubernetes"
    + "k8s.io/client-go/rest"
    + "k8s.io/client-go/tools/clientcmd"
    +)
    +
    +type Operator struct {
    + tickrate time.Duration
    + namespace string
    + cache map[string]time.Time
    +
    + clientset *kubernetes.Clientset
    +}
    +
    +func NewOperator(tickrate time.Duration, namespace string) (*Operator, error) {
    + o := &Operator{
    + tickrate: tickrate,
    + namespace: namespace,
    + cache: map[string]time.Time{},
    + }
    +
    + if err := o.getClientSet(); err != nil {
    + return nil, err
    + }
    +
    + return o, nil
    +}
    +
    +func (o *Operator) getClientSet() error {
    + config, err := rest.InClusterConfig()
    + if err != nil {
    + if kubeconfig, found := os.LookupEnv("KUBECONFIG"); found {
    + config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
    + } else {
    + user, uErr := user.Current()
    + if uErr != nil {
    + return uErr
    + }
    +
    + path := filepath.Join(user.HomeDir, ".kube", "config")
    + config, err = clientcmd.BuildConfigFromFlags("", path)
    + }
    + }
    +
    + if err != nil {
    + return err
    + }
    +
    + clientset, err := kubernetes.NewForConfig(config)
    + if err != nil {
    + return err
    + }
    +
    + o.clientset = clientset
    +
    + return nil
    +}
    +
    +func (o *Operator) Run() {
    + for {
    + log.Info("checking deployments")
    +
    + ts := time.Now().UTC()
    +
    + deployments, err := o.clientset.AppsV1().Deployments(o.namespace).List(metav1.ListOptions{})
    + if err != nil {
    + log.Warningf("failed to get deployments: %s", err.Error())
    + } else {
    + for _, deployment := range deployments.Items {
    + // before checking the deployment, make sure we have the
    + // annotation.
    + if rawInterval, found := deployment.Spec.Template.Annotations[Annotation]; found {
    + o.checkDeployment(deployment, ts, rawInterval)
    + }
    + }
    + }
    +
    + time.Sleep(o.tickrate)
    + }
    +}
    +
    +func (o *Operator) checkDeployment(deployment appsv1.Deployment, ts time.Time, rawInterval string) error {
    + name := deployment.Namespace + "/" + deployment.Name
    +
    + interval, err := time.ParseDuration(rawInterval)
    + if err != nil {
    + return fmt.Errorf("deployment %s has an invalid duration of %q", name, rawInterval)
    + }
    +
    + lastCheck, found := o.cache[name]
    + if !found {
    + log.Infof("found new deployment %s with interval %s", name, interval)
    + lastCheck = time.Time{}
    + }
    +
    + if ts.Sub(lastCheck) < interval {
    + log.Debugf(
    + "%s will be checked in %s",
    + name,
    + time.Until(lastCheck.Add(interval)),
    + )
    +
    + return nil
    + }
    +
    + log.Debugf("checking deployment %s", name)
    +
    + // create a cache for the images in this deployment
    + images := map[string]string{}
    +
    + if _, err := o.clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment); err != nil { return err }
    +
    + listOpts := metav1.ListOptions{
    + LabelSelector: createLabelString(deployment.Spec.Template.Labels),
    + }
    + pods, err := o.clientset.CoreV1().Pods(deployment.Namespace).List(listOpts)
    + if err != nil {
    + return err
    + }
    +
    + for _, pod := range pods.Items {
    + if err := o.checkPod(pod, images); err != nil {
    + log.Warnf("%v", err)
    + }
    + }
    +
    + o.cache[name] = ts
    +
    + return nil
    +}
    +
    +func (o *Operator) checkPod(pod corev1.Pod, images map[string]string) error {
    + for _, container := range pod.Spec.Containers {
    + log.Debugf("checking %s/%s", pod.ObjectMeta.Namespace, pod.Name)
    +
    + if container.ImagePullPolicy != corev1.PullAlways {
    + log.Warnf(
    + "%s/%s ImagePullPolicy is %s not %s",
    + pod.ObjectMeta.Namespace,
    + pod.Name,
    + container.ImagePullPolicy,
    + corev1.PullAlways,
    + )
    + }
    +
    + // since the container statuses are a list rather than a map, we have
    + // to iterate them to find the container we're interested in.
    + for _, status := range pod.Status.ContainerStatuses {
    + if status.Name != container.Name {
    + continue
    + }
    +
    + if !status.Ready {
    + log.Infof("%s/%s:%s is not ready, skipping", pod.ObjectMeta.Namespace, pod.Name, container.Name)
    + } else if err := o.checkContainer(container, status.ImageID, images); err != nil {
    + log.Warnf(
    + "%s/%s:%s failed check: %v",
    + pod.ObjectMeta.Namespace,
    + pod.Name,
    + container.Name,
    + err,
    + )
    + }
    +
    + break
    + }
    + }
    +
    + return nil
    +}
    +
    +func (o *Operator) checkContainer(container corev1.Container, imageID string, images map[string]string) error {
    + // now that we know that the container has an ImagePullPolicy of
    + // Always, we can check its image.
    + regImageID, found := images[container.Image]
    + if !found {
    + id, err := registryGetImageID(container.Image)
    + if err != nil {
    + return err
    + }
    + images[container.Image] = id
    + regImageID = id
    + }
    +
    + if imageID != regImageID {
    + log.Warnf("image id's differ, cycling pod: %s != %s", imageID, regImageID)
    + }
    +
    + return nil
    +}
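
    One detail worth calling out in checkDeployment: a deployment seen for the first time gets lastCheck = time.Time{}, so ts.Sub(lastCheck) is enormous (it saturates at the maximum time.Duration) and the first check always happens immediately. A standalone illustration of that gate:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	interval := 5 * time.Minute
    	ts := time.Now().UTC()

    	// first sighting: the zero time makes the elapsed duration huge,
    	// so the deployment is checked right away
    	var lastCheck time.Time
    	fmt.Println("check now?", ts.Sub(lastCheck) >= interval) // true

    	// checked a minute ago: still inside the interval, so it is skipped
    	lastCheck = ts.Add(-1 * time.Minute)
    	fmt.Println("check now?", ts.Sub(lastCheck) >= interval) // false
    }
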
    --- /dev/null Thu Jan 01 00:00:00 1970 +0000
    +++ b/util.go Tue Dec 24 00:56:24 2019 -0600
    @@ -0,0 +1,16 @@
    +package main
    +
    +import (
    + "strings"
    +)
    +
    + // create a comma-separated label selector string from a label map
    +func createLabelString(labelMap map[string]string) string {
    + labels := make([]string, len(labelMap))
    + i := 0
    + for k, v := range labelMap {
    + labels[i] = k + "=" + v
    + i++
    + }
    + return strings.Join(labels, ",")
    +}
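
    createLabelString produces the comma-separated selector syntax that metav1.ListOptions.LabelSelector expects. Because Go randomizes map iteration order, the segments can come out in any order, which is harmless since label selectors are order-insensitive. A quick usage sketch with made-up labels:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // copied from util.go above
    func createLabelString(labelMap map[string]string) string {
    	labels := make([]string, len(labelMap))
    	i := 0
    	for k, v := range labelMap {
    		labels[i] = k + "=" + v
    		i++
    	}
    	return strings.Join(labels, ",")
    }

    func main() {
    	selector := createLabelString(map[string]string{
    		"app":  "tagpull", // illustrative labels, not from the commit
    		"tier": "backend",
    	})

    	// prints "app=tagpull,tier=backend" or "tier=backend,app=tagpull"
    	fmt.Println(selector)
    }
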