Refactor everything to separate files
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/const.go Tue Dec 24 00:56:24 2019 -0600
@@ -0,0 +1,5 @@
+	Annotation = "rwgrim/autoupdate"
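The hunk header says the new const.go is five lines, but only the constant itself survived here; presumably the rest is just the package clause and a const block, roughly:

```go
package main

const (
	// Annotation is the deployment annotation that opts a workload into auto-updating.
	Annotation = "rwgrim/autoupdate"
)
```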
--- a/main.go	Mon Dec 23 22:33:47 2019 -0600
+++ b/main.go	Tue Dec 24 00:56:24 2019 -0600
@@ -2,204 +2,28 @@
log "github.com/sirupsen/logrus"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/rest"
- "k8s.io/client-go/tools/clientcmd"
- Annotation = "rwgrim/autoupdate"
- log.SetOutput(os.Stdout)
- log.SetLevel(log.DebugLevel)
-func getConfig() (*rest.Config, error) {
- config, err := rest.InClusterConfig()
- if kubeconfig, found := os.LookupEnv("KUBECONFIG"); found {
- config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
- user, uErr := user.Current()
- path := filepath.Join(user.HomeDir, ".kube", "config")
- config, err = clientcmd.BuildConfigFromFlags("", path)
func registryGetImageID(repository string) (string, error) {
-func createLabelString(labelMap map[string]string) string {
- labels := make([]string, len(labelMap))
- for k, v := range labelMap {
- labels[i] = k + "=" + v
- return strings.Join(labels, ",")
-func checkContainer(clientset *kubernetes.Clientset, container corev1.Container, imageID string, images map[string]string) error {
- // now that we know that the container has a ImagePullPolicy of
- // Always, we can check it's image.
- regImageID, found := images[container.Image]
- id, err := registryGetImageID(container.Image)
- images[container.Image] = id
- if imageID != regImageID {
- log.Warnf("image id's differ, cycling pod: %s != %s", imageID, regImageID)
+	tickrate := 1 * time.Minute
-func checkPod(clientset *kubernetes.Clientset, pod corev1.Pod, images map[string]string) error {
- for _, container := range pod.Spec.Containers {
- log.Debugf("checking %s/%s", pod.ObjectMeta.Namespace, pod.Name)
- if container.ImagePullPolicy != corev1.PullAlways {
- "%s/%s ImagePullPolicy is %s not %s",
- pod.ObjectMeta.Namespace,
- container.ImagePullPolicy,
+	log.SetOutput(os.Stdout)
+	log.SetLevel(log.DebugLevel)
-	// since the statuses is not a map, we have to iterate it to find
- // the containers we're interested in.
- for _, status := range pod.Status.ContainerStatuses {
- if status.Name != container.Name {
+ operator, err := NewOperator(tickrate, namespace)
- log.Infof("%s/%s:%s is not ready, skipping", pod.ObjectMeta.Namespace, pod.Name, container.Name)
- } else if err := checkContainer(clientset, container, status.ImageID, images); err != nil {
- "%s/%s:%s failed check: %v",
- pod.ObjectMeta.Namespace,
-func checkDeployment(clientset *kubernetes.Clientset, deployment appsv1.Deployment) error {
- // create a cache for all the images in this deployment
- images := map[string]string{}
- namespace := deployment.Namespace
- clientset.AppsV1().Deployments(namespace).Update(&deployment)
- listOpts := metav1.ListOptions{
- LabelSelector: createLabelString(deployment.Spec.Template.Labels),
- pods, err := clientset.CoreV1().Pods(namespace).List(listOpts)
- for _, pod := range pods.Items {
- if err := checkPod(clientset, pod, images); err != nil {
- tick := 1 * time.Minute
- // create a map which is namespace/deployment-name that stores the last
- cache := map[string]time.Time{}
- config, err := getConfig()
- clientset, err := kubernetes.NewForConfig(config)
- log.Info("checking for updates")
- deployments, err := clientset.AppsV1().Deployments(namespace).List(metav1.ListOptions{})
- log.Warningf("failed to get deployments: %s", err.Error())
- for _, d := range deployments.Items {
- if rawInterval, found := d.Spec.Template.Annotations[Annotation]; found {
- name := d.Namespace + "/" + d.Name
- interval, err := time.ParseDuration(rawInterval)
- log.Warnf("%s has an invalid duration of '%s'", name, rawInterval)
- lastCheck, found := cache[name]
- log.Infof("found new deployment %s with interval %s", name, interval)
- lastCheck = time.Time{}
- if ts.Sub(lastCheck) > interval {
- log.Debugf("checking %s", name)
- checkDeployment(clientset, d)
- "%s will be checked in %s",
- time.Until(lastCheck.Add(interval)),
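Only a handful of the new main.go's added lines survive above (the tick rate, the log setup, and the NewOperator call), but they suggest main() now just wires up and runs the Operator. A rough sketch of what that might look like; the namespace lookup and the error handling are assumptions, and registryGetImageID (which appears to stay in main.go, per the unchanged context line above) is omitted:

```go
package main

import (
	"os"
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetOutput(os.Stdout)
	log.SetLevel(log.DebugLevel)

	// How the namespace is chosen isn't visible in the fragments above;
	// an environment variable is assumed here (empty string = all namespaces).
	namespace := os.Getenv("NAMESPACE")

	tickrate := 1 * time.Minute

	operator, err := NewOperator(tickrate, namespace)
	if err != nil {
		log.Fatalf("failed to create operator: %v", err)
	}

	// Run blocks, checking annotated deployments on each tick.
	operator.Run()
}
```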
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/operator.go Tue Dec 24 00:56:24 2019 -0600
@@ -0,0 +1,202 @@
+ log "github.com/sirupsen/logrus" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + cache map[string]time.Time + clientset *kubernetes.Clientset +func NewOperator(tickrate time.Duration, namespace string) (*Operator, error) { + cache: map[string]time.Time{}, + if err := o.getClientSet(); err != nil { +func (o *Operator) getClientSet() error { + config, err := rest.InClusterConfig() + if kubeconfig, found := os.LookupEnv("KUBECONFIG"); found { + config, err = clientcmd.BuildConfigFromFlags("", kubeconfig) + user, uErr := user.Current() + path := filepath.Join(user.HomeDir, ".kube", "config") + config, err = clientcmd.BuildConfigFromFlags("", path) + clientset, err := kubernetes.NewForConfig(config) + o.clientset = clientset +func (o *Operator) Run() { + log.Info("checking deployments") + deployments, err := o.clientset.AppsV1().Deployments(o.namespace).List(metav1.ListOptions{}) + log.Warningf("failed to get deployments: %s", err.Error()) + for _, deployment := range deployments.Items { + // before checking the deployment, make sure we have the + if rawInterval, found := deployment.Spec.Template.Annotations[Annotation]; found { + o.checkDeployment(deployment, ts, rawInterval) +func (o *Operator) checkDeployment(deployment appsv1.Deployment, ts time.Time, rawInterval string) error { + name := deployment.Namespace + "/" + deployment.Name + interval, err := time.ParseDuration(rawInterval) + return fmt.Errorf("deployment %s has an invalid duration of %q", name, rawInterval) + lastCheck, found := o.cache[name] + log.Infof("found new deployment %s with interval %s", name, interval) + lastCheck = time.Time{} + if ts.Sub(lastCheck) < interval { + "%s will be checked in %s", + time.Until(lastCheck.Add(interval)), + log.Debugf("checking deployment %s", name) + // create a cache for the images in this deployment + images := map[string]string{} + o.clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment) + listOpts := metav1.ListOptions{ + LabelSelector: createLabelString(deployment.Spec.Template.Labels), + pods, err := o.clientset.CoreV1().Pods(deployment.Namespace).List(listOpts) + for _, pod := range pods.Items { + if err := o.checkPod(pod, images); err != nil { +func (o *Operator) checkPod(pod corev1.Pod, images map[string]string) error { + for _, container := range pod.Spec.Containers { + log.Debugf("checking %s/%s", pod.ObjectMeta.Namespace, pod.Name) + if container.ImagePullPolicy != corev1.PullAlways { + "%s/%s ImagePullPolicy is %s not %s", + pod.ObjectMeta.Namespace, + container.ImagePullPolicy, + // since the statuses is not a map, we have to iterate it to find + // the containers we're interested in. + for _, status := range pod.Status.ContainerStatuses { + if status.Name != container.Name { + log.Infof("%s/%s:%s is not ready, skipping", pod.ObjectMeta.Namespace, pod.Name, container.Name) + } else if err := o.checkContainer(container, status.ImageID, images); err != nil { + "%s/%s:%s failed check: %v", + pod.ObjectMeta.Namespace, +func (o *Operator) checkContainer(container corev1.Container, imageID string, images map[string]string) error { + // now that we know that the container has a ImagePullPolicy of + // Always, we can check it's image. 
+	regImageID, found := images[container.Image]
+		id, err := registryGetImageID(container.Image)
+		images[container.Image] = id
+	if imageID != regImageID {
+		log.Warnf("image id's differ, cycling pod: %s != %s", imageID, regImageID)
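The Operator struct declaration itself didn't survive extraction; from the field and constructor fragments above, it presumably looks roughly like the sketch below. The tickrate and namespace fields are assumptions inferred from the NewOperator signature, and getClientSet is the method shown in the diff (in-cluster config first, then KUBECONFIG, then $HOME/.kube/config).

```go
import (
	"time"

	"k8s.io/client-go/kubernetes"
)

// Operator periodically checks annotated Deployments for updated images.
// Field names beyond cache and clientset are guesses from the constructor.
type Operator struct {
	tickrate  time.Duration
	namespace string

	cache     map[string]time.Time
	clientset *kubernetes.Clientset
}

func NewOperator(tickrate time.Duration, namespace string) (*Operator, error) {
	o := &Operator{
		tickrate:  tickrate,
		namespace: namespace,
		cache:     map[string]time.Time{},
	}

	if err := o.getClientSet(); err != nil {
		return nil, err
	}

	return o, nil
}
```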
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/util.go	Tue Dec 24 00:56:24 2019 -0600
@@ -0,0 +1,16 @@
+// create a comma separated string of labels to match based on a map
+func createLabelString(labelMap map[string]string) string {
+	labels := make([]string, len(labelMap))
+
+	i := 0
+	for k, v := range labelMap {
+		labels[i] = k + "=" + v
+		i++
+	}
+
+	return strings.Join(labels, ",")
+}
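For reference, the selector string createLabelString builds is what checkDeployment feeds into the LabelSelector of its Pods List call; a hypothetical input and output:

```go
// hypothetical label map; the real one comes from deployment.Spec.Template.Labels
selector := createLabelString(map[string]string{
	"app": "autoupdate",
	"env": "prod",
})
// selector is "app=autoupdate,env=prod" (pair order varies, Go maps are unordered)
```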