imfreedom/k8s-cluster

Add our logging manifests

2019-05-24, Gary Kramlich
1bfc56ec5ec5
Parents 1c4e04eea97f
Children 70ccf5f8013d
Add our logging manifests
--- a/00-namespaces.yaml Fri May 24 23:08:00 2019 -0500
+++ b/00-namespaces.yaml Fri May 24 23:08:35 2019 -0500
@@ -1,3 +1,5 @@
+# This file contains all of the namespaces that exist in the cluster.
+---
apiVersion: v1
kind: Namespace
metadata:
@@ -17,4 +19,10 @@
kind: Namespace
metadata:
name: adium
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: kube-logging
+---
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/10-logging.yaml Fri May 24 23:08:35 2019 -0500
@@ -0,0 +1,249 @@
+# This manifest sets up an EFK stack for capturing logs from all of the pods
+# in the cluster.
+#
+# This is based heavily on https://www.digitalocean.com/community/tutorials/how-to-set-up-an-elasticsearch-fluentd-and-kibana-efk-logging-stack-on-kubernetes
+# but has some tweaks around Elasticsearch when it comes to resources and
+# filesystem permissions.
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: elasticsearch
+ namespace: kube-logging
+ labels:
+ app: elasticsearch
+spec:
+ selector:
+ app: elasticsearch
+ clusterIP: None
+ ports:
+ - port: 9200
+ name: rest
+ - port: 9300
+ name: inter-node
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: es-cluster
+ namespace: kube-logging
+spec:
+ serviceName: elasticsearch
+ replicas: 3
+ selector:
+ matchLabels:
+ app: elasticsearch
+ template:
+ metadata:
+ labels:
+ app: elasticsearch
+ spec:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - elasticsearch
+ topologyKey: failure-domain.beta.kubernetes.io/region
+ weight: 100
+ containers:
+ - name: elasticsearch
+ image: docker.elastic.co/elasticsearch/elasticsearch:7.1.0
+ resources:
+ limits:
+ cpu: 1000m
+ memory: 1024Mi # heap is 512Mi (see ES_JAVA_OPTS); cap the container at twice that to leave room for JVM off-heap overhead
+ requests:
+ cpu: 100m
+ memory: 512Mi
+ ports:
+ - containerPort: 9200
+ name: rest
+ protocol: TCP
+ - containerPort: 9300
+ name: inter-node
+ protocol: TCP
+ volumeMounts:
+ - name: data
+ mountPath: /usr/share/elasticsearch/data
+ env:
+ - name: cluster.name
+ value: k8s-logs
+ - name: node.name
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: cluster.initial_master_nodes
+ value: "es-cluster-0,es-cluster-1,es-cluster-2"
+ - name: discovery.zen.ping.unicast.hosts
+ value: "es-cluster-0.elasticsearch,es-cluster-1.elasticsearch,es-cluster-2.elasticsearch"
+ - name: ES_JAVA_OPTS
+ value: "-Xms512m -Xmx512m -XX:+UnlockExperimentalVMOptions"
+ initContainers:
+ - name: increase-vm-max-map
+ image: busybox
+ command: ["sysctl", "-w", "vm.max_map_count=262144"]
+ securityContext:
+ runAsUser: 0
+ privileged: true
+ - name: increase-fd-ulimit
+ image: busybox
+ command: ["sh", "-c", "ulimit -n 65536"]
+ securityContext:
+ runAsUser: 0
+ privileged: true
+ securityContext:
+ fsGroup: 1000
+ runAsUser: 1000
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ labels:
+ app: elasticsearch
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ storageClassName: do-block-storage
+ resources:
+ requests:
+ storage: 10Gi
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: kibana
+ namespace: kube-logging
+ labels:
+ app: kibana
+spec:
+ ports:
+ - port: 5601
+ selector:
+ app: kibana
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: kibana
+ namespace: kube-logging
+ labels:
+ app: kibana
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: kibana
+ template:
+ metadata:
+ labels:
+ app: kibana
+ spec:
+ containers:
+ - name: kibana
+ image: docker.elastic.co/kibana/kibana:7.1.0
+ resources:
+ limits:
+ cpu: 1000m
+ memory: 512Mi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ env:
+ - name: ELASTICSEARCH_URL
+ value: http://elasticsearch.kube-logging.svc.cluster.local:9200
+ ports:
+ - containerPort: 5601
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: fluentd
+ namespace: kube-logging
+ labels:
+ app: fluentd
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: fluentd
+ labels:
+ app: fluentd
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: fluentd
+roleRef:
+ kind: ClusterRole
+ name: fluentd
+ apiGroup: rbac.authorization.k8s.io
+subjects:
+ - kind: ServiceAccount
+ name: fluentd
+ namespace: kube-logging
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: fluentd
+ namespace: kube-logging
+ labels:
+ app: fluentd
+spec:
+ selector:
+ matchLabels:
+ app: fluentd
+ template:
+ metadata:
+ labels:
+ app: fluentd
+ spec:
+ serviceAccount: fluentd
+ serviceAccountName: fluentd
+ containers:
+ - name: fluentd
+ image: fluent/fluentd-kubernetes-daemonset:v0.12-debian-elasticsearch
+ env:
+ - name: FLUENT_ELASTICSEARCH_HOST
+ value: "elasticsearch"
+ - name: FLUENT_ELASTICSEARCH_PORT
+ value: "9200"
+ - name: FLUENT_ELASTICSEARCH_SCHEME
+ value: "http"
+ - name: FLUENT_UID
+ value: "0"
+ resources:
+ limits:
+ cpu: 300m
+ memory: 512Mi
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ volumeMounts:
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+