Kubernetes ELK: Elasticsearch, Logstash, Kibana
Deploy Elasticsearch
Setup the host node
The vm.max_map_count kernel setting needs to be set to at least 262144 for production use. Make sure the node(s) that will host Elasticsearch have the following config:
Copy sysctl -w vm.max_map_count=262144
Note: to persist this setting across reboots, also add vm.max_map_count=262144 to /etc/sysctl.conf.
Create the data dir:
Copy mkdir -p /storage/storage-001/mnt-elasticsearch
chown nobody:nogroup /storage/storage-001/mnt-elasticsearch/
Create the namespace
Connect to your kubectl workstation and create the namespace:
Copy kubectl create namespace elk
Create the ConfigMap
Create Elasticsearch config file:
Copy cat <<EOF >elasticsearch.yml
cluster.name: "docker-cluster"
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: 1
discovery.type: single-node
EOF
Create its ConfigMap:
Copy kubectl -n elk \
create configmap cm-elasticsearch \
--from-file=elasticsearch.yml \
-o yaml --dry-run | kubectl apply -f -
If you need to update the ConfigMap, run:
Copy kubectl -n elk \
create configmap cm-elasticsearch \
--from-file=elasticsearch.yml \
-o yaml --dry-run | kubectl apply -f -
Then run:
Copy kubectl -n elk scale deployment/elasticsearch --replicas=0
kubectl -n elk scale deployment/elasticsearch --replicas=1
Deploy Elasticsearch
Run:
Copy kubectl create -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: elasticsearch
namespace: elk
labels:
app: elasticsearch
spec:
replicas: 1
selector:
matchLabels:
app: elasticsearch
template:
metadata:
labels:
app: elasticsearch
spec:
securityContext:
runAsUser: 65534
fsGroup: 65534
hostNetwork: true
containers:
- name: elasticsearch
image: docker.elastic.co/elasticsearch/elasticsearch:6.7.0
ports:
- containerPort: 9200
- containerPort: 9300
env:
- name: discovery.type
value: "single-node"
volumeMounts:
- name: config-volume
mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
subPath: elasticsearch.yml
- name: mnt-elasticsearch
mountPath: /usr/share/elasticsearch/data
volumes:
- name: config-volume
configMap:
name: cm-elasticsearch
- name: mnt-elasticsearch
hostPath:
path: /storage/storage-001/mnt-elasticsearch
nodeSelector:
kubernetes.io/hostname: k8snode
EOF
Create Elasticsearch service
Run:
Copy kubectl create -f - <<EOF
---
apiVersion: v1
kind: Service
metadata:
labels:
app: elasticsearch
name: srv-elasticsearch
namespace: elk
spec:
externalTrafficPolicy: Cluster
ports:
- name: "port-9200"
nodePort: 30920
port: 9200
protocol: TCP
targetPort: 9200
- name: "port-9300"
nodePort: 30930
port: 9300
protocol: TCP
targetPort: 9300
selector:
app: elasticsearch
sessionAffinity: None
type: NodePort
EOF
Test
Get indices:
Copy curl PUT-YOUR-NODE-IP-HERE:30920
curl PUT-YOUR-NODE-IP-HERE:30920/_cat/indices?v
Post content:
Copy curl -H "Content-Type: application/json" -XPOST "http://<HOST>:<PORT>/YOUR-INDEX/YOUR-TYPE/optionalUniqueId" -d "{ \"field\" : \"value\"}"
Another example of post:
Copy curl -X POST -H "Content-Type: application/json" -H "Cache-Control: no-cache" -d '{
"user" : "Arun Thundyill Saseendran",
"post_date" : "2009-03-23T12:30:00",
"message" : "trying out Elasticsearch"
}' "http://<HOST>:<PORT>/sampleindex/sampletype/"
Deploy Logstash
Create the ConfigMap (config file)
Create the config file:
Copy cat <<EOF >logstash.yml
http.host: "0.0.0.0"
path.config: /usr/share/logstash/pipeline
EOF
Create its ConfigMap:
Copy kubectl -n elk \
create configmap cm-config-logstash \
--from-file=logstash.yml \
-o yaml --dry-run | kubectl apply -f -
If you need to update the ConfigMap, run:
Copy kubectl -n elk \
create configmap cm-config-logstash \
--from-file=logstash.yml \
-o yaml --dry-run | kubectl apply -f -
Then run:
Copy kubectl -n elk scale deployment/logstash --replicas=0
kubectl -n elk scale deployment/logstash --replicas=1
Create the ConfigMap (pipeline)
Create the config file:
Copy cat <<EOF >logstash.conf
#input {
# tcp {
# port => 5959
# }
#}
input {
http {
port => 5959
response_headers => {
"Access-Control-Allow-Origin" => "*"
"Content-Type" => "application/json"
"Access-Control-Allow-Headers" => "Origin, X-Requested-With, Content-Type, Accept"
}
}
}
## Add your filters / logstash plugins configuration here
output {
elasticsearch {
hosts => "PUT-YOUR-HOST-HERE:PUT-YOUR-PORT-HERE"
}
}
EOF
Replace output.elasticsearch.hosts
with your Elasticsearch host and port.
Create its ConfigMap:
Copy kubectl -n elk \
create configmap cm-pipeline-logstash \
--from-file=logstash.conf \
-o yaml --dry-run | kubectl apply -f -
If you need to update the ConfigMap, run:
Copy kubectl -n elk \
create configmap cm-pipeline-logstash \
--from-file=logstash.conf \
-o yaml --dry-run | kubectl apply -f -
Then run:
Copy kubectl -n elk scale deployment/logstash --replicas=0
kubectl -n elk scale deployment/logstash --replicas=1
Deploy
Connect to your node and create the data dir:
Copy mkdir -p /storage/storage-001/mnt-logstash
chown nobody:nogroup /storage/storage-001/mnt-logstash
Connect to your kubectl workstation and run:
Copy kubectl create -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: logstash
namespace: elk
labels:
app: logstash
spec:
replicas: 1
selector:
matchLabels:
app: logstash
template:
metadata:
labels:
app: logstash
spec:
securityContext:
runAsUser: 65534
fsGroup: 65534
hostNetwork: true
containers:
- name: logstash
image: docker.elastic.co/logstash/logstash:6.7.0
ports:
- containerPort: 5959
env:
- name: discovery.type
value: "single-node"
volumeMounts:
- name: config-volume
mountPath: /usr/share/logstash/config/logstash.yml
subPath: logstash.yml
- name: pipeline-volume
mountPath: /usr/share/logstash/pipeline/logstash.conf
subPath: logstash.conf
- name: mnt-logstash
mountPath: /usr/share/logstash/data
volumes:
- name: config-volume
configMap:
name: cm-config-logstash
- name: pipeline-volume
configMap:
name: cm-pipeline-logstash
- name: mnt-logstash
hostPath:
path: /storage/storage-001/mnt-logstash
nodeSelector:
kubernetes.io/hostname: k8snode
EOF
Create service
Run:
Copy kubectl create -f - <<EOF
---
apiVersion: v1
kind: Service
metadata:
labels:
app: logstash
name: srv-logstash
namespace: elk
spec:
externalTrafficPolicy: Cluster
ports:
- name: "port-5959"
nodePort: 30595
port: 5959
protocol: TCP
targetPort: 5959
selector:
app: logstash
sessionAffinity: None
type: NodePort
EOF
Test
Get service info:
Copy curl -XGET 'PUT-YOUR-HOST-HERE:9600/_node/logging?pretty'
Telnet test:
Copy telnet PUT-NODE-HOST-HERE 30595
Deploy Kibana
Create the ConfigMap
Create the config file:
Copy cat <<EOF >kibana.yml
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://PUT-YOUR-HOST-HERE:30920" ]
xpack.monitoring.ui.container.elasticsearch.enabled: true
EOF
Replace elasticsearch.hosts
with your Elasticsearch host and port.
Create its ConfigMap:
Copy kubectl -n elk \
create configmap cm-kibana \
--from-file=kibana.yml \
-o yaml --dry-run | kubectl apply -f -
If you need to update the ConfigMap, run:
Copy kubectl -n elk \
create configmap cm-kibana \
--from-file=kibana.yml \
-o yaml --dry-run | kubectl apply -f -
Then run:
Copy kubectl -n elk scale deployment/kibana --replicas=0
kubectl -n elk scale deployment/kibana --replicas=1
Deploy
Connect to your node and create the data dir:
Copy mkdir -p /storage/storage-001/mnt-kibana
chown nobody:nogroup /storage/storage-001/mnt-kibana/
Connect to your kubectl workstation and run:
Copy kubectl create -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana
namespace: elk
labels:
app: kibana
spec:
replicas: 1
selector:
matchLabels:
app: kibana
template:
metadata:
labels:
app: kibana
spec:
securityContext:
runAsUser: 65534
fsGroup: 65534
hostNetwork: true
containers:
- name: kibana
image: docker.elastic.co/kibana/kibana:6.7.0
ports:
- containerPort: 5601
env:
- name: ELASTICSEARCH_HOSTS
value: "http://PUT-YOUR-HOST-HERE:30920"
volumeMounts:
- name: config-volume
mountPath: /usr/share/kibana/config/kibana.yml
subPath: kibana.yml
- name: mnt-kibana
mountPath: /usr/share/kibana/data
volumes:
- name: config-volume
configMap:
name: cm-kibana
- name: mnt-kibana
hostPath:
path: /storage/storage-001/mnt-kibana
nodeSelector:
kubernetes.io/hostname: k8snode
EOF
Replace spec.template.spec.containers.env
with your Elasticsearch host and port.
Create service
Run:
Copy kubectl create -f - <<EOF
---
apiVersion: v1
kind: Service
metadata:
labels:
app: kibana
name: srv-kibana
namespace: elk
spec:
externalTrafficPolicy: Cluster
ports:
- name: "port-5601"
nodePort: 30560
port: 5601
protocol: TCP
targetPort: 5601
selector:
app: kibana
sessionAffinity: None
type: NodePort
EOF
Test
Curl test:
Copy curl http://PUT-YOUR-HOST-HERE:30560
Deploy filebeat
Example to stream log files from /elk/*.log to Elasticsearch.
Copy cat <<'EOF' >filebeat.docker.yml
filebeat:
config:
modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
autodiscover:
providers:
- type: docker
hints.enabled: true
prospectors:
- input_type: log
paths:
- /elk/*.log
#output.logstash:
# hosts: ["PUT-YOUR-IP-HERE:30595"]
output.elasticsearch:
hosts: ["http://PUT-YOUR-IP-HERE:30920"]
#index: "filebeat-%{[beat.version]}-%{+yyyy.MM.dd}"
#index: "test-filebeat"
logging:
files:
rotateeverybytes: 10485760 # = 10MB
EOF
Run filebeat as a Docker container:
Copy docker run -tid \
--name=filebeat \
--user=root \
--volume="$(pwd)/filebeat.docker.yml:/usr/share/filebeat/filebeat.yml:ro" \
--volume="/elk:/elk:ro" \
--volume="/var/lib/docker/containers:/var/lib/docker/containers:ro" \
--volume="/var/run/docker.sock:/var/run/docker.sock:ro" \
docker.elastic.co/beats/filebeat:6.7.0 filebeat