共计 7970 个字符,预计需要花费 20 分钟才能阅读完成。
1.什么是ELK stack ?
ELK stack 是三个开源项目的统称,E代表Elasticsearch,一种基于Lucene的高效搜索分析引擎。L代表Logstash,一种服务端的数据收集中间件。K就代表Kibana了,用于数据可视化,图形化。
2.环境准备
既然知道了什么是ELK,自然要准备环境了,笔者将基于docker创建拥有3个节点的Elasticsearch集群,同时一起构建的还有Kibana和Logstash(笔者最终消耗3-4G内存)。由于是Elasticsearch集群,配置会和单节点有所不同,最重要的就是安全问题了,比方说我们的Kibana总不能让任何人都能登录控制台吧,节点间通讯数据总得加密传输吧?
如果是单节点开发,minimal就够用了,此时xpack.security.enabled启用就完了。多节点集群环境xpack.security.enabled开启同时xpack.security.transport.ssl.enabled也必须开启,否则无法启动,为了节点通讯安全,官方做了这一限制。xpack.security.http.ssl.enabled是为了http客户端与Elasticsearch交流安全考虑,一般而言只有走外网环境才会考虑数据加密,走内网环境的话,非必要是可以不启用,不配置的,当然一定要安全就配置。
首先我们定义docker的env文件(默认是.env文件),用于定义变量
COMPOSE_PROJECT_NAME=es
CERTS_DIR=/usr/share/elasticsearch/config/certificates
VERSION=7.16.0
各节点证书配置instances.yml(注意文件名需与后面certutil命令中引用的一致)
# Certificate instance definitions consumed by elasticsearch-certutil.
# Each entry yields a per-node cert valid for the listed DNS names and IPs.
instances:
  - name: es01
    dns:
      - es01
      - localhost
    ip:
      - 127.0.0.1
  - name: es02
    dns:
      - es02
      - localhost
    ip:
      - 127.0.0.1
  - name: es03
    dns:
      - es03
      - localhost
    ip:
      - 127.0.0.1
接着就是证书初始化配置create-certs.yml
version: '2.2'

services:
  # One-shot helper container: generates a CA plus per-node certificates
  # from instances.yml and unzips the bundle into the shared "certs" volume.
  # chown 1000:0 matches the elasticsearch user (uid 1000, root group) so
  # the ES containers can read the certs later.
  create_certs:
    image: docker.elastic.co/elasticsearch/elasticsearch:${VERSION}
    container_name: create_certs
    command: >
      bash -c '
        if [[ ! -f /certs/bundle.zip ]]; then
          bin/elasticsearch-certutil cert --silent --pem --in config/certificates/instances.yml -out /certs/bundle.zip;
          unzip /certs/bundle.zip -d /certs;
        fi;
        chown -R 1000:0 /certs
      '
    working_dir: /usr/share/elasticsearch
    volumes:
      - certs:/certs
      # mount the directory containing instances.yml into the container
      - .:/usr/share/elasticsearch/config/certificates
    networks:
      - elk

volumes:
  certs:
    driver: local

networks:
  elk:
    driver: bridge
最后是ELK的配置elk-compose.yml
version: '2.2'

services:
  # --- Elasticsearch node 1 (only node exposed on the host, port 9200) ---
  es01:
    image: docker.elastic.co/elasticsearch/elasticsearch:${VERSION}
    container_name: es01
    environment:
      - node.name=es01
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es02,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      # security: transport TLS is mandatory once xpack.security is enabled
      # on a multi-node cluster
      - xpack.security.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
      - xpack.security.transport.ssl.certificate=$CERTS_DIR/es01/es01.crt
      - xpack.security.transport.ssl.key=$CERTS_DIR/es01/es01.key
    ulimits:
      # allow bootstrap.memory_lock=true to lock the heap
      memlock:
        soft: -1
        hard: -1
    volumes:
      - certs:$CERTS_DIR
      #- /opt/elasticsearch/es01/config:/usr/share/elasticsearch/config
      - /opt/elasticsearch/es01/data:/usr/share/elasticsearch/data
      - /opt/elasticsearch/es01/logs:/usr/share/elasticsearch/logs
    ports:
      # quoted to avoid YAML sexagesimal parsing of host:container pairs
      - "9200:9200"
    networks:
      - elk

  # --- Elasticsearch node 2 ---
  es02:
    image: docker.elastic.co/elasticsearch/elasticsearch:${VERSION}
    container_name: es02
    environment:
      - node.name=es02
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - xpack.security.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
      - xpack.security.transport.ssl.certificate=$CERTS_DIR/es02/es02.crt
      - xpack.security.transport.ssl.key=$CERTS_DIR/es02/es02.key
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - certs:$CERTS_DIR
      #- /opt/elasticsearch/es02/config:/usr/share/elasticsearch/config
      - /opt/elasticsearch/es02/data:/usr/share/elasticsearch/data
      - /opt/elasticsearch/es02/logs:/usr/share/elasticsearch/logs
    networks:
      - elk

  # --- Elasticsearch node 3 ---
  es03:
    image: docker.elastic.co/elasticsearch/elasticsearch:${VERSION}
    container_name: es03
    environment:
      - node.name=es03
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es02
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - xpack.security.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
      - xpack.security.transport.ssl.certificate=$CERTS_DIR/es03/es03.crt
      - xpack.security.transport.ssl.key=$CERTS_DIR/es03/es03.key
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - certs:$CERTS_DIR
      #- /opt/elasticsearch/es03/config:/usr/share/elasticsearch/config
      - /opt/elasticsearch/es03/data:/usr/share/elasticsearch/data
      - /opt/elasticsearch/es03/logs:/usr/share/elasticsearch/logs
    networks:
      - elk

  # --- Kibana UI, reachable on host port 5601 ---
  kibana:
    image: docker.elastic.co/kibana/kibana:${VERSION}
    container_name: kibana
    ports:
      - "5601:5601"
    volumes:
      - certs:$CERTS_DIR
    environment:
      SERVER_BASEPATH: /kibana
      ELASTICSEARCH_URL: http://es01:9200
      ELASTICSEARCH_HOSTS: '["http://es01:9200","http://es02:9200","http://es03:9200"]'
      ELASTICSEARCH_USERNAME: kibana_system
      # replace after running elasticsearch-setup-passwords
      ELASTICSEARCH_PASSWORD: changeme
      # uncomment the lines below to enable HTTPS between browser/Kibana/ES
      #ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES: $CERTS_DIR/ca/ca.crt
      #SERVER_SSL_ENABLED: "true"
      #SERVER_SSL_KEY: $CERTS_DIR/kibana/kibana.key
      #SERVER_SSL_CERTIFICATE: $CERTS_DIR/kibana/kibana.crt
    networks:
      - elk

  # --- Logstash: 4560 is used by the logback TCP appender below ---
  logstash:
    image: docker.elastic.co/logstash/logstash:${VERSION}
    container_name: logstash
    ports:
      - "5044:5044"
      - "4560:4560"
      - "5000:5000/tcp"
      - "5000:5000/udp"
      - "9600:9600"
    environment:
      - "LS_JAVA_OPTS=-Xmx256m -Xms256m"
      - xpack.monitoring.elasticsearch.hosts=["http://es01:9200","http://es02:9200","http://es03:9200"]
      - xpack.monitoring.elasticsearch.username=elastic
      # replace after running elasticsearch-setup-passwords
      - xpack.monitoring.elasticsearch.password=changeme
    networks:
      - elk
    #volumes:
    #- /opt/logstash/plugin-input/logstash-tcp-input.conf:/usr/share/logstash/config/logstash-tcp-input.conf
    depends_on:
      - es01

volumes:
  certs:
    driver: local

networks:
  elk:
    driver: bridge
细心读者会发现笔者注释了配置文件的挂载,这个后面会用到。
好了,配置文件准备全乎了,键入命令
docker-compose -f create-certs.yml run --rm create_certs
这将生成包含证书的数据卷,同时移除我们创建的容器,docker volume自行查看。
主角登场
docker-compose -f elk-compose.yml up -d
不出意外的话主角会打个照面就跑了,因为我们挂载的目录主角没有权限写,一生气就跑了,
这里容器内默认启动用户是 elasticsearch(uid 1000,属于 root 组即 gid 0,与前文证书 chown -R 1000:0 一致),只需给同组用户追加写权限就可以了
复制配置文件,并取消前文提到的注释
docker cp es01:/usr/share/elasticsearch/config /opt/elasticsearch/es01 ...
赋予权限
chmod -R 775 /opt/elasticsearch/es*
停止Kibana和Logstash
docker-compose -f elk-compose.yml stop
重新启动
docker-compose -f elk-compose.yml up -d
这时候3个Elasticsearch节点就起来了,但是我们的Kibana还没起来,Kibana想着,老哥我找Elasticsearch的密码没给我啊。初始化密码
docker exec es01 /bin/bash -c "bin/elasticsearch-setup-passwords \
auto --batch --url http://es01:9200"
控制台会输出初始化的帐号密码,找到kibana的以及elastic的密码,修改我们的主角yml文件中kibana系统用户以及Logstash的帐号密码(changeme),再次停止并重启,稍等片刻,
ELK基础环境算搭建完毕了,此时我们暂不讨论Logstash的插件。浏览器输入localhost:5601就访问到我们的Kibana了。输入elastic用户名密码即可登录控制台。
3.Java集成
要集成自然要知道从何处下手,集成谁,这就是我们Logstash的专场了
如上图所示,Logstash负责将它收集到的数据发送到约定规则的Elasticsearch索引上,Kibana再将数据从Elasticsearch采集出来进行UI展示。所以说我们现在要解决的问题就是如何将数据传输到Logstash。我们可以使用各种Beats配合Logstash采集数据,也可以直接连接Logstash并通过Logstash的插件格式转化将数据传输到Logstash,笔者采用直连Logstash这种方式描绘。
集成其实很简单,下面我们以Springboot+Logback的Maven项目演示
添加Logback日志到Logstash的桥梁
<!-- Bridge from Logback to Logstash: provides LogstashTcpSocketAppender
     and the JSON encoders used in logback-spring.xml below. -->
<dependency>
<groupId>net.logstash.logback</groupId>
<artifactId>logstash-logback-encoder</artifactId>
<version>${yourVersion}</version>
</dependency>
添加Logback到Logstash的日志文件配置logback.xml或者logback-spring.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <!-- Spring Boot defaults; defines the CONSOLE and FILE appenders -->
    <include resource="org/springframework/boot/logging/logback/base.xml" />
    <springProperty scope="context" name="springAppName" source="spring.application.name"/>

    <appender name="LOGSTASH-TCP" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <!-- Logstash TCP input address (matches the 4560 port mapping) -->
        <destination>127.0.0.1:4560</destination>
        <!-- only ship ERROR-level events to Logstash -->
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>ERROR</level>
        </filter>
        <!-- A custom <providers> list requires the composite encoder;
             LogstashEncoder ships with a fixed provider set -->
        <encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <timestamp>
                    <timeZone>UTC</timeZone>
                </timestamp>
                <pattern>
                    <pattern>
                        {
                        "logLevel": "%level",
                        "serviceName": "${springAppName:-}",
                        "pid": "${PID:-}",
                        "thread": "%thread",
                        "class": "%logger{40}",
                        "message": "%message",
                        "exception": "%ex{full}"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
    </appender>

    <root level="INFO">
        <!-- CONSOLE is defined by Spring Boot's base.xml included above -->
        <appender-ref ref="CONSOLE"/>
        <appender-ref ref="LOGSTASH-TCP"/>
    </root>
</configuration>
好了,客户端集成完毕了,我们像往常一样打日志就可以了。接下来只需配置Logstash的插件了
配置logstash.conf文件
# Pipeline: accept JSON events from the logback TCP appender on port 4560
# and index them into Elasticsearch under a daily logstash-springboot-* index.
input {
tcp {
# act as the listening side; logback's LogstashTcpSocketAppender connects in
mode => "server"
host => "yourIp"
port => 4560
# events arrive as JSON produced by the logback encoder
codec => json
}
}
output {
elasticsearch {
hosts => "http://es01:9200"
# one index per day; Kibana index pattern: logstash-springboot-*
index => "logstash-springboot-%{+YYYY.MM.dd}"
# replace with the credentials from elasticsearch-setup-passwords
user => "user"
password => "password"
}
}
注意,前文我们并没有挂载Logstash的配置文件,我们先拷贝一份Logstash容器里的配置文件出来,修改后再挂载,笔者最终挂载目录如下,
data目录记得赋予权限。到这里可以说简单的集成已经完毕,后期我们可能会根据需要定制索引模板,更换日志Appender等等,当然那都是后话了。
到这里,你们以为就完了?有一天,线上出现Bug了,程序员排查发现没有错误日志,唯一可能复现Bug的地方打的日志是Info级别的,怎么办?去服务器把几百Mb甚至上G的日志文件下载下来,然后像以前那样去翻阅?亦或者线上去追踪下日志文件输出?这时候,我们该想想如何才能动态的配置我们的日志系统呢?