0%

20230218 docker方式安装ElasticSearch

docker方式安装ElasticSearch

前言:

项目中要用到 ElasticSearch,以前都是使用单机版,既然是正式使用,就需要学习一下集群啥的,也要把安全性考虑进去。

刚入手的MacBook Pro M2 16寸( M2 ARM64) ,其实对容器以及虚拟机的兼容性还是有点不确定,所以这次会同时在旧的 MacBook Pro 2015 15寸( Intel I7) 同时安装测试。

参考:搜了一下,网上大多都是同样的方式安装,我基本参考 简书上“卖菇凉的小火柴丶”的文章 docker-compose安装elasticsearch8.5.0集群

先测试单机版

准备好环境文件 .env ,这个env文件会在后面几个测试方案中一直使用。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
# elastic账号的密码 (至少六个字符),别用纯数字,否则死给你看
ELASTIC_PASSWORD=iampassword

# kibana_system账号的密码 (至少六个字符),该账号仅用于一些kibana的内部设置,不能用来查询es,别用纯数字,否则死给你看
KIBANA_PASSWORD=iampassword

# es和kibana的版本
STACK_VERSION=7.17.9

# 集群名字
CLUSTER_NAME=docker-cluster

# x-pack安全设置,这里选择basic,基础设置,如果选择了trial,则会在30天后到期
LICENSE=basic
#LICENSE=trial

# es映射到宿主机的端口
ES_PORT=9200

# kibana映射到宿主机的端口
KIBANA_PORT=5601

# es容器的内存大小,请根据自己硬件情况调整(字节为单位,当前1G)
MEM_LIMIT=1073741824

# 命名空间,会体现在容器名的前缀上
COMPOSE_PROJECT_NAME=es

然后准备 docker-compose.yaml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
version: '3'
services:
es-single:
image: elasticsearch:${STACK_VERSION}
container_name: es-single
volumes:
- ./data/esdata01:/usr/share/elasticsearch/data
ports:
- 9200:9200
- 9300:9300
environment:
- node.name=es-single
- cluster.name=es-docker-cluster
- discovery.type=single-node
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
- xpack.security.enabled=true
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1

kibana-single:
depends_on:
- es-single
image: kibana:${STACK_VERSION}
container_name: kibana-single
ports:
- ${KIBANA_PORT}:5601
volumes:
- ./data/kibanadata:/usr/share/kibana/data

environment:
- SERVERNAME=kibana-single
- ELASTICSEARCH_HOSTS=http://es-single:9200
- ELASTICSEARCH_USERNAME=elastic
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
mem_limit: ${MEM_LIMIT}

然后启动 docker-compose up -d

稍等十几秒后再查看 curl -u elastic:iampassword http://localhost:9200 (浏览器里也可以直接查看,不过这样显得牛逼)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
{
"name" : "es-single",
"cluster_name" : "es-docker-cluster",
"cluster_uuid" : "0pIB-A9kScyLkhj6YkYSjA",
"version" : {
"number" : "7.17.9",
"build_flavor" : "default",
"build_type" : "docker",
"build_hash" : "ef48222227ee6b9e70e502f0f0daa52435ee634d",
"build_date" : "2023-01-31T05:34:43.305517834Z",
"build_snapshot" : false,
"lucene_version" : "8.11.1",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}

再过十几秒后网页打开 http://localhost:5601 就可以看到登录页面。

装逼的样子就是这样

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
$ curl -v  http://localhost:5601
* Trying 127.0.0.1:5601...
* Connected to localhost (127.0.0.1) port 5601 (#0)
> GET / HTTP/1.1
> Host: localhost:5601
> User-Agent: curl/7.86.0
> Accept: */*
>
* Mark bundle as not supporting multiuse
< HTTP/1.1 302 Found
< location: /login?next=%2F
< x-content-type-options: nosniff
< referrer-policy: no-referrer-when-downgrade
< content-security-policy: script-src 'unsafe-eval' 'self'; worker-src blob: 'self'; style-src 'unsafe-inline' 'self'
< kbn-name: f382d92d1bda
< kbn-license-sig: da420c53321c02b93e5b67b614ccdf37075cab5cc99a13d97fca5727603889d0
< cache-control: private, no-cache, no-store, must-revalidate
< content-length: 0
< Date: Sat, 18 Feb 2023 04:54:46 GMT
< Connection: keep-alive
< Keep-Alive: timeout=120
<

这样单机版的就好了。

集群版

新建一个 cluster 目录,把 .env 文件复制进去 ,

创建新的docker-compose.yaml文件,内容如下:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
version: '3'
services:
setup-cluster:
image: elasticsearch:${STACK_VERSION}
container_name: setup-cluster
volumes:
- ./setup-cluster.sh:/setup-cluster.sh
environment:
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- KIBANA_PASSWORD=${KIBANA_PASSWORD}
user: "0"
command: >
bash /setup-cluster.sh

es-cluster-01:
depends_on:
- setup-cluster
image: elasticsearch:${STACK_VERSION}
container_name: es-cluster-01
volumes:
- ./data/esdata01:/usr/share/elasticsearch/data
ports:
- 9200:9200
- 9300:9300
environment:
- node.name=es-cluster-01
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=es-cluster-01,es-cluster-02,es-cluster-03
- discovery.seed_hosts=es-cluster-02,es-cluster-03
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
# - xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test: curl -u elastic:${ELASTIC_PASSWORD} -s -f localhost:9200/_cat/health >/dev/null || exit 1
interval: 30s
timeout: 10s
retries: 5

es-cluster-02:
image: elasticsearch:${STACK_VERSION}
container_name: es-cluster-02
depends_on:
- es-cluster-01
volumes:
# - ./certs:/usr/share/elasticsearch/config/certs
- ./data/esdata02:/usr/share/elasticsearch/data
ports:
- '9202:9200'
- '9302:9300'
environment:
- node.name=es-cluster-02
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=es-cluster-01,es-cluster-02,es-cluster-03
- discovery.seed_hosts=es-cluster-01,es-cluster-03
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
# - xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test: curl -u elastic:${ELASTIC_PASSWORD} -s -f localhost:9200/_cat/health >/dev/null || exit 1
interval: 30s
timeout: 10s
retries: 5


es-cluster-03:
image: elasticsearch:${STACK_VERSION}
container_name: es-cluster-03
depends_on:
- es-cluster-01
volumes:
- ./data/esdata03:/usr/share/elasticsearch/data
ports:
- '9203:9200'
- '9303:9300'
environment:
- node.name=es-cluster-03
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=es-cluster-01,es-cluster-02,es-cluster-03
- discovery.seed_hosts=es-cluster-01,es-cluster-02
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
# - xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test: curl -u elastic:${ELASTIC_PASSWORD} -s -f localhost:9200/_cat/health >/dev/null || exit 1
interval: 30s
timeout: 10s
retries: 5



kibana-cluster:
depends_on:
es-cluster-01:
condition: service_healthy
es-cluster-02:
condition: service_healthy
es-cluster-03:
condition: service_healthy
image: kibana:${STACK_VERSION}
container_name: kibana-cluster
ports:
- ${KIBANA_PORT}:5601
volumes:
- ./data/kibanadata:/usr/share/kibana/data

environment:
- SERVERNAME=kibana
- ELASTICSEARCH_HOSTS=["http://es-cluster-01:9200","http://es-cluster-02:9200","http://es-cluster-03:9200"]
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
mem_limit: ${MEM_LIMIT}
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120


启动 docker-compose up -d

一分钟后查看 , kibana正在启动

1
2
3
4
5
6
7
$ docker-compose ps -a
NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
es-cluster-01 elasticsearch:7.17.9 "/bin/tini -- /usr/l…" es-cluster-01 About a minute ago Up About a minute (healthy) 0.0.0.0:9200->9200/tcp, 0.0.0.0:9300->9300/tcp
es-cluster-02 elasticsearch:7.17.9 "/bin/tini -- /usr/l…" es-cluster-02 About a minute ago Up About a minute (healthy) 0.0.0.0:9202->9200/tcp, 0.0.0.0:9302->9300/tcp
es-cluster-03 elasticsearch:7.17.9 "/bin/tini -- /usr/l…" es-cluster-03 About a minute ago Up About a minute (healthy) 0.0.0.0:9203->9200/tcp, 0.0.0.0:9303->9300/tcp
kibana-cluster kibana:7.17.9 "/bin/tini -- /usr/l…" kibana-cluster About a minute ago Up 11 seconds (health: starting) 0.0.0.0:5601->5601/tcp
setup-cluster elasticsearch:7.17.9 "/bin/tini -- /usr/l…" setup-cluster About a minute ago Up About a minute 9200/tcp, 9300/tcp

再过一会还是不见kibana启动好,却发现es-cluster-01退出,查看日志没有任何错误提示。

1
2
3
4
5
6
$ docker-compose ps
NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
es-cluster-02 elasticsearch:7.17.9 "/bin/tini -- /usr/l…" es-cluster-02 2 minutes ago Up 2 minutes (healthy) 0.0.0.0:9202->9200/tcp, 0.0.0.0:9302->9300/tcp
es-cluster-03 elasticsearch:7.17.9 "/bin/tini -- /usr/l…" es-cluster-03 2 minutes ago Up 2 minutes (healthy) 0.0.0.0:9203->9200/tcp, 0.0.0.0:9303->9300/tcp
kibana-cluster kibana:7.17.9 "/bin/tini -- /usr/l…" kibana-cluster 2 minutes ago Up About a minute (health: starting) 0.0.0.0:5601->5601/tcp
setup-cluster elasticsearch:7.17.9 "/bin/tini -- /usr/l…" setup-cluster 2 minutes ago Up 2 minutes 9200/tcp, 9300/tcp

然后想着执行 docker-compose up -d 把 es-cluster-01 重新拉起来,结果是

1
2
3
4
5
6
$ docker-compose ps
NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
es-cluster-01 elasticsearch:7.17.9 "/bin/tini -- /usr/l…" es-cluster-01 19 minutes ago Up 16 minutes (healthy) 0.0.0.0:9200->9200/tcp, 0.0.0.0:9300->9300/tcp
es-cluster-03 elasticsearch:7.17.9 "/bin/tini -- /usr/l…" es-cluster-03 19 minutes ago Up 19 minutes (healthy) 0.0.0.0:9203->9200/tcp, 0.0.0.0:9303->9300/tcp
kibana-cluster kibana:7.17.9 "/bin/tini -- /usr/l…" kibana-cluster 19 minutes ago Up 18 minutes (healthy) 0.0.0.0:5601->5601/tcp
setup-cluster elasticsearch:7.17.9 "/bin/tini -- /usr/l…" setup-cluster 19 minutes ago Up 19 minutes 9200/tcp, 9300/tcp

这时候02 node又退出了,而且还是没有任何出错提示。感觉是这个集群只有两个能起来。

这时候直接访问es 和 kibana 都正常。

这时候用 ElasticSearch Head 查看es集群,发现一切正常,集群健康值green。

在老款笔记本执行

在2015款MacBook 上执行,这台电脑启动比较慢,应该是cpu 、内存、硬盘速度都不够快。

第一次执行完提示03不健康,估计是kibana检查重试的次数到了后自己退出了。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
$ docker-compose up -d
[+] Running 4/5
⠿ Container setup-cluster Started 0.9s
⠿ Container es-cluster-01 Healthy 156.1s
⠿ Container es-cluster-03 Error 155.6s
⠿ Container es-cluster-02 Healthy 156.5s
⠿ Container kibana-cluster Created 0.1s
dependency failed to start: container for service "es-cluster-03" is unhealthy

$ docker-compose ps
NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
es-cluster-01 elasticsearch:7.17.9 "/bin/tini -- /usr/l…" es-cluster-01 2 minutes ago Up About a minute (health: starting) 0.0.0.0:9200->9200/tcp, 0.0.0.0:9300->9300/tcp
es-cluster-02 elasticsearch:7.17.9 "/bin/tini -- /usr/l…" es-cluster-02 2 minutes ago Up About a minute (health: starting) 0.0.0.0:9202->9200/tcp, 0.0.0.0:9302->9300/tcp
es-cluster-03 elasticsearch:7.17.9 "/bin/tini -- /usr/l…" es-cluster-03 2 minutes ago Up About a minute (health: starting) 0.0.0.0:9203->9200/tcp, 0.0.0.0:9303->9300/tcp
setup-cluster elasticsearch:7.17.9 "/bin/tini -- /usr/l…" setup-cluster 2 minutes ago Up About a minute 9200/tcp, 9300/tcp


这时候就手动启动 docker-compose up -d

1
2
3
4
5
6
7
8
$ docker-compose up -d
[+] Running 5/5
⠿ Container setup-cluster Running 0.0s
⠿ Container es-cluster-01 Healthy 0.6s
⠿ Container es-cluster-03 Healthy 0.6s
⠿ Container es-cluster-02 Healthy 0.6s
⠿ Container kibana-cluster Started

但是这时候kibana怎么也启动不起来,检查日志发现

es-cluster-02 | {"type": "server", "timestamp": "2023-02-18T06:11:25,259Z", "level": "WARN", "component": "o.e.c.r.a.DiskThresholdMonitor", "cluster.name": "docker-cluster", "node.name": "es-cluster-02", "message": "high disk watermark [90%] exceeded on [pdT2lWRmQEi04k5GYvrWuA][es-cluster-01][/usr/share/elasticsearch/data/nodes/0] free: 88.6gb[9.2%], shards will be relocated away from this node; currently relocating away shards totalling [0] bytes; the node is expected to continue to exceed the high disk watermark when these relocations are complete", "cluster.uuid": "xaadt2vISeWTK4hk8RDJeA", "node.id": "7rYuhhyeS86iyKOtUChBKw" }

大致意思是我硬盘空间快满了,shards将不会分配给这个node,搜了一下解决办法就是

1
2
3
4
5
6
7
8
9
10
11
curl -XPUT "http://localhost:9200/_cluster/settings" \
-H 'Content-Type: application/json' -d'
{
"persistent": {
"cluster": {
"routing": {
"allocation.disk.threshold_enabled": false
}
}
}
}'

执行完以后看到 kibana 日志就迅速滚动起来。后面再看看 kibana 启动时候都干了啥,为啥这么慢。

这时候 cpu 占用比较高,风扇哗啦啦响。

过了好久发现es-cluster-01 退出了,依然是没有任何错误提示,kibana自己提示 unhealthy 了。

1
2
3
4
5
6
$ docker-compose ps
NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
es-cluster-02 elasticsearch:7.17.9 "/bin/tini -- /usr/l…" es-cluster-02 30 minutes ago Up 29 minutes (healthy) 0.0.0.0:9202->9200/tcp, 0.0.0.0:9302->9300/tcp
es-cluster-03 elasticsearch:7.17.9 "/bin/tini -- /usr/l…" es-cluster-03 30 minutes ago Up 29 minutes (healthy) 0.0.0.0:9203->9200/tcp, 0.0.0.0:9303->9300/tcp
kibana-cluster kibana:7.17.9 "/bin/tini -- /usr/l…" kibana-cluster 29 minutes ago Up 26 minutes (unhealthy) 0.0.0.0:5601->5601/tcp
setup-cluster elasticsearch:7.17.9 "/bin/tini -- /usr/l…" setup-cluster 30 minutes ago Up 29 minutes 9200/tcp, 9300/tcp

唉~看来es集群没问题,但是启动kibana的时候会做较多的事情。再次重新启动,这时候一切正常了。

下面研究为啥cluster只启动两个的问题。这时候访问任何一个 node ,感觉都是健康的。

这世道乱了,忙乱了好久,最后看了下docker分配的cpu只有1个,内存只有2.8G😲,好吧,增加内存,这世界就安静了。

集群版加security版

新建一个 cluster-ssl 目录,把 .env 文件复制进去 ,

新建 docker-compose.yml,主要增加了 xpack 的配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
version: '3'
services:
setupssl:
image: elasticsearch:${STACK_VERSION}
container_name: setupssl
volumes:
- ./data/certs:/usr/share/elasticsearch/config/certs
- ./setup.sh:/setup.sh
environment:
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- KIBANA_PASSWORD=${KIBANA_PASSWORD}
user: "0"
command: >
bash /setup.sh
healthcheck:
test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
interval: 1s
timeout: 5s
retries: 120

es01:
depends_on:
setupssl:
condition: service_healthy
image: elasticsearch:${STACK_VERSION}
container_name: es01
volumes:
- ./data/certs:/usr/share/elasticsearch/config/certs
- ./data/esdata01:/usr/share/elasticsearch/data
ports:
- 9200:9200
- 9300:9300
environment:
- node.name=es01
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=es01,es02,es03
- discovery.seed_hosts=es02,es03
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es01/es01.key
- xpack.security.http.ssl.certificate=certs/es01/es01.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es01/es01.key
- xpack.security.transport.ssl.certificate=certs/es01/es01.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120

es02:
depends_on:
- es01
image: elasticsearch:${STACK_VERSION}
container_name: es02
volumes:
- ./data/certs:/usr/share/elasticsearch/config/certs
- ./data/esdata02:/usr/share/elasticsearch/data
ports:
- '9202:9200'
- '9302:9300'
environment:
- node.name=es02
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=es01,es02,es03
- discovery.seed_hosts=es01,es03
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es02/es02.key
- xpack.security.http.ssl.certificate=certs/es02/es02.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es02/es02.key
- xpack.security.transport.ssl.certificate=certs/es02/es02.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120


es03:
depends_on:
- es02
image: elasticsearch:${STACK_VERSION}
container_name: es03
volumes:
- ./data/certs:/usr/share/elasticsearch/config/certs
- ./data/esdata03:/usr/share/elasticsearch/data
ports:
- '9203:9200'
- '9303:9300'
environment:
- node.name=es03
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=es01,es02,es03
- discovery.seed_hosts=es01,es02
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es03/es03.key
- xpack.security.http.ssl.certificate=certs/es03/es03.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es03/es03.key
- xpack.security.transport.ssl.certificate=certs/es03/es03.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120

kibana:
depends_on:
es01:
condition: service_healthy
es02:
condition: service_healthy
es03:
condition: service_healthy
image: kibana:${STACK_VERSION}
container_name: kibana
ports:
- ${KIBANA_PORT}:5601
volumes:
- ./data/certs:/usr/share/kibana/config/certs
- ./data/kibanadata:/usr/share/kibana/data

environment:
- SERVERNAME=kibana
- ELASTICSEARCH_HOSTS=https://es01:9200
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
mem_limit: ${MEM_LIMIT}
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120

setup.sh

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
if [ x${ELASTIC_PASSWORD} == x ]; then
echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
exit 1;
elif [ x${KIBANA_PASSWORD} == x ]; then
echo "Set the KIBANA_PASSWORD environment variable in the .env file";
exit 1;
fi;
if [ ! -f config/certs/ca.zip ]; then
echo "Creating CA";
bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
unzip config/certs/ca.zip -d config/certs;
fi;
if [ ! -f config/certs/certs.zip ]; then
echo "Creating certs";
echo -ne \
"instances:\n"\
" - name: es01\n"\
" dns:\n"\
" - es01\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
" - name: es02\n"\
" dns:\n"\
" - es02\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
" - name: es03\n"\
" dns:\n"\
" - es03\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
> config/certs/instances.yml;
bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
unzip config/certs/certs.zip -d config/certs;
fi;
echo "Setting file permissions"
chown -R root:root config/certs;
find . -type d -exec chmod 750 \{\} \;;
find . -type f -exec chmod 640 \{\} \;;
echo "Waiting for Elasticsearch availability";
until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
echo "Setting kibana_system password";
until curl -s -X POST --cacert config/certs/ca/ca.crt -u elastic:${ELASTIC_PASSWORD} -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
echo "All done!";

然后启动顺利