Operation Procedure
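The steps below bring up a six-node Redis Cluster (three masters, three replicas) on a single host, 192.168.186.10, using six redis:6.2.5 containers listening on ports 6381-6386. The Harbor containers that appear later in the docker ps listing were already running on this machine and play no part in the cluster.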
[root@lhj ~]#
f9f57f074a0610fb4e73f07aed121c1e912e7c6212c6ccd353f90fd1b78e7a3d
[root@lhj ~]#
6ad0cc4a179d9e7a18462bc6a06f5fa3e9fd8ade870f58bba86e732a3bc19368
[root@lhj ~]#
65de80eeff3bf6771a7d43d5e4235f74dfdb118f6fac334488630e623d27189c
[root@lhj ~]#
65c5d362e6a7bf47ceef78b427e6d2e2ca28821ae1e8a127dbd47e95bb7c3cb9
[root@lhj ~]#
e120b2aa85afd21a15d7af37e22647dc3849869a5be831dc1d0cde663155b13a
[root@lhj ~]#
975a061428e5bd1b1a9552d25af908efa9fd068c2b872f5dbf8aa73c74dedac8
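Each hash above is the container ID echoed by a docker run command, one per node (redis-node1 through redis-node6); the commands themselves were not captured. A minimal sketch of the likely invocation for the first node, assuming host networking (suggested by the empty PORTS column in docker ps below and by the cluster addresses using the host IP) and a per-node data directory (the volume path is an assumption):

docker run -d --name redis-node1 --net host --privileged=true \
    -v /data/redis/share/redis-node-1:/data \
    redis:6.2.5 --cluster-enabled yes --appendonly yes --port 6381

The other five nodes would repeat this with the container name, volume path, and port (6382-6386) adjusted. The trailing arguments are passed by the image's docker-entrypoint.sh straight to redis-server.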
[root@lhj ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
975a061428e5 redis:6.2.5 "docker-entrypoint.s…" 3 seconds ago Up 2 seconds redis-node6
e120b2aa85af redis:6.2.5 "docker-entrypoint.s…" 17 seconds ago Up 16 seconds redis-node5
65c5d362e6a7 redis:6.2.5 "docker-entrypoint.s…" 29 seconds ago Up 29 seconds redis-node4
65de80eeff3b redis:6.2.5 "docker-entrypoint.s…" 43 seconds ago Up 42 seconds redis-node3
6ad0cc4a179d redis:6.2.5 "docker-entrypoint.s…" 58 seconds ago Up 58 seconds redis-node2
f9f57f074a06 redis:6.2.5 "docker-entrypoint.s…" About a minute ago Up About a minute redis-node1
8e7967f83558 vmware/harbor-jobservice:v1.1.2 "/harbor/harbor_jobs…" 4 days ago Up 4 days harbor-jobservice
6a5c0023b1d5 vmware/nginx:1.11.5-patched "nginx -g 'daemon of…" 4 days ago Up 4 days 0.0.0.0:80->80/tcp, :::80->80/tcp, 0.0.0.0:443->443/tcp, :::443->443/tcp, 0.0.0.0:4443->4443/tcp, :::4443->4443/tcp nginx
a85cea101191 vmware/harbor-ui:v1.1.2 "/harbor/harbor_ui" 4 days ago Up 4 days harbor-ui
a7b304fb4b69 vmware/harbor-db:v1.1.2 "docker-entrypoint.s…" 4 days ago Up 4 days 3306/tcp harbor-db
d124c93d116c vmware/registry:2.6.1-photon "/entrypoint.sh serv…" 4 days ago Up 4 days 5000/tcp registry
38ddd033de7d vmware/harbor-adminserver:v1.1.2 "/harbor/harbor_admi…" 4 days ago Up 4 days harbor-adminserver
2f21f081e276 vmware/harbor-log:v1.1.2 "/bin/sh -c 'crond &…" 4 days ago Up 4 days 127.0.0.1:1514->514/tcp harbor-log
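Note the empty PORTS column for the six redis-node containers: under the host-networking assumption above there is no port mapping for Docker to display, and ports 6381-6386 are opened directly on the host. One way to confirm the network mode (a verification suggestion, not part of the original session):

docker inspect -f '{{.HostConfig.NetworkMode}}' redis-node1

This prints host for a host-networked container.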
[root@lhj ~]# docker exec -it redis-node1 /bin/bash
root@lhj:/data#
root@lhj:/data# redis-cli --cluster create 192.168.186.10:6381 192.168.186.10:6382 192.168.186.10:6383 192.168.186.10:6384 192.168.186.10:6385 192.168.186.10:6386 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 192.168.186.10:6385 to 192.168.186.10:6381
Adding replica 192.168.186.10:6386 to 192.168.186.10:6382
Adding replica 192.168.186.10:6384 to 192.168.186.10:6383
>>> Trying to optimize slaves allocation for anti-affinity
[WARNING] Some slaves are in the same host as their master
M: 153cc92c3b2007dcabf6a5df7136df8697c990be 192.168.186.10:6381
   slots:[0-5460] (5461 slots) master
M: e5b5e6c823d59c601b95f5f7a017653c09a2c922 192.168.186.10:6382
   slots:[5461-10922] (5462 slots) master
M: e1932ffb388350211e7c981f66e976e5905b172d 192.168.186.10:6383
   slots:[10923-16383] (5461 slots) master
S: 3516db70dd4dafcb3fe1ba43f3d2fc383cf364b9 192.168.186.10:6384
   replicates 153cc92c3b2007dcabf6a5df7136df8697c990be
S: d52121486e6ee09b72ec26ce30a2e9eddf10ec82 192.168.186.10:6385
   replicates e5b5e6c823d59c601b95f5f7a017653c09a2c922
S: 4925283241288ed83d7d2b90ee85c886b1fd6220 192.168.186.10:6386
   replicates e1932ffb388350211e7c981f66e976e5905b172d
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
.
>>> Performing Cluster Check (using node 192.168.186.10:6381)
M: 153cc92c3b2007dcabf6a5df7136df8697c990be 192.168.186.10:6381
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: e1932ffb388350211e7c981f66e976e5905b172d 192.168.186.10:6383
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
M: e5b5e6c823d59c601b95f5f7a017653c09a2c922 192.168.186.10:6382
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: d52121486e6ee09b72ec26ce30a2e9eddf10ec82 192.168.186.10:6385
   slots: (0 slots) slave
   replicates e5b5e6c823d59c601b95f5f7a017653c09a2c922
S: 4925283241288ed83d7d2b90ee85c886b1fd6220 192.168.186.10:6386
   slots: (0 slots) slave
   replicates e1932ffb388350211e7c981f66e976e5905b172d
S: 3516db70dd4dafcb3fe1ba43f3d2fc383cf364b9 192.168.186.10:6384
   slots: (0 slots) slave
   replicates 153cc92c3b2007dcabf6a5df7136df8697c990be
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
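Every key maps to one of the 16384 hash slots via CRC16(key) mod 16384, which is why the three masters split the 0-16383 range between them. To see which slot, and hence which master, a given key lands on (an illustrative check, not captured above):

redis-cli -p 6381 cluster keyslot somekey

Compare the returned slot number against the three ranges listed in the cluster check output.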
root@lhj:/data# redis-cli -p 6381
127.0.0.1:6381> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:403
cluster_stats_messages_pong_sent:441
cluster_stats_messages_sent:844
cluster_stats_messages_ping_received:436
cluster_stats_messages_pong_received:403
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:844
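A few of these fields are worth decoding: cluster_known_nodes:6 counts every node including replicas, while cluster_size:3 counts only masters that are serving hash slots; cluster_state:ok requires (with the default cluster-require-full-coverage setting) that all 16384 slots are assigned and reachable, matching cluster_slots_assigned and cluster_slots_ok above.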
127.0.0.1:6381> cluster nodes
e1932ffb388350211e7c981f66e976e5905b172d 192.168.186.10:6383@16383 master - 0 1641735411000 3 connected 10923-16383
e5b5e6c823d59c601b95f5f7a017653c09a2c922 192.168.186.10:6382@16382 master - 0 1641735411000 2 connected 5461-10922
d52121486e6ee09b72ec26ce30a2e9eddf10ec82 192.168.186.10:6385@16385 slave e5b5e6c823d59c601b95f5f7a017653c09a2c922 0 1641735412845 2 connected
153cc92c3b2007dcabf6a5df7136df8697c990be 192.168.186.10:6381@16381 myself,master - 0 1641735412000 1 connected 0-5460
4925283241288ed83d7d2b90ee85c886b1fd6220 192.168.186.10:6386@16386 slave e1932ffb388350211e7c981f66e976e5905b172d 0 1641735411000 3 connected
3516db70dd4dafcb3fe1ba43f3d2fc383cf364b9 192.168.186.10:6384@16384 slave 153cc92c3b2007dcabf6a5df7136df8697c990be 0 1641735411841 1 connected
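A final smoke test would be to write and read a key in cluster mode; the -c flag makes redis-cli follow MOVED redirections to whichever master owns the key's slot (a suggested verification, not part of the captured session):

redis-cli -p 6381 -c set k1 v1
redis-cli -p 6381 -c get k1

Both commands should succeed even when k1's slot lives on a master other than the 6381 node, since -c transparently reconnects to the right node.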