Elasticsearch Cluster Setup

Prerequisites
  1. Download the 7.17 packages from the official Elastic (ELK) website
  2. Three CentOS 7.9 virtual machines (the assumed node IPs are sketched below)
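
The walkthrough assumes three nodes at 192.168.10.135, 192.168.10.136 and 192.168.10.137 (these are the IPs used in the configuration files later on); adjust them to your environment. As a purely optional sketch, you can map them to hostnames in /etc/hosts on each machine (the node-1/node-2/node-3 names here are assumptions; the guide itself keeps using raw IPs):

# /etc/hosts (optional)
192.168.10.135  node-1
192.168.10.136  node-2
192.168.10.137  node-3
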
Configure system parameters

As the root user:
vi /etc/sysctl.conf

Append:
vm.max_map_count=262144

Reload to apply the change:
sysctl -p

Run vi /etc/security/limits.conf and append at the end (the leading * applies the limits to all users):
* soft nofile 65536
* hard nofile 65536
* soft nproc 4096
* hard nproc 4096
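
To confirm the settings took effect, a quick check (open a new login shell so the limits apply to the session):

# kernel setting
sysctl vm.max_map_count     # expect vm.max_map_count = 262144
# per-user limits, run in a fresh shell
ulimit -n                   # open files, expect 65536
ulimit -u                   # max user processes, expect 4096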


Create a regular elk user and, as root, grant it ownership of the directory:

useradd elk

chown -R elk:elk /usr/local/elk


Elasticsearch cluster installation
  • 1. Download the Elasticsearch package, upload it to /usr/local on the server, and extract it

tar -zxvf elasticsearch-7.17.1-linux-x86_64.tar.gz

  • 2. Rename the directory

mv elasticsearch-7.17.1 elasticsearch

  • 3. Edit the elasticsearch.yml configuration file. The other two nodes use the same configuration; only node.name changes (see the note after the listing)

cluster.name: elasticsearch
node.name: node-1
node.master: true
node.data: true
path.data: /usr/local/elasticsearch/data
path.logs: /usr/local/elasticsearch/logs
network.host: 0.0.0.0
http.port: 9200
http.cors.enabled: true
http.cors.allow-origin: "*"
discovery.seed_hosts: ["192.168.10.135", "192.168.10.136", "192.168.10.137"]
cluster.initial_master_nodes: ["node-1", "node-2", "node-3"]
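
For node-2 and node-3 the file is identical; only node.name changes:

# node-2
node.name: node-2
# node-3
node.name: node-3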

  • 4. Create a regular user and grant it ownership of the elasticsearch directory

As the root user:

# create the user (skip if already created above)
useradd elk
# grant ownership
chown -R elk:elk /usr/local/elasticsearch
# switch to the user
su elk
# change to the directory
cd /usr/local/elasticsearch
# start Elasticsearch
./bin/elasticsearch

# open this address in a browser
http://ip:9200


192.168.10.135:9200/_cat/nodes
192.168.10.135:9200/_cat/health?v
Open these addresses; if node.data shows 3, the cluster has formed correctly. Then shut each node down in turn:

ps -ef | grep elastic
kill <pid>
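
Equivalently, a one-liner sketch (assuming nothing else named "elasticsearch" runs under the elk user):

pkill -u elk -f elasticsearch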

  • 5. Generate certificates

# as the root user
cd /usr/local/elasticsearch/
./bin/elasticsearch-certutil ca -out /usr/local/elasticsearch/config/elastic-stack-ca.p12
# when asked for a password, press Enter without typing one

./bin/elasticsearch-certutil cert --ca /usr/local/elasticsearch/config/elastic-stack-ca.p12 -out /usr/local/elasticsearch/config/elastic-certificates.p12 -pass ""
# when asked for a password, press Enter

# copy the elastic-certificates.p12 file under /usr/local/elasticsearch/config/ to the same directory on the other two nodes
# then re-apply the ownership
chown -R elk:elk /usr/local/elasticsearch
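
A sketch of that copy step using scp, assuming root SSH access to the other two nodes (adjust user and paths to your setup):

cd /usr/local/elasticsearch/config
scp elastic-certificates.p12 root@192.168.10.136:/usr/local/elasticsearch/config/
scp elastic-certificates.p12 root@192.168.10.137:/usr/local/elasticsearch/config/
# then run the chown again on each node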

  • 6. Edit the configuration file

Edit elasticsearch.yml:

cluster.name: elasticsearch
node.name: node-1
node.master: true
node.data: true
path.data: /usr/local/elasticsearch/data
path.logs: /usr/local/elasticsearch/logs
network.host: 0.0.0.0
http.port: 9200
transport.tcp.port: 9300
transport.tcp.compress: true
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
discovery.seed_hosts: ["192.168.10.135", "192.168.10.136", "192.168.10.137"]
cluster.initial_master_nodes: ["node-1", "node-2", "node-3"]

  • 7. Switch to the elk user and start Elasticsearch on each node in turn

su elk
# change to the directory
cd /usr/local/elasticsearch
# start Elasticsearch; -d runs it as a daemon in the background
./bin/elasticsearch -d
# once started, opening the address in a browser prompts for a username and password.


  • 8. Set the Elasticsearch passwords (as the elk user)

After confirming that all nodes are up, still under the elk account:

# change to the directory
cd /usr/local/elasticsearch/bin
# run the interactive password setup
./elasticsearch-setup-passwords interactive
# at the prompt "Please confirm that you would like to continue [y/N]"
# type y and press Enter
# set all passwords to the same value, e.g. 123456
Enter password for [elastic]:
Reenter password for [elastic]:
Enter password for [apm_system]:
Reenter password for [apm_system]:
Enter password for [kibana_system]:
Reenter password for [kibana_system]:
Enter password for [logstash_system]:
Reenter password for [logstash_system]:
Enter password for [beats_system]:
Reenter password for [beats_system]:
Enter password for [remote_monitoring_user]:
Reenter password for [remote_monitoring_user]:
Changed password for user [apm_system]
Changed password for user [kibana_system]
Changed password for user [kibana]
Changed password for user [logstash_system]
Changed password for user [beats_system]
Changed password for user [remote_monitoring_user]
Changed password for user [elastic]
# if no errors appear, the passwords have been set.

  • 9. Verify the cluster

Open http://ip:9200 again and log in with the password; if the node responds, the cluster setup succeeded.
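
The same check can be done from the command line with curl, using the elastic password set in step 8 (123456 in this walkthrough):

curl -u elastic:123456 http://192.168.10.135:9200/_cat/nodes?v
curl -u elastic:123456 http://192.168.10.135:9200/_cat/health?v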

Kibana installation

Download kibana-7.17.1-linux-x86_64.tar.gz

Upload it to /usr/local

As the root user:
Extract: tar -zxvf kibana-7.17.1-linux-x86_64.tar.gz
Rename: mv kibana-7.17.1-linux-x86_64 kibana

Edit the parameters in config/kibana.yml:
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://192.168.10.135:9200","http://192.168.10.136:9200","http://192.168.10.137:9200"]
elasticsearch.username: "kibana_system"
elasticsearch.password: "123456"
i18n.locale: "zh-CN"
Grant ownership:
chown -R elk:elk /usr/local/

1. Run as root:
nohup ./bin/kibana --allow-root &
2. Or switch to the elk user and run it in the background: nohup ./bin/kibana &
Check the process: ps -ef | grep node
Stop it with: kill -9 <pid>
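
Before opening the browser you can confirm Kibana is listening (startup takes a moment; errors land in nohup.out):

ss -lntp | grep 5601
curl -s -o /dev/null -w "%{http_code}\n" http://192.168.10.135:5601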

In a browser:
http://192.168.10.135:5601
Log in as elastic / 123456

In the left-hand menu, under Management, open Dev Tools (开发工具).
In the console, run GET _cat/nodes; the response shows the nodes in the ES cluster.
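
A few other console commands that are handy for a quick health check (standard cat/cluster APIs, not specific to this setup):

GET _cat/nodes?v
GET _cluster/health
GET _cat/indices?v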


Logstash

1. Download logstash-7.17.1-linux-x86_64.tar.gz and upload it to /usr/local
2. Extract it
tar -zxvf logstash-7.17.1-linux-x86_64.tar.gz
3. Rename the directory
mv logstash-7.17.1 logstash
4. Edit the configuration file (config/logstash.conf). The type and service fields referenced in the conditionals below are not produced by Logstash itself; they are attached by Filebeat (see the fields settings in the Filebeat section).


input {
  beats {
    port => 5044
  }
  tcp{
    type => "syslog"
    port => 11514
  }
  udp{
    type => "syslog"
    port => 11514
  }
}

filter {
  grok {
    match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
    add_field => [ "received_at", "%{@timestamp}" ]
    add_field => [ "received_from", "%{host}" ]
  }
  date {
    match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
  }
}
output {
  if [type] == "info"{
    elasticsearch {
      hosts => ["http://192.168.10.135:9200","http://192.168.10.136:9200","http://192.168.10.137:9200"]
      index => "logstash-%{[service]}-info-%{+YYYY.MM.dd}"
      user => "elastic"
      password => "123456"
    }
  }else if [type] == "warn"{
    elasticsearch {
      hosts => ["http://192.168.10.135:9200","http://192.168.10.136:9200","http://192.168.10.137:9200"]
      index => "logstash-%{[service]}-warn-%{+YYYY.MM.dd}"
      user => "elastic"
      password => "123456"
    }
  }else if [type] == "error"{
    elasticsearch {
      hosts => ["http://192.168.10.135:9200","http://192.168.10.136:9200","http://192.168.10.137:9200"]
      index => "logstash-%{[service]}-error-%{+YYYY.MM.dd}"
      user => "elastic"
      password => "123456"
    }
  }else {
    elasticsearch {
      hosts => ["http://192.168.10.135:9200","http://192.168.10.136:9200","http://192.168.10.137:9200"]
      index => "logstash-%{+YYYY.MM.dd}"
      user => "elastic"
      password => "123456"
    }
  }
}

5. Run it in the background
nohup ./bin/logstash -f ./config/logstash.conf &
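
If the pipeline fails to start, the config file can be validated with Logstash's built-in check before retrying:

./bin/logstash -f ./config/logstash.conf --config.test_and_exit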

Filebeat

1. Download filebeat-7.17.1-linux-x86_64.tar.gz and upload it to /usr/local
2. Extract it
3. Rename the directory
4. Edit the configuration file (filebeat.yml)
filebeat.inputs:
# collect from MQTT
- type: mqtt
  enabled: true
  tags: ["mqtt_test"]
  hosts:
    - tcp://localhost:1883
  topics:
    - "thing/thing002"
  fields:
   service: mqttinfo
   type: mqtt
  fields_under_root: true


# collect log files
- type: log
  # Change to true to enable this input configuration.
  enabled: true
  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - D:\00_Dev_Environment\ELK\Logs\info.log
  fields:
   service: infomation
   type: info
  fields_under_root: true
  
- type: log
  enabled: true
  paths:
    - D:\00_Dev_Environment\ELK\Logs\warn.log
  fields:
   service: warning
   type: warn
  fields_under_root: true



# Kibana settings
setup.kibana:
  host: "192.168.10.135:5601"
# output to Logstash
output.logstash:
  hosts: ["192.168.10.135:5044"]
5. Run it in the background
nohup ./filebeat -e -c filebeat.yml &
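
To verify the setup, Filebeat ships test subcommands for the config file and the Logstash output; for the MQTT input you can also publish a message to the subscribed topic, assuming the mosquitto clients are installed and a broker is listening on localhost:1883:

./filebeat test config -c filebeat.yml
./filebeat test output -c filebeat.yml
# optional: push a test MQTT message (hypothetical payload)
mosquitto_pub -h localhost -t "thing/thing002" -m '{"msg":"hello from mqtt"}'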

