一、Nginx介绍
1.1 引言
1.2 Nginx介绍
二、Nginx的安装
2.1 安装Nginx
使用docker-compose安装
#在/opt目录下创建docker_nginx目录
cd /opt
mkdir docker_nginx
#创建docker-compose.yml文件并编写下面的内容,保存退出
vim docker-compose.yml
version: '3.1'
services:
nginx:
restart: always
image: daocloud.io/library/nginx:latest
container_name: nginx
ports:
- 80:80
执行docker-compose up -d
2.2 Nginx的配置文件
# 查看当前nginx的配置需要进入docker容器中
docker exec -it 容器id bash
# 进入容器后
cd /etc/nginx/
cat nginx.conf
nginx.conf
文件内容如下
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
# 以上同称为全局块
# worker_processes的数值越大,Nginx的并发能力就越强
# error_log代表Nginx错误日志存放的位置
# pid是Nginx运行的一个标识
events {
worker_connections 1024;
}
# events块
# worker_connections的数值越大,Nginx的并发能力就越强
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
# http块
# include 代表引入一个外部文件
# include /etc/nginx/mime.types; mime.types中存放着大量媒体类型
# include /etc/nginx/conf.d/*.conf; 引入了conf.d下以.conf为结尾的配置文件
conf.d
目录下只有一个default.conf
文件,内容如下
server {
listen 80;
listen [::]:80;
server_name localhost;
#charset koi8-r;
#access_log /var/log/nginx/host.access.log main;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
# location块
# root:将接受到的请求根据/usr/share/nginx/html去查找静态资源
# index:默认去上述的路径中找到index.html或index.htm
}
# server块
# listen代表Nginx监听的端口号
# server_name代表Nginx接受请求的IP
2.3 修改docker-compose文件
# 退出容器
exit
# 关闭容器
docker-compose down
# 修改docker-compose.yml文件如下
version: '3.1'
services:
nginx:
restart: always
image: daocloud.io/library/nginx:latest
container_name: nginx
ports:
- 80:80
volumes:
- /opt/docker_nginx/conf.d/:/etc/nginx/conf.d
# 重新构建容器
docker-compose build
# 重新启动容器
docker-compose up -d
这时我们再次访问80端口是访问不到的,因为我们映射了数据卷之后还没有编写server块中的内容
我们在/opt/docker_nginx/conf.d下新建default.conf,并插入如下内容
server {
listen 80;
listen [::]:80;
server_name localhost;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
}
# 重启nginx
docker-compose restart
三、Nginx的反向代理
3.1 正向代理和反向代理介绍
3.2 基于Nginx实现反向代理
准备Tomcat
服务器
docker run -d -p 8080:8080 --name tomcat daocloud.io/library/tomcat:8.5.15-jre8
#或者已经下载了tomcat镜像
docker run -d -p 8080:8080 --name tomcat 镜像的标识
#添加数据卷
docker run -it -v /宿主机绝对目录:/容器内目录 镜像名
default.conf
文件内容如下
server {
listen 80;
listen [::]:80;
server_name localhost;
# 基于反向代理访问Tomcat服务器
location / {
proxy_pass http://IP:8080/;
}
}
# 重启nginx
docker-compose restart
3.3 关于Nginx的location路径映射
优先级关系:
(location = )
> (location /xxx/yyy/zzz)
> (location ^~)
> (location ~,~*)
> (location /起始路径)
> (location /)
# 1. = 匹配
location / {
# 精准匹配,主机名后面不能带任何字符串
# 例如www.baidu.com不能是www.baidu.com/id=xxx
}
# 2. 通用匹配
location /xxx {
# 匹配所有以/xxx开头的路径
# 例如127.0.0.1:8080/xxx xxx可以为空,为空则和=匹配一样
}
# 3. 正则匹配
location ~ /xxx {
# 匹配所有以/xxx开头的路径
}
# 4. 匹配开头路径
location ^~ /xxx/xx {
# 匹配所有以/xxx/xx开头的路径
}
# 5. 匹配结尾路径
location ~* \.(gif|jpg|png)$ {
# 匹配以.gif、.jpg或者.png结尾的路径
}
修改/opt/docker_nginx/conf.d/default.conf
如下
server {
listen 80;
listen [::]:80;
server_name localhost;
location /index {
proxy_pass http://IP:8081/; # A首页
}
location ^~ /mall/ {
proxy_pass http://IP:8080/; # B首页
}
location / {
proxy_pass http://IP:8080/; # C首页
}
}
docker-compose restart
四、Nginx负载均衡
4.1 轮询
想实现轮询
负载均衡机制只需要修改配置文件如下
upstream my_server{
server IP:8080;
server IP:8081;
}
server {
listen 80;
listen [::]:80;
server_name localhost;
location / {
proxy_pass http://my_server/; #Tomcat首页
}
}
4.2 权重
实现权重
的方式:在配置文件中upstream
块中加上weight
upstream my_server{
server IP:8080 weight=10;
server IP:8081 weight=2;
}
server {
listen 80;
listen [::]:80;
server_name localhost;
location / {
proxy_pass http://my_server/; #Tomcat首页
}
}
4.3 ip_hash
实现ip_hash
方式:在配置文件upstream
块中加上ip_hash
upstream my_server{
ip_hash;
server IP:8080 weight=10;
server IP:8081 weight=2;
}
server {
listen 80;
listen [::]:80;
server_name localhost;
location / {
proxy_pass http://my_server/; #Tomcat首页
}
}
五、Nginx动静分离
5.1 动态资源代理
# 配置如下
location / {
proxy_pass 路径;
}
5.2 静态资源代理
先修改docker-compose文件
version: '3.1'
services:
nginx:
restart: always
image: daocloud.io/library/nginx:latest
container_name: nginx
ports:
- 80:80
volumes:
- /opt/docker_nginx/conf.d/:/etc/nginx/conf.d
- /opt/docker_nginx/html/:/usr/share/nginx/html
# 在/opt/docker_nginx/html下新建一个index.html
# 在index.html里面随便写点东西展示
# 修改nginx的配置文件
location / {
root /usr/share/nginx/html;
index index.html;
}
# 配置如下
location / {
root 静态资源路径;
index 默认访问路径下的什么资源;
autoindex on; # 代表展示静态资源的全部内容,以列表的形式展开
}
# 重启nginx
docker-compose restart
六、Nginx集群
6.1 引言
6.2 搭建
# 先准备好以下文件放入/opt/docker_nginx_cluster目录中
# 然后启动容器 注意确保80、8081和8082端口未被占用(或者修改docker-compose.yml中的端口)
docker-compose up -d
# 然后我们访问8081端口可以看到master,访问8082端口可以看到slave
# 因为我们设置了8081端口的master优先级为200,比8082端口的slave优先级100高,所以我们访问80端口看到的是master
# 现在我们模仿8081端口的nginx宕机了
# docker stop 8081端口nginx容器的ID
# 这时我们再去访问80端口看到的就是slave了
Dockerfile
FROM nginx:1.13.5-alpine
RUN apk update && apk upgrade
RUN apk add --no-cache bash curl ipvsadm iproute2 openrc keepalived
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
CMD ["/entrypoint.sh"]
entrypoint.sh
#!/bin/sh
#/usr/sbin/keepalived -n -l -D -f /etc/keepalived/keepalived.conf --dont-fork --log-console &
/usr/sbin/keepalived -D -f /etc/keepalived/keepalived.conf
nginx -g "daemon off;"
docker-compose.yml
version: "3.1"
services:
nginx_master:
build:
context: ./
dockerfile: ./Dockerfile
ports:
- 8081:80
volumes:
- ./index-master.html:/usr/share/nginx/html/index.html
- ./favicon.ico:/usr/share/nginx/html/favicon.ico
- ./keepalived-master.conf:/etc/keepalived/keepalived.conf
networks:
static-network:
ipv4_address: 172.20.128.2
cap_add:
- NET_ADMIN
nginx_slave:
build:
context: ./
dockerfile: ./Dockerfile
ports:
- 8082:80
volumes:
- ./index-slave.html:/usr/share/nginx/html/index.html
- ./favicon.ico:/usr/share/nginx/html/favicon.ico
- ./keepalived-slave.conf:/etc/keepalived/keepalived.conf
networks:
static-network:
ipv4_address: 172.20.128.3
cap_add:
- NET_ADMIN
proxy:
image: haproxy:1.7-alpine
ports:
- 80:6301
volumes:
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg
networks:
- static-network
networks:
static-network:
ipam:
config:
- subnet: 172.20.0.0/16
keepalived-master.conf
vrrp_script chk_nginx {
script "pidof nginx"
interval 2
}
vrrp_instance VI_1 {
state MASTER
interface eth0 # 容器内部的网卡名称
virtual_router_id 33
priority 200 # 优先级
advert_int 1
authentication {
auth_type PASS
auth_pass letmein
}
virtual_ipaddress {
172.20.128.50 # 虚拟IP地址(VIP)
}
track_script {
chk_nginx
}
}
keepalived-slave.conf
vrrp_script chk_nginx {
script "pidof nginx"
interval 2
}
vrrp_instance VI_1 {
state BACKUP
interface eth0 # 容器内部的网卡名称
virtual_router_id 33
priority 100 # 优先级
advert_int 1
authentication {
auth_type PASS
auth_pass letmein
}
virtual_ipaddress {
172.20.128.50 # 虚拟IP地址(VIP)
}
track_script {
chk_nginx
}
}
haproxy.cfg
global
log 127.0.0.1 local0
maxconn 4096
daemon
nbproc 4
defaults
log 127.0.0.1 local3
mode http
option dontlognull
option redispatch
retries 2
maxconn 2000
balance roundrobin
timeout connect 5000ms
timeout client 5000ms
timeout server 5000ms
frontend main
bind *:6301
default_backend webserver
backend webserver
server nginx_master 172.20.128.50:80 check inter 2000 rise 2 fall 5