blank
blank
发布于 2019-10-15 / 480 阅读 / 0 评论 / 0 点赞

docker-compose 运行 elasticsearch kibana 伪集群 nginx 作为前置代理加密

配置前置依赖

# ES host configuration
# The vm.max_map_count kernel setting needs to be set to at least 262144 for
# production use. Guard the append so re-running this setup does not keep
# duplicating the line in /etc/sysctl.conf.
grep -q '^vm.max_map_count=262144$' /etc/sysctl.conf \
  || echo 'vm.max_map_count=262144' >> /etc/sysctl.conf
sysctl -p

# Create the elasticsearch data/log directories (one per node)
mkdir -p /docker/elasticsearch/elasticsearch/data/{node1,node2,node3}
mkdir -p /docker/elasticsearch/elasticsearch/logs/{node1,node2,node3}

# Directory permissions
chmod -R 755 /docker/elasticsearch/elasticsearch/data /docker/elasticsearch/elasticsearch/logs

# Change ownership to uid/gid 1000, the user elasticsearch runs as inside
# the official Docker image
sudo chown -R 1000:1000 /docker/elasticsearch/elasticsearch/data /docker/elasticsearch/elasticsearch/logs

# Create the nginx config/log directories
mkdir -p /docker/elasticsearch/nginx/conf
mkdir -p /docker/elasticsearch/nginx/logs

# Directory permissions
chmod -R 755 /docker/elasticsearch/nginx

# Change ownership to uid/gid 101, the user nginx runs as inside
# the official Docker image
chown -R 101:101 /docker/elasticsearch/nginx

# Install httpd-tools: we only need the htpasswd utility, not the full
# Apache (httpd) web server that the original `yum install httpd` pulls in
yum install -y httpd-tools

# 配置 http basic auth 
(我们不对外暴露 ES 端口,而是通过 nginx 做前置代理,并为 nginx 配置访问账号密码,
以达到对 ES 服务加密保护的目的;elastic 为用户名,可自定义)
# Create the HTTP basic-auth credential file: -c creates the file, -b takes
# the password on the command line (NOTE: it will be visible in shell history)
htpasswd -bc /docker/elasticsearch/nginx/conf/passwords elastic test123

# Confirm the credential entry was written (the password is stored hashed)
cat /docker/elasticsearch/nginx/conf/passwords

elastic:$apr1$VjDt5vbA$ElCOn7Ys2nSgQps7Nz.UM/

生成 docker-compose.yml

# Quote the heredoc delimiter ('EOF') so the shell performs no parameter
# expansion on the compose file contents.
cat > /docker/elasticsearch/docker-compose.yml <<'EOF'
version: '3.7'

services:
  elasticsearch_n1:
    image: elasticsearch:6.8.3
    container_name: elasticsearch_n1
    restart: always
    # privileged is not needed: cap_add ALL plus the memlock ulimit below
    # already cover what ES requires (kept consistent across all three nodes)
    #privileged: true
#    ports:
#      - "9201:9200"
#      - "9301:9300"
    environment:
      - cluster.name=docker-es
      - node.name=node1
      - node.master=true
      - node.data=true
      - bootstrap.memory_lock=true
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms256m -Xmx256m"
      - "discovery.zen.ping.unicast.hosts=elasticsearch_n1,elasticsearch_n2,elasticsearch_n3"
      - "discovery.zen.minimum_master_nodes=2"
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    # NOTE: the deploy: section is only honored by swarm mode or
    # `docker-compose --compatibility`; plain `docker-compose up` ignores it.
    deploy:
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      resources:
        limits:
          cpus: '1'
          memory: 256M
        reservations:
          cpus: '1'
          memory: 256M
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 10s
    volumes:
      - ./elasticsearch/data/node1:/usr/share/elasticsearch/data
      - ./elasticsearch/logs/node1:/usr/share/elasticsearch/logs
    networks:
      - elastic

  elasticsearch_n2:
    image: elasticsearch:6.8.3
    container_name: elasticsearch_n2
    restart: always
    #privileged: true
#    ports:
#      - "9202:9200"
#      - "9302:9300"
    environment:
      - cluster.name=docker-es
      - node.name=node2
      - node.master=true
      - node.data=true
      - bootstrap.memory_lock=true
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms256m -Xmx256m"
      - "discovery.zen.ping.unicast.hosts=elasticsearch_n1,elasticsearch_n2,elasticsearch_n3"
      - "discovery.zen.minimum_master_nodes=2"
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    deploy:
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      resources:
        limits:
          cpus: '1'
          memory: 256M
        reservations:
          cpus: '1'
          memory: 256M
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 10s
    volumes:
      - ./elasticsearch/data/node2:/usr/share/elasticsearch/data
      - ./elasticsearch/logs/node2:/usr/share/elasticsearch/logs
    networks:
      - elastic

  elasticsearch_n3:
    image: elasticsearch:6.8.3
    container_name: elasticsearch_n3
    restart: always
    #privileged: true
#    ports:
#      - "9203:9200"
#      - "9303:9300"
    environment:
      - cluster.name=docker-es
      - node.name=node3
      - node.master=true
      - node.data=true
      - bootstrap.memory_lock=true
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms256m -Xmx256m"
      - "discovery.zen.ping.unicast.hosts=elasticsearch_n1,elasticsearch_n2,elasticsearch_n3"
      - "discovery.zen.minimum_master_nodes=2"
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    deploy:
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      resources:
        limits:
          cpus: '1'
          memory: 256M
        reservations:
          cpus: '1'
          memory: 256M
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 10s
    volumes:
      - ./elasticsearch/data/node3:/usr/share/elasticsearch/data
      - ./elasticsearch/logs/node3:/usr/share/elasticsearch/logs
    networks:
      - elastic

  kibana:
    image: kibana:6.8.3
    container_name: elasticsearch_kibana
    restart: always
    privileged: true
    # ports:
      # - "5601:5601"
    environment:
      # Kibana config keys map to env vars in upper snake case
      I18N_LOCALE: zh-CN
      XPACK_MONITORING_ENABLED: "true"
      # Kibana 6.x reads elasticsearch.url via ELASTICSEARCH_URL.
      # ELASTICSEARCH_HOSTS is a 7.x setting and is ignored by 6.8, which
      # would then fall back to the unresolvable default http://elasticsearch:9200
      ELASTICSEARCH_URL: http://elasticsearch_n1:9200
    cap_add:
      - ALL
    deploy:
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      resources:
        limits:
          cpus: '1'
          memory: 256M
        reservations:
          cpus: '1'
          memory: 256M
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 10s
    networks:
      - elastic
    depends_on:
      - elasticsearch_n1

  nginx:
    image: nginx:latest
    container_name: elasticsearch_nginx
    restart: always
    user: root
    privileged: true
    depends_on:
      - elasticsearch_n1
      - elasticsearch_n2
      - elasticsearch_n3
      - kibana
    # Only nginx is exposed to the host; ES/Kibana stay on the internal network
    ports:
      - "9200:9200"
      - "9300:9300"
      - "5601:5601"
    volumes:
      - ./nginx/conf/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/conf/passwords:/etc/nginx/passwords
      - ./nginx/logs/:/var/log/nginx
    command: [nginx, '-g', 'daemon off;']
    ulimits:
      nproc: 65535
    logging:
      driver: journald
    networks:
      - elastic

networks: # create the shared network
  elastic: # network name
    driver: bridge # bridge mode
EOF

创建 nginx 反代配置

vim /docker/elasticsearch/nginx/conf/nginx.conf

# For more information on configuration, see:
#   * Official English Documentation: http://nginx.org/en/docs/
#   * Official Russian Documentation: http://nginx.org/ru/docs/

user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

# start of http
http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/all-access.log  main;

    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   10m;
    types_hash_max_size 2048;
    # Set upload file limit to 1GB (bulk indexing can be large)
    client_max_body_size 1G;
    # Avoid timeout errors when uploads take a long time
    client_header_timeout         10m;
    client_body_timeout           10m;
    proxy_connect_timeout         5m;
    proxy_read_timeout            10m;
    proxy_send_timeout            10m;

    gzip  on;
    gzip_min_length 10240;
    gzip_proxied expired no-cache no-store private auth;
    gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;
    gzip_disable "MSIE [1-6].";

    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;

    # hide the nginx version in responses
    server_tokens off;

    # Load modular configuration files from the /etc/nginx/conf.d directory.
    # See http://nginx.org/en/docs/ngx_core_module.html#include
    # for more information.
    # include /etc/nginx/conf.d/*.conf;


    upstream es {
      # es server http port

      server elasticsearch_n1:9200;
      server elasticsearch_n2:9200;
      server elasticsearch_n3:9200;


      # persistent http connections
      # https://www.elastic.co/blog/playing-http-tricks-nginx
      keepalive 15;
    }

    upstream kibana {
      # kibana server http port

      server elasticsearch_kibana:5601;

      # persistent http connections
      # https://www.elastic.co/blog/playing-http-tricks-nginx
      keepalive 15;
    }

    # es http
    server {
      listen 9200;
      server_name _;

      auth_basic "Restricted Access";
      auth_basic_user_file /etc/nginx/passwords;

      location / {

        # deny node shutdown api; `return` ends processing, so no
        # directive may follow it inside the if block
        if ($request_filename ~ "_shutdown") {
          return 403;
        }

        proxy_pass http://es;
        proxy_http_version 1.1;
        proxy_set_header Connection "Keep-Alive";
        proxy_set_header Proxy-Connection "Keep-Alive";
        proxy_redirect off;
      }

      # root endpoint stays unauthenticated (cluster banner only)
      location = / {
        proxy_pass http://es;
        proxy_http_version 1.1;
        proxy_set_header Connection "Keep-Alive";
        proxy_set_header Proxy-Connection "Keep-Alive";
        proxy_redirect off;
        auth_basic "off";
      }

      # health endpoints stay unauthenticated for monitoring probes
      location ~* ^(/_cluster/health|/_cat/health) {
        proxy_pass http://es;
        proxy_http_version 1.1;
        proxy_set_header Connection "Keep-Alive";
        proxy_set_header Proxy-Connection "Keep-Alive";
        proxy_redirect off;
        auth_basic "off";
      }
    }


    # kibana
    server {
      listen 5601;
      server_name _;

      auth_basic "Restricted Access";
      auth_basic_user_file /etc/nginx/passwords;

      location / {

        proxy_pass http://kibana;
        proxy_http_version 1.1;
        proxy_set_header Connection "Keep-Alive";
        proxy_set_header Proxy-Connection "Keep-Alive";
        proxy_redirect off;
        #auth_basic "off";
      }
    }

# end of http
}

# tcp proxy for the ES transport port
stream  {

    #timeout 1d;
    proxy_timeout 1d;
    proxy_connect_timeout 10s;

    # es transport upstream
    upstream es_tcp {
        server elasticsearch_n1:9300;
        server elasticsearch_n2:9300;
        server elasticsearch_n3:9300;
    }

    # es transport listener (NOTE: stream proxying has no basic auth)
    server {
        listen 9300;
        proxy_pass es_tcp;
        tcp_nodelay on;
    }

}

启动

# Start the stack detached, then follow the aggregated container logs
# (Ctrl-C detaches from the logs only; containers keep running)
cd /docker/elasticsearch && docker-compose up -d && docker-compose logs -f

启动完成后,访问 host:5601 即为加密后的 kibana,输入刚刚设置的账号 elastic、密码 test123;host:9200 为加密后的 es http 接口,host:9300 为 es tcp 端口。

设置中文分词器

# Run inside EACH ES node: repeat for elasticsearch_n1 / _n2 / _n3
docker exec -it elasticsearch_n1 bash
# Install the IK Chinese analyzer with elasticsearch-plugin. The plugin
# version MUST exactly match the ES version (6.8.3 here) —
# elasticsearch-plugin refuses to install a mismatched version such as 6.6.2.
elasticsearch-plugin install \
https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v6.8.3/elasticsearch-analysis-ik-6.8.3.zip
# Afterwards restart the nodes so the plugin is loaded:
#   docker-compose restart elasticsearch_n1 elasticsearch_n2 elasticsearch_n3

# Set the ES default analyzer to IK. ES 6.x requires an explicit
# Content-Type header, and the nginx proxy in front of port 9200 requires
# the basic-auth credentials configured earlier.
curl -u elastic:test123 -H 'Content-Type: application/json' \
  -XPUT 'http://localhost:9200/_all/_settings?preserve_existing=true' -d '{
  "index.analysis.analyzer.default.type" : "ik_smart"
}'

评论