1. Building the Docker image:
The Dockerfile:
FROM alpine:latest
MAINTAINER chengcuichao

# Install the JDK plus shadow (for useradd) and sudo (to drop to the elastic user at runtime)
RUN apk update && apk add openjdk8 shadow sudo

# Download and unpack Elasticsearch 2.4.6, then install the head plugin
RUN wget https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/2.4.6/elasticsearch-2.4.6.tar.gz
RUN tar zxvf elasticsearch-2.4.6.tar.gz -C /usr/local/ && \
    rm -f elasticsearch-2.4.6.tar.gz && \
    /usr/local/elasticsearch-2.4.6/bin/plugin install mobz/elasticsearch-head

# Elasticsearch refuses to run as root, so create a dedicated user
RUN useradd elastic && chown -R elastic:elastic /usr/local/elasticsearch-2.4.6

COPY ./run.sh /root/
RUN chmod +x /root/run.sh
CMD /root/run.sh
The run.sh startup script:
#!/bin/sh
# Replace the placeholder node name with the pod's hostname, then start
# Elasticsearch as the unprivileged elastic user.
sed -i "s/node.name: node-1/node.name: $HOSTNAME/g" /usr/local/elasticsearch-2.4.6/config/elasticsearch.yml
sudo -H -u elastic sh /usr/local/elasticsearch-2.4.6/bin/elasticsearch
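The substitution in run.sh relies on the config file containing the literal line node.name: node-1 (as in the elasticsearch.yml shown in section 2). A minimal local check of that sed expression, assuming a copy of that file is saved in the current directory, might look like this (the hostname value is made up):

# Hypothetical local check: simulate the rename run.sh performs at container start.
HOSTNAME=elasticsearch2.4-5d8f6-abcde    # example pod hostname (placeholder)
sed "s/node.name: node-1/node.name: $HOSTNAME/g" elasticsearch.yml | grep "^node.name"
# expected output: node.name: elasticsearch2.4-5d8f6-abcde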
Build and push the image:
docker build -t elastic:2.4 .
docker tag elastic:2.4 192.168.78.4/es/elasticsearch:2.4.6
docker push 192.168.78.4/es/elasticsearch:2.4.6
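Before pushing, it can be worth smoke-testing the image locally. A rough optional check (not part of the original workflow) is sketched below; because the stock config inside the image binds Elasticsearch to localhost, the requests are run inside the container via docker exec:

# Hypothetical smoke test of the freshly built image.
docker run -d --name es-test elastic:2.4
sleep 30                                                # give the JVM time to start
docker exec es-test wget -qO- http://127.0.0.1:9200/    # should return the ES 2.4.6 banner JSON
docker exec es-test wget -qO- http://127.0.0.1:9200/_plugin/head/ > /dev/null \
  && echo "head plugin responds"
docker rm -f es-test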
2. Deploying on Kubernetes:
Create the configuration files for the esconfig ConfigMap.
elasticsearch.yml:
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
#       Before you set out to tweak and tune the configuration, make sure you
#       understand what are you trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please see the documentation for further information on configuration options:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration.html>
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
cluster.name: my-application
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
node.name: node-1
#
# Add custom attributes to the node:
#
# node.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
# path.data: /path/to/data
#
# Path to log files:
#
# path.logs: /path/to/logs
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
# bootstrap.memory_lock: true
#
# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory
# available on the system and that the owner of the process is allowed to use this limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
network.host: 0.0.0.0
#
# Set a custom port for HTTP:
#
http.port: 9200
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html>
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when new node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
discovery.zen.ping.unicast.hosts: ["es-service"]
#
# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
#
discovery.zen.minimum_master_nodes: 2
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery.html>
#
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
# gateway.recover_after_nodes: 3
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html>
#
# ---------------------------------- Various -----------------------------------
#
# Disable starting multiple nodes on a single system:
#
# node.max_local_storage_nodes: 1
#
# Require explicit names when deleting indices:
#
# action.destructive_requires_name: true
logging.yml:

# you can override this using by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
rootLogger: ${es.logger.level}, console, file
logger:
  # log action execution errors for easier debugging
  action: DEBUG

  # deprecation logging, turn to DEBUG to see them
  deprecation: INFO, deprecation_log_file

  # reduce the logging for aws, too much is logged under the default INFO
  com.amazonaws: WARN
  # aws will try to do some sketchy JMX stuff, but its not needed.
  com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR
  com.amazonaws.metrics.AwsSdkMetrics: ERROR

  org.apache.http: INFO

  # gateway
  #gateway: DEBUG
  #index.gateway: DEBUG

  # peer shard recovery
  #indices.recovery: DEBUG

  # discovery
  #discovery: TRACE

  index.search.slowlog: TRACE, index_search_slow_log_file
  index.indexing.slowlog: TRACE, index_indexing_slow_log_file

additivity:
  index.search.slowlog: false
  index.indexing.slowlog: false
  deprecation: false

appender:
  console:
    type: console
    layout:
      type: consolePattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

  file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.10000m%n"

  # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
  # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
  #file:
    #type: extrasRollingFile
    #file: ${path.logs}/${cluster.name}.log
    #rollingPolicy: timeBased
    #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
    #layout:
      #type: pattern
      #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

  deprecation_log_file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}_deprecation.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

  index_search_slow_log_file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}_index_search_slowlog.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

  index_indexing_slow_log_file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
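With both files saved locally, the esconfig ConfigMap referenced by the Deployment below can be created along these lines (the local file names are an assumption; adjust the namespace to your environment):

# Hypothetical creation of the esconfig ConfigMap from the two files above,
# assuming they are saved as elasticsearch.yml and logging.yml in the current directory.
kubectl create configmap esconfig \
  --from-file=elasticsearch.yml \
  --from-file=logging.yml
kubectl describe configmap esconfig    # verify both keys are present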
The Service and Deployment manifest:
kind: Service
apiVersion: v1
metadata:
  labels:
    elastic-app: elasticsearch
  name: es-service
spec:
  ports:
    - name: business
      port: 9200
      targetPort: 9200
    - name: sync
      port: 9300
      targetPort: 9300
  selector:
    app: elasticsearch
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: elasticsearch2.4
  labels:
    app: elasticsearch
spec:
  replicas: 3
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      containers:
        - name: es
          image: 192.168.78.4/es/elasticsearch:2.4.6
          volumeMounts:
            - name: elasticsearch-yml
              mountPath: /usr/local/elasticsearch-2.4.6/config/
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          ports:
            - containerPort: 9200
            - containerPort: 9300
      volumes:
        - name: elasticsearch-yml
          configMap:
            name: esconfig
      imagePullSecrets:
        - name: regsecret
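The Deployment pulls from the private registry at 192.168.78.4, so the regsecret pull secret must exist before the pods can start. A rough sketch of creating it, applying the manifest, and checking the cluster (credentials, the manifest file name, and the pod name are placeholders, not values from the original setup):

# Hypothetical: create the pull secret referenced by imagePullSecrets
# (replace the credentials with the real registry account).
kubectl create secret docker-registry regsecret \
  --docker-server=192.168.78.4 \
  --docker-username=<user> \
  --docker-password=<password>

# Apply the manifest above (assumed to be saved as es-deployment.yaml).
kubectl apply -f es-deployment.yaml

# Verify: three pods should become Ready, and the cluster should report 3 nodes.
kubectl get pods -l app=elasticsearch
kubectl exec <one-of-the-es-pods> -- wget -qO- http://127.0.0.1:9200/_cat/nodes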