• Installing and running the Elastic Stack (Docker)


    https://www.docker.elastic.co

    Note: Alibaba Cloud is currently at 7.4.


    elasticsearch


    Reference:
    https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html

    Pull the image
    docker pull docker.elastic.co/elasticsearch/elasticsearch:7.5.0

    Inspect the image
    docker inspect docker.elastic.co/elasticsearch/elasticsearch:7.5.0

    mkdir /opt/elasticsearch -p
    vim /opt/elasticsearch/Dockerfile

    FROM docker.elastic.co/elasticsearch/elasticsearch:7.5.0
    EXPOSE 9200
    EXPOSE 9300

     9200 is the HTTP port, 9300 is the TCP transport port

    mkdir /opt/elasticsearch/usr/share/elasticsearch/data/ -p
    mkdir /opt/elasticsearch/usr/share/elasticsearch/logs/ -p
    cat /etc/passwd
    Grant ownership of data/logs and their subdirectories (-R), otherwise Elasticsearch cannot write data:
    chown 1000:1000 -R /opt/elasticsearch/usr/share/elasticsearch/data/
    chown 1000:1000 -R /opt/elasticsearch/usr/share/elasticsearch/logs/
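
    A quick sanity check of why 1000:1000 is used (assuming the stock image user setup): the elasticsearch user inside the official image runs with UID/GID 1000.

    # override the entrypoint so the container only prints the user's UID/GID
    docker run --rm --entrypoint id docker.elastic.co/elasticsearch/elasticsearch:7.5.0 elasticsearch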

    vim /opt/elasticsearch/docker-compose.yml

    version: '2.2'
    services:
      elasticsearch:
        image: v-elasticsearch
        restart: always
        container_name: elasticsearch
        build: 
          context: .
          dockerfile: Dockerfile
        ports:
          - "9200:9200"
          - "9300:9300"
        environment:
          - cluster.name=docker-cluster
          - discovery.type=single-node
          - bootstrap.memory_lock=true
          - network.host=0.0.0.0
          - http.cors.enabled=true
          - http.cors.allow-origin=*
          - ES_JAVA_OPTS=-Xms512m -Xmx512m
        ulimits:
          memlock:
            soft: -1
            hard: -1
        healthcheck:
          test: ["CMD", "curl", "-f", "http://127.0.0.1:9200"]
          retries: 300
          interval: 1s
        volumes: 
          - ./usr/share/elasticsearch/data/:/usr/share/elasticsearch/data
          - ./usr/share/elasticsearch/logs:/usr/share/elasticsearch/logs

    Alternatively, you can write the configuration file first:

    version: '2.2'
    services:
      elasticsearch:
        image: v-elasticsearch
        restart: always
        container_name: elasticsearch
        build: 
          context: .
          dockerfile: Dockerfile
        ports:
          - "9200:9200"
          - "9300:9300"
        environment:
          - ES_JAVA_OPTS=-Xms256m -Xmx256m
        ulimits:
          memlock:
            soft: -1
            hard: -1
        healthcheck:
          test: ["CMD", "curl", "-f", "http://127.0.0.1:9200"]
          retries: 300
          interval: 1s
        volumes: 
          - ./usr/share/elasticsearch/data/:/usr/share/elasticsearch/data
          - ./usr/share/elasticsearch/logs:/usr/share/elasticsearch/logs
          - ./usr/share/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml

    Configuration file references:
    https://github.com/elastic/elasticsearch-docker/tree/master/.tedi/template
    https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
    https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
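
    Per the docker/bootstrap-check guides above, most Linux hosts also need the mmap count raised before the container will start:

    # raise the limit now and persist it across reboots
    sysctl -w vm.max_map_count=262144
    echo 'vm.max_map_count=262144' >> /etc/sysctl.conf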

    mkdir -p /opt/elasticsearch/usr/share/elasticsearch/config/
    vim /opt/elasticsearch/usr/share/elasticsearch/config/elasticsearch.yml

    ---
    discovery.type: single-node
    bootstrap.memory_lock: true
    cluster.name: docker-cluster
    network.host: 0.0.0.0
    http.cors.enabled: true
    http.cors.allow-origin: "*"

    Elasticsearch password management is implemented with x-pack.
    The default account is elastic and the default password is changeme.
    x-pack is skipped here.
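
    A minimal sketch of the skipped step, assuming security is switched on first (xpack.security.enabled=true in the environment or elasticsearch.yml): generate passwords for the built-in users inside the running container.

    # interactively set passwords for elastic, kibana, logstash_system, ...
    docker exec -it elasticsearch /usr/share/elasticsearch/bin/elasticsearch-setup-passwords interactive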

    cd /opt/elasticsearch
    docker-compose build
    docker-compose up -d --force-recreate
    docker-compose down
    docker-compose restart


    View the logs
    docker logs --tail="500" elasticsearch

    Check the status
    curl http://127.0.0.1:9200/_cat/health

    netstat -anltp|grep 9200
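
    Optional smoke test (the index name is just an example): index one document and confirm it appears.

    # create a document in a throwaway index, then list indices
    curl -X POST "http://127.0.0.1:9200/test-index/_doc" -H 'Content-Type: application/json' -d '{"message":"hello"}'
    curl "http://127.0.0.1:9200/_cat/indices?v"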

    Enter the container
    docker exec -it elasticsearch /bin/bash

    Check the container
    docker exec -it elasticsearch /bin/bash /usr/share/elasticsearch/bin/elasticsearch --help
    docker exec -it elasticsearch /bin/bash /usr/share/elasticsearch/bin/elasticsearch --version

    Copy the configuration file out
    docker cp elasticsearch:/usr/share/elasticsearch/config/elasticsearch.yml /opt/elasticsearch/elasticsearch_bak.yml


    -----------------------------------

    elasticsearch-head

    mkdir /opt/elasticsearch-head -p
    vim /opt/elasticsearch-head/Dockerfile

    FROM mobz/elasticsearch-head:5
    EXPOSE 9100

    vim /opt/elasticsearch-head/docker-compose.yml

    version: '2.2'
    services:
      elasticsearch-head: 
        image: v-elasticsearch-head
        restart: always
        container_name: elasticsearch-head
        build: 
          context: .
          dockerfile: Dockerfile
        ports: 
          - 9100:9100

        environment:
          TZ: 'Asia/Shanghai'

    cd /opt/elasticsearch-head
    docker-compose build
    docker-compose up -d --force-recreate
    docker-compose down
    docker-compose restart


    View the logs
    docker logs --tail="500" elasticsearch-head


    netstat -anltp|grep 9100

    Enter the container
    docker exec -it elasticsearch-head /bin/bash

    Check the container (elasticsearch-head is a Node.js app, so there is no elasticsearch-head binary; check the Node runtime instead)
    docker exec -it elasticsearch-head node --version

    Export the configuration files
    docker cp elasticsearch-head:/usr/src/app/Gruntfile.js /opt/elasticsearch-head/Gruntfile.js

    mkdir /opt/elasticsearch-head/_site
    docker cp elasticsearch-head:/usr/src/app/_site/app.js /opt/elasticsearch-head/_site/app.js

    Add to docker-compose.yml:

    volumes:
    - ./Gruntfile.js:/usr/src/app/Gruntfile.js
    - ./_site/app.js:/usr/src/app/_site/app.js

    chown 1000:1000 -R /opt/elasticsearch-head/

    Rebuild, recreate and start the container

    Add a hostname to the connect server options in Gruntfile.js ('*' or '0.0.0.0'):

                    connect: {
                            server: {
                                    options: {
                                            hostname: '0.0.0.0',
                                            port: 9100,
                                            base: '.',
                                            keepalive: true
                                    }
                            }
                    }

    http://192.168.0.101:9100/?auth_user=elastic&auth_password=123456
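
    If x-pack security is enabled on Elasticsearch, the auth_user/auth_password parameters above only work when ES also allows the Authorization header through CORS; a hedged addition to the elasticsearch.yml created earlier:

    # let elasticsearch-head send credentials cross-origin
    echo 'http.cors.allow-headers: Authorization,X-Requested-With,Content-Type,Content-Length' >> /opt/elasticsearch/usr/share/elasticsearch/config/elasticsearch.yml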


    ------------------------------------

    kibana

    https://www.elastic.co/guide/en/kibana/current/docker.html

    docker pull docker.elastic.co/kibana/kibana:7.5.0

    docker inspect docker.elastic.co/kibana/kibana:7.5.0


    mkdir /opt/kibana -p
    vim /opt/kibana/Dockerfile

    FROM docker.elastic.co/kibana/kibana:7.5.0
    EXPOSE 5601

    vim /opt/kibana/docker-compose.yml  

    version: '2.2'
    services:
      kibana:
        image: v-kibana
        restart: always
        container_name: kibana
        build: 
          context: .
          dockerfile: Dockerfile
        environment:
          - SERVER_NAME=kibana
          - SERVER_HOST=0.0.0.0
          - ELASTICSEARCH_HOSTS=http://elasticsearchserver:9200
          - KIBANA_DEFAULTAPPID=discover
          - I18N_LOCALE=zh-CN
          - XPACK_MONITORING_UI_CONTAINER_ELASTICSEARCH_ENABLED=true
          - ELASTICSEARCH_USERNAME=elastic
          - ELASTICSEARCH_PASSWORD=123456
        healthcheck:
          test: ["CMD", "curl", "-f", "http://127.0.0.1:5601"]
          retries: 300
          interval: 1s
        ports:
          - 5601:5601
        volumes: 
          - /var/log/kibana:/var/log/kibana
          - /var/lib/kibana:/var/lib/kibana
        extra_hosts:
          - "elasticsearchserver:192.168.0.101"

    path.data cannot be set this way.
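
    To confirm the extra_hosts mapping took effect (name and IP as configured above):

    # extra_hosts entries are written into the container's /etc/hosts
    docker exec kibana grep elasticsearchserver /etc/hosts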

    Alternatively, write the configuration file first:

    vim /opt/kibana/docker-compose.yml

    version: '2.2'
    services:
      kibana:
        image: v-kibana
        restart: always
        container_name: kibana
        build: 
          context: .
          dockerfile: Dockerfile
        healthcheck:
          test: ["CMD", "curl", "-f", "http://127.0.0.1:5601"]
          retries: 300
          interval: 1s
        ports:
          - 5601:5601
        volumes: 
          - ./usr/share/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
          - /var/lib/kibana:/var/lib/kibana
        extra_hosts:
          - "elasticsearchserver:192.168.0.101"

    mkdir -p /opt/kibana/usr/share/kibana/config/
    vim /opt/kibana/usr/share/kibana/config/kibana.yml

    server.name: kibana
    server.host: "0.0.0.0"
    path.data: /var/lib/kibana
    elasticsearch.hosts: [ "http://elasticsearchserver:9200" ]
    kibana.defaultAppId: discover
    i18n.locale: zh-CN
    xpack.monitoring.ui.container.elasticsearch.enabled: true
    elasticsearch.username: "elastic"
    elasticsearch.password: "123456"

    With x-pack:

    elasticsearch.username: "elastic"
    elasticsearch.password: "123456"

    mkdir -p /var/lib/kibana;chmod -R 777 /var/lib/kibana

    cd /opt/kibana
    docker-compose build
    docker-compose up -d --force-recreate
    docker-compose down
    docker-compose restart


    View the logs
    docker logs --tail="500" kibana

    Check the status
    curl http://192.168.1.101:5601/api/status

    netstat -anltp|grep 5601

    Enter the container
    docker exec -it kibana /bin/bash

    Check the container
    docker exec -it kibana /bin/bash /usr/share/kibana/bin/kibana --help
    docker exec -it kibana /bin/bash /usr/share/kibana/bin/kibana --version

    Copy the configuration file out
    docker cp kibana:/usr/share/kibana/config/kibana.yml /opt/kibana/kibana_bak.yml

    ------------------------

    logstash

    References:
    https://www.elastic.co/guide/en/logstash/current/docker.html
    https://www.elastic.co/guide/en/logstash/current/docker-config.html

    docker pull docker.elastic.co/logstash/logstash:7.5.0

    docker inspect docker.elastic.co/logstash/logstash:7.5.0

    mkdir /opt/logstash -p
    vim /opt/logstash/Dockerfile

    With an external JDK 8:

    FROM openjdk:8 AS jdk
    FROM docker.elastic.co/logstash/logstash:7.5.0
    COPY --from=jdk /usr/local/openjdk-8 /usr/local/openjdk-8
    ENV JAVA_HOME=/usr/local/openjdk-8
    ENV PATH=$JAVA_HOME/bin:$PATH
    ENV CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
    EXPOSE 9600
    EXPOSE 9601
    EXPOSE 9602
    EXPOSE 5044

    Using the built-in JDK throws a warning; the simpler Dockerfile:

    FROM docker.elastic.co/logstash/logstash:7.5.0
    EXPOSE 9600
    EXPOSE 5044

    vim /opt/logstash/docker-compose.yml

    version: '2.2'
    services:
      logstash:
        image: v-logstash
        restart: always
        container_name: logstash
        build: 
          context: .
          dockerfile: Dockerfile
        environment:
          - HTTP_HOST=0.0.0.0
          - PATH_DATA=/var/lib/logstash/data
          - PATH_LOGS=/var/log/logstash
          - XPACK_MONITORING_ELASTICSEARCH_HOSTS=http://elasticsearchserver:9200
          - XPACK_MONITORING_ENABLED=true
          - XPACK_MONITORING_ELASTICSEARCH_USERNAME=elastic
          - XPACK_MONITORING_ELASTICSEARCH_PASSWORD=123456
        ports:
          - 9600:9600
          - 5044:5044
        healthcheck:
          test: ["CMD", "curl", "-f", "http://127.0.0.1:9600/_node/stats"]
          retries: 300
          interval: 1s
        volumes:
          - /var/log/logstash:/var/log/logstash
          - /var/lib/logstash/data:/var/lib/logstash/data
          - ./usr/share/logstash/pipeline/logstash1.conf:/usr/share/logstash/pipeline/logstash1.conf
          - /var/lib/logstash/data1:/var/lib/logstash/data1
          - ./usr/share/logstash/pipeline/logstash2.conf:/usr/share/logstash/pipeline/logstash2.conf
          - /var/lib/logstash/data2:/var/lib/logstash/data2
        extra_hosts:
          - "elasticsearchserver:192.168.0.101"
          - "kafkaserver1:192.168.0.101"
          - "kafkaserver2:192.168.0.101"
          - "kafkaserver3:192.168.0.101"
        command: /usr/share/logstash/bin/logstash -f /usr/share/logstash/pipeline/logstash2.conf --path.data=/usr/share/logstash/data2

    Alternatively, write the configuration file first:

    vim /opt/logstash/docker-compose.yml

    version: '2.2'
    services:
      logstash:
        image: v-logstash
        restart: always
        container_name: logstash
        build: 
          context: .
          dockerfile: Dockerfile
        ports:
          - 9600:9600
          - 5044:5044
        healthcheck:
          test: ["CMD", "curl", "-f", "http://127.0.0.1:9600/_node/stats"]
          retries: 300
          interval: 1s
        volumes: 
          - ./usr/share/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
          - /var/log/logstash:/var/log/logstash
          - /var/lib/logstash/data:/var/lib/logstash/data
          - ./usr/share/logstash/pipeline/logstash1.conf:/usr/share/logstash/pipeline/logstash1.conf
          - /var/lib/logstash/data1:/var/lib/logstash/data1
          - ./usr/share/logstash/pipeline/logstash2.conf:/usr/share/logstash/pipeline/logstash2.conf
          - /var/lib/logstash/data2:/var/lib/logstash/data2
        extra_hosts:
          - "elasticsearchserver:192.168.0.101"
          - "kafkaserver1:192.168.0.101"
          - "kafkaserver2:192.168.0.101"
          - "kafkaserver3:192.168.0.101"
        command: /usr/share/logstash/bin/logstash -f /usr/share/logstash/pipeline/logstash2.conf --path.data=/usr/share/logstash/data2

    mkdir -p /var/log/logstash;chmod -R 777 /var/log/logstash
    mkdir -p /var/lib/logstash/data;chmod -R 777 /var/lib/logstash/data
    mkdir -p /var/lib/logstash/data1;chmod -R 777 /var/lib/logstash/data1
    mkdir -p /var/lib/logstash/data2;chmod -R 777 /var/lib/logstash/data2

    mkdir -p /opt/logstash/usr/share/logstash/config/
    vim /opt/logstash/usr/share/logstash/config/logstash.yml

    http.host: 0.0.0.0
    path.data: /var/lib/logstash/data
    path.logs: /var/log/logstash
    xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearchserver:9200" ]
    xpack.monitoring.enabled: true
    xpack.monitoring.elasticsearch.username: "elastic"
    xpack.monitoring.elasticsearch.password: "123456"

    With x-pack:

    xpack.monitoring.elasticsearch.username: "elastic"
    xpack.monitoring.elasticsearch.password: "123456"

    mkdir /opt/logstash/usr/share/logstash/pipeline/ -p

    vim /opt/logstash/usr/share/logstash/pipeline/logstash1.conf

    The snippets below are alternative pipeline examples; keep only the one you need:

    input{
            stdin{
            }
    }
    output {
            elasticsearch {
                    hosts => ["elasticsearchserver:9200"]
            }
    }
    input{
        stdin{
        }
    }
    output {
        elasticsearch {
            hosts => ["elasticsearchserver:9200"]
            user => "elastic"
            password => "changeme"
        }
    }
    input{stdin{}}output{stdout{codec=>rubydebug}}
    input{stdin{}}output{stdout{codec=>json_lines}}
    input {
      beats {
        port => 5044
      }
    }
    output {
      stdout {
        codec => rubydebug
      }
    }

    ## host:port is the destination configured in the logback appender

    logback->logstash->es

    input {
        tcp {
            port => "9601"
            mode => "server"
            tags => "tags_test"
            codec => json_lines
        }
    }
    output {
        elasticsearch {
            hosts => "elasticsearchserver:9200"
            index => "log-demo1"
            user => "elastic"
            password => "123456"
        }
    }

    Set the tags value to whatever you like.

    If the JSON payload is long and spans multiple lines, the codec => "json_lines" plugin is recommended.
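
    A hedged end-to-end test of the tcp/json_lines pipeline above, assuming port 9601 is published from the container and nc/netcat is installed on the host:

    # send one newline-terminated JSON document to the tcp input
    echo '{"message":"tcp test","level":"INFO"}' | nc 127.0.0.1 9601
    # the document should land in the log-demo1 index
    curl "http://127.0.0.1:9200/log-demo1/_search?pretty"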

    logback->kafka->logstash->es

    input {
        kafka {
            topics => "log"
            bootstrap_servers => "kafkaserver1:9011,kafkaserver2:9012,kafkaserver3:9013"
            group_id => "log_1"
            auto_offset_reset => "earliest"
            consumer_threads => "5"
            decorate_events => "false"
            type => "spring-boot-log"
            tags => "tags_test"
            codec => "json"
        }
    }
    filter {
    }
    output {
        stdout {
            codec => rubydebug
        }
        elasticsearch {
            hosts => "elasticsearchserver:9200"
            index => "log-kafka-demo1"
            user => "elastic"
            password => "123456"
        }
    }

    If all Logstash instances in a cluster use the same group_id, they will not consume the same Kafka messages more than once.

    If the logback Kafka output pattern is not JSON, the Logstash input must use codec => "plain".

    If the logback Kafka output pattern is JSON, the Logstash input must use codec => "json"; "json_lines" will not work.

    An example pattern:

    <appender name="KAFKA" class="com.github.danielwegener.logback.kafka.KafkaAppender">
            <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
                <providers>
                    <timestamp>
                        <timeZone>UTC</timeZone>
                    </timestamp>
                    <pattern>
                        <pattern>{"contextName ":"%contextName ","relative":"%relative","time":"%date{yyyy-MM-dd HH:mm:ss.SSS}","file":"%file","class":"%class","method":"%method","contextName":"%contextName","line":"%line","logger_name":"%logger","thread_name":"%thread","level":"%-5level","host":"%host","hostName":"%hostName","port":"${server.port}","message":"%message","stack_trace":"%xException{5}"}</pattern>
                    </pattern>
                </providers>
            </encoder>
    <!--        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">-->
    <!--            <pattern>{"contextName ":"%contextName ","relative":"%relative","time":"%date{yyyy-MM-dd HH:mm:ss.SSS}","file":"%file","class":"%class","method":"%method","contextName":"%contextName","line":"%line","logger_name":"%logger","thread_name":"%thread","level":"%-5level","host":"%host","hostName":"%hostName","port":"${server.port}","message":"%message","stack_trace":"%xException{5}"}</pattern>-->
    <!--            <charset>UTF-8</charset>-->
    <!--        </encoder>-->
            <topic>log</topic>
            <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy"/>
            <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
            <producerConfig>bootstrap.servers=${Kafka_Log_Servers}</producerConfig>
        </appender>

    For parameters whose key ends in s (topics, bootstrap_servers, ...), do not use the ["xxx","yyy"] format; use the "xxx,yyy" format instead.
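
    A hedged way to test the kafka -> logstash -> es path without the application, assuming a Kafka installation with the standard console tools and that the kafkaserver* names resolve on the machine running them:

    # publish one JSON message to the "log" topic
    echo '{"message":"kafka test","level":"INFO"}' | kafka-console-producer.sh --broker-list kafkaserver1:9011 --topic log
    # watch it come out of the stdout/rubydebug output
    docker logs -f logstash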

    cd /opt/logstash
    docker-compose build
    docker-compose up -d --force-recreate
    docker-compose down
    docker-compose restart


    View the logs
    docker logs --tail="500" logstash
    docker logs -f logstash

    Enter the container
    docker exec -it logstash /bin/bash

    Check the container
    docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --help
    docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --version

    netstat -anltp|grep 9600
    netstat -anltp|grep 5044

    Copy the configuration files out
    docker cp logstash:/usr/share/logstash/config/logstash.yml /opt/logstash/logstash_bak.yml
    docker cp logstash:/usr/share/logstash/pipeline/logstash1.conf /opt/logstash/logstash1_bak.conf


    Test
    Run a second instance with its data placed under data1:
    docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --path.data=./data1 -e 'input{stdin{}}output{stdout{codec=>rubydebug}}'
    docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --path.data=./data1 -e 'input{stdin{}}output{stdout{codec=>json_lines}}'
    docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --path.data=./data1 -e 'input{stdin{}}output{elasticsearch{hosts=>"192.168.1.101:9200"}}'

    docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash -f  /usr/share/logstash/pipeline/logstash2.conf --path.data=/usr/share/logstash/data2
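
    The monitoring API published on port 9600 also shows which pipelines are running:

    curl "http://127.0.0.1:9600/_node/pipelines?pretty"
    curl "http://127.0.0.1:9600/_node/stats?pretty"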

    ----------------------------
    Filebeat

    docker pull docker.elastic.co/beats/filebeat:7.5.0

    docker inspect docker.elastic.co/beats/filebeat:7.5.0

    Reference:
    https://www.elastic.co/guide/en/beats/filebeat/current/running-on-docker.html

    mkdir /opt/filebeat -p
    vim /opt/filebeat/Dockerfile

    FROM docker.elastic.co/beats/filebeat:7.5.0

    vim /opt/filebeat/docker-compose.yml

    version: '2.2'
    services:
      filebeat:
        image: v-filebeat
        restart: always
        container_name: filebeat
        build: 
          context: .
          dockerfile: Dockerfile
        volumes: 
          - ./usr/share/filebeat/data/:/usr/share/filebeat/data/
          - ./usr/share/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml

    mkdir /opt/filebeat/usr/share/filebeat -p
    vim /opt/filebeat/usr/share/filebeat/filebeat.yml

    filebeat.config:
      modules:
        path: ${path.config}/modules.d/*.yml
        reload.enabled: false
    processors:
    - add_cloud_metadata: ~
    output.elasticsearch:
      hosts: '${ELASTICSEARCH_HOSTS:192.168.1.101:9200}'
      username: '${ELASTICSEARCH_USERNAME:}'
      password: '${ELASTICSEARCH_PASSWORD:}'

    Only one output may be enabled at a time; the following are alternatives:

    output.elasticsearch:
      hosts: ["192.168.1.101:9200"]

    output.logstash:
      hosts: ["192.168.1.101:5044"]
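
    A hedged config check from inside the running container (the binary path follows the official image layout):

    docker exec -it filebeat /usr/share/filebeat/filebeat test config
    docker exec -it filebeat /usr/share/filebeat/filebeat test output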

    cd /opt/filebeat
    docker-compose build
    docker-compose up -d --force-recreate
    docker-compose down
    docker-compose restart

    View the logs
    docker logs --tail="500" filebeat
    docker logs -f filebeat

    Enter the container
    docker exec -it filebeat /bin/bash

    Check the container (the filebeat binary lives directly under /usr/share/filebeat, not in a bin/ subdirectory)
    docker exec -it filebeat /usr/share/filebeat/filebeat --help
    docker exec -it filebeat /usr/share/filebeat/filebeat --version

    Copy the configuration file out
    docker cp filebeat:/usr/share/filebeat/filebeat.yml /opt/filebeat/filebeat_bak.yml

    ---------------------------------------

    apm-server

    docker pull docker.elastic.co/apm/apm-server:7.5.0

    docker inspect docker.elastic.co/apm/apm-server:7.5.0

    Reference:
    https://www.elastic.co/guide/en/apm/server/current/running-on-docker.html


    mkdir /opt/apm-server -p
    vim /opt/apm-server/Dockerfile

    FROM docker.elastic.co/apm/apm-server:7.5.0
    EXPOSE 8200

    docker-compose reference:
    https://github.com/elastic/apm-server/blob/master/tests/docker-compose.yml

    vim /opt/apm-server/docker-compose.yml

    version: '2.2'
    services:
      apm-server:
        image: v-apm-server
        restart: always
        container_name: apm-server
        build: 
          context: .
          dockerfile: Dockerfile
        environment:
          - output.elasticsearch.hosts=['http://192.168.1.101:9200']
          - output.elasticsearch.username=elastic
          - output.elasticsearch.password=123456
          - apm-server.host="0.0.0.0:8200"
          - setup.kibana.host="192.168.1.101:5601"
        ports:
          - 8200:8200
        healthcheck:
          test: ["CMD", "curl", "-f" ,"http://127.0.0.1:8200/"]
        command: apm-server -e -d "*" -E apm-server.host="0.0.0.0:8200" -E apm-server.expvar.enabled=true -E output.elasticsearch.hosts=['http://192.168.1.101:9200']


    To mount a config file, add:

    volumes:
    - ./usr/share/apm-server/apm-server.yml:/usr/share/apm-server/apm-server.yml

    To mount the data directory, add:

    volumes:
    - ./usr/share/apm-server/data:/usr/share/apm-server/data:ro


    cd /opt/apm-server
    docker-compose build
    docker-compose up -d --force-recreate
    docker-compose down
    docker-compose restart

    View the logs
    docker logs --tail="500" apm-server
    docker logs -f apm-server
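
    Once it is up, the root endpoint answers with build/version JSON (the same check the healthcheck performs):

    curl http://127.0.0.1:8200/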

    Enter the container
    docker exec -it apm-server /bin/bash

    Check the container (the apm-server binary lives directly under /usr/share/apm-server)
    docker exec -it apm-server /usr/share/apm-server/apm-server --help
    docker exec -it apm-server /usr/share/apm-server/apm-server --version


    Copy the configuration file out
    docker cp apm-server:/usr/share/apm-server/apm-server.yml /opt/apm-server/apm-server_bak.yml

  • Original article: https://www.cnblogs.com/wintersoft/p/11213475.html