• (OK) Fedora 23 / Docker / CORE testing



    (OK) INSTALL CORE on Fedora 23 or CentOS 7

    // Fedora 23 & CentOS 7
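
    Before the build below, the autotools toolchain and CORE's build/runtime dependencies must be installed. The package list here is an assumption based on typical CORE requirements, not taken from this log; check the CORE documentation for the authoritative list.

    [root@localhost core]# dnf install -y autoconf automake gcc make \
        tcl tk libev-devel bridge-utils ebtables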

    [root@localhost core]#./bootstrap.sh
    [root@localhost core]# ./configure --with-startup=systemd
    [root@localhost core]# make
    [root@localhost core]# make install 


    [root@localhost core]# systemctl daemon-reload
    [root@localhost core]# systemctl start core-daemon.service
    [root@localhost core]# core-gui




    NOTE: /root/.core/configs/m-MPE-manet.imn


    +++++++++++++++++++++++++++++++++++++++++++


    [root@localhost core]# systemctl daemon-reload
    [root@localhost core]# systemctl start core-daemon.service
    ---------------------------------------------------------------------------
    [root@localhost core]# core-gui
    ----------------------
    /root/.core/configs/m-MPE-manet.imn
    ----------------------
    Under the Session Menu, the Options... dialog has an option to set a control network prefix.
    This can be set to a network prefix such as 172.16.0.0/24. A bridge will be created on the host machine with the last address in the prefix range (e.g., 172.16.0.254), and each node will get an extra ctrl0 control interface configured with an address corresponding to its node number (e.g., 172.16.0.3 for n3).
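    A quick way to confirm the control network once a session is running (a sketch, assuming the 172.16.0.0/24 prefix used here): the control bridge should show up on the host, and each node's ctrl0 address should answer pings.
    [root@localhost core]# brctl show                        # the b.ctrl0net.* bridge should be listed
    [root@localhost core]# ip addr show | grep 172.16.0.254  # host end of the control network
    [root@localhost core]# ping -c 3 172.16.0.3              # ctrl0 address of node n3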
    ----------------------
    [root@localhost doc]# ifconfig
    enp13s0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
            inet 192.168.0.100  netmask 255.255.255.0  broadcast 192.168.0.255
            inet6 fe80::3e97:eff:fef0:b5bb  prefixlen 64  scopeid 0x20<link>
            ether 3c:97:0e:f0:b5:bb  txqueuelen 1000  (Ethernet)
            RX packets 424786  bytes 474479916 (452.4 MiB)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 402854  bytes 46953257 (44.7 MiB)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

    b.ctrl0net.6a: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
            inet 172.16.0.254  netmask 255.255.255.0  broadcast 0.0.0.0
            inet6 fe80::bc49:1ff:fe27:a95  prefixlen 64  scopeid 0x20<link>
            ether 16:32:81:19:ca:43  txqueuelen 1000  (Ethernet)
            RX packets 149  bytes 12753 (12.4 KiB)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 84  bytes 8808 (8.6 KiB)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    ----------------------
    [root@localhost core]# route -n
    Kernel IP routing table
    Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
    0.0.0.0         192.168.0.1     0.0.0.0         UG    100    0        0 enp13s0
    172.16.0.0      0.0.0.0         255.255.255.0   U     0      0        0 b.ctrl0net.6a
    192.168.0.0     0.0.0.0         255.255.255.0   U     100    0        0 enp13s0
    192.168.122.0   0.0.0.0         255.255.255.0   U     0      0        0 virbr0
    ----------------------
    [root@localhost 桌面]# . iptables_core.sh
    [root@localhost 桌面]# cat iptables_core.sh

        #!/bin/bash
        echo 1 > /proc/sys/net/ipv4/ip_forward
        echo 1 > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts
        echo 1 > /proc/sys/net/ipv4/icmp_echo_ignore_all
        echo 1 > /proc/sys/net/ipv4/tcp_syncookies
        # NICs: upstream (external) and downstream (internal)
        # upstream / external: 192.168.0.100
        # downstream / internal: 172.16.0.254
        #INET_IF="ppp0"
        INET_IF="enp13s0"
        LAN_IF="b.ctrl0net.6a"
        #INET_IP="192.168.0.100"
        INET_IP="10.108.162.164"
        LAN_IP="172.16.0.254"
        LAN_IP_RANGE="172.16.0.0/24"
        #LAN_WWW="172.16.0.6"
        IPT="/sbin/iptables"
        #TC="/sbin/tc"
        MODPROBE="/sbin/modprobe"
        $MODPROBE ip_tables
        $MODPROBE iptable_nat
        $MODPROBE ip_nat_ftp
        $MODPROBE ip_nat_irc
        $MODPROBE ipt_mark
        $MODPROBE ip_conntrack
        $MODPROBE ip_conntrack_ftp
        $MODPROBE ip_conntrack_irc
        $MODPROBE ipt_MASQUERADE
        for TABLE in filter nat mangle ; do
        $IPT -t $TABLE -F
        $IPT -t $TABLE -X
        $IPT -t $TABLE -Z
        done
        $IPT -P INPUT DROP
        $IPT -P OUTPUT ACCEPT
        $IPT -P FORWARD DROP
        $IPT -t nat -P PREROUTING ACCEPT
        $IPT -t nat -P OUTPUT ACCEPT
        $IPT -t nat -P POSTROUTING ACCEPT
        # drop unsolicited access from Internet clients
        #$IPT -A INPUT -i $INET_IF -m state --state RELATED,ESTABLISHED -j ACCEPT
        $IPT -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
        #$IPT -A INPUT -i $INET_IF -p tcp -s 123.5.0.0/16 --dport 22 -j ACCEPT
        $IPT -A INPUT -p tcp --dport 22 -j ACCEPT
        $IPT -A INPUT -i $INET_IF -m state --state NEW,INVALID -j DROP
        for DNS in $(grep ^n /etc/resolv.conf|awk '{print $2}'); do
        $IPT -A INPUT -p tcp -s $DNS --sport domain -j ACCEPT
        $IPT -A INPUT -p udp -s $DNS --sport domain -j ACCEPT
        done
        # anti bad scaning
        $IPT -A INPUT -i $INET_IF -p tcp --tcp-flags ALL FIN,URG,PSH -j DROP
        $IPT -A INPUT -i $INET_IF -p tcp --tcp-flags ALL ALL -j DROP
        $IPT -A INPUT -i $INET_IF -p tcp --tcp-flags ALL SYN,RST,ACK,FIN,URG -j DROP
        $IPT -A INPUT -i $INET_IF -p tcp --tcp-flags ALL NONE -j DROP
        $IPT -A INPUT -i $INET_IF -p tcp --tcp-flags SYN,RST SYN,RST -j DROP
        $IPT -A INPUT -i $INET_IF -p tcp --tcp-flags SYN,FIN SYN,FIN -j DROP
        #$IPT -t nat -A PREROUTING -d $INET_IP -p tcp --dport 8008 -j DNAT --to-destination $LAN_WWW:8008
        #$IPT -t nat -A PREROUTING -d $INET_IP -p tcp --dport 22 -j DNAT --to-destination $LAN_WWW:22
        if [ $INET_IF = "ppp0" ] ; then
        $IPT -t nat -A POSTROUTING -o $INET_IF -s $LAN_IP_RANGE -j MASQUERADE
        else
        $IPT -t nat -A POSTROUTING -o $INET_IF -s $LAN_IP_RANGE -j SNAT --to-source $INET_IP
        fi
        #no limit
        #$IPT -A FORWARD -s 192.168.1.216 -m mac --mac-source 00:15:17:F7:AB:84 -j ACCEPT
        #$IPT -A FORWARD -d 192.168.1.216 -j ACCEPT
        #$IPT -A FORWARD -p tcp -d ! $LAN_IP_RANGE -m multiport --dports ! 20,21,22,25,53,80,110,443,8080 -j DROP
        #$IPT -A FORWARD -p udp -d ! $LAN_IP_RANGE -m multiport --dports ! 20,21,22,25,53,80,110,443,8080 -j DROP
        # MAC/IP address binding
        #$IPT -A FORWARD -s 192.168.1.11 -m mac --mac-source 44-87-FC-44-B9-6E -j ACCEPT
        $IPT -A FORWARD -s 172.16.0.1 -j ACCEPT
        $IPT -A FORWARD -s 172.16.0.2 -j ACCEPT
        $IPT -A FORWARD -s 172.16.0.3 -j ACCEPT
        $IPT -A FORWARD -s 172.16.0.4 -j ACCEPT
        $IPT -A FORWARD -s 172.16.0.5 -j ACCEPT
        $IPT -A FORWARD -s 172.16.0.6 -j ACCEPT
        $IPT -A FORWARD -s 172.16.0.7 -j ACCEPT
        $IPT -A FORWARD -s 172.16.0.8 -j ACCEPT
        $IPT -A FORWARD -s 172.16.0.9 -j ACCEPT
        $IPT -A FORWARD -s 172.16.0.10 -j ACCEPT
        $IPT -A FORWARD -s 172.16.0.11 -j ACCEPT
        $IPT -A FORWARD -s 172.16.0.12 -j ACCEPT
        $IPT -A FORWARD -d 172.16.0.1 -j ACCEPT
        $IPT -A FORWARD -d 172.16.0.2 -j ACCEPT
        $IPT -A FORWARD -d 172.16.0.3 -j ACCEPT
        $IPT -A FORWARD -d 172.16.0.4 -j ACCEPT
        $IPT -A FORWARD -d 172.16.0.5 -j ACCEPT
        $IPT -A FORWARD -d 172.16.0.6 -j ACCEPT
        $IPT -A FORWARD -d 172.16.0.7 -j ACCEPT
        $IPT -A FORWARD -d 172.16.0.8 -j ACCEPT
        $IPT -A FORWARD -d 172.16.0.9 -j ACCEPT
        $IPT -A FORWARD -d 172.16.0.10 -j ACCEPT
        $IPT -A FORWARD -d 172.16.0.11 -j ACCEPT
        $IPT -A FORWARD -d 172.16.0.12 -j ACCEPT
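
    The twelve pairs of per-node FORWARD rules at the end of the script can also be generated with a loop; a compact equivalent (same 172.16.0.1 to 172.16.0.12 range, same behavior) would be:

        # compact form of the per-node FORWARD rules above
        for N in $(seq 1 12); do
            $IPT -A FORWARD -s 172.16.0.$N -j ACCEPT
            $IPT -A FORWARD -d 172.16.0.$N -j ACCEPT
        done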



    ---------------------------------------------------------------------------
    [root@n6 n6.conf]# ifconfig
    ctrl0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
            inet 172.16.0.6  netmask 255.255.255.0  broadcast 0.0.0.0
            inet6 fe80::216:3eff:fec0:b7a4  prefixlen 64  scopeid 0x20<link>
            ether 00:16:3e:c0:b7:a4  txqueuelen 1000  (Ethernet)
            RX packets 143  bytes 15449 (15.0 KiB)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 60  bytes 5273 (5.1 KiB)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

    eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
            inet 10.0.0.6  netmask 255.255.255.255  broadcast 0.0.0.0
            inet6 a::6  prefixlen 128  scopeid 0x0<global>
            inet6 fe80::200:ff:feaa:5  prefixlen 64  scopeid 0x20<link>
            ether 00:00:00:aa:00:05  txqueuelen 1000  (Ethernet)
            RX packets 8182  bytes 904248 (883.0 KiB)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 2735  bytes 301738 (294.6 KiB)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    ----------------------
    [root@n6 n6.conf]# route add default gw 172.16.0.254
    [root@n6 n6.conf]# route -n          
    Kernel IP routing table
    Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
    0.0.0.0         172.16.0.254    0.0.0.0         UG    0      0        0 ctrl0
    10.0.0.1        10.0.0.5        255.255.255.255 UGH   4      0        0 eth0
    10.0.0.2        10.0.0.5        255.255.255.255 UGH   4      0        0 eth0
    10.0.0.3        10.0.0.5        255.255.255.255 UGH   3      0        0 eth0
    10.0.0.4        10.0.0.5        255.255.255.255 UGH   3      0        0 eth0
    10.0.0.5        0.0.0.0         255.255.255.255 UH    1      0        0 eth0
    10.0.0.7        0.0.0.0         255.255.255.255 UH    1      0        0 eth0
    10.0.0.8        10.0.0.5        255.255.255.255 UGH   4      0        0 eth0
    10.0.0.9        0.0.0.0         255.255.255.255 UH    1      0        0 eth0
    10.0.0.10       10.0.0.5        255.255.255.255 UGH   2      0        0 eth0
    10.0.0.11       10.0.0.5        255.255.255.255 UGH   5      0        0 eth0
    172.16.0.0      0.0.0.0         255.255.255.0   U     0      0        0 ctrl0

    [root@n6 n6.conf]# cat /etc/resolv.conf
    # Generated by NetworkManager
    nameserver 10.3.9.4
    nameserver 10.3.9.5
    nameserver 10.3.9.6

    [root@n6 n6.conf]# ping www.bupt.edu.cn
    PING www.bupt.edu.cn (10.3.9.254) 56(84) bytes of data.
    64 bytes from 10.3.9.254: icmp_seq=1 ttl=58 time=0.751 ms
    64 bytes from 10.3.9.254: icmp_seq=2 ttl=58 time=0.727 ms
    64 bytes from 10.3.9.254: icmp_seq=3 ttl=58 time=0.936 ms
    ^C
    --- www.bupt.edu.cn ping statistics ---
    3 packets transmitted, 3 received, 0% packet loss, time 2002ms
    rtt min/avg/max/mdev = 0.727/0.804/0.936/0.098 ms
    [root@n6 n6.conf]#

    ---------------------------------------------------------------------------
    At this point, Internet access from the CORE virtual nodes works.


    ---------------------------------------------------------------------------
    Next: use Docker inside a CORE virtual node (prerequisite: Internet access from the CORE virtual nodes, as set up above).
    ---------------------------------------------------------------------------

    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^OK OK
    ---------Install Docker on Fedora 23
    dnf update -y
    dnf -y install docker-io
    systemctl start docker

    systemctl start docker.service
    systemctl restart docker.service
    systemctl stop docker.service
    systemctl status docker.service

    systemctl status docker -l

    ^^^^^^ test on the host machine
    docker images
    docker search centos
    docker pull centos
    docker images
    docker tag 2933d50b9f77 docker.io/centos:core
    docker run centos echo "hello world!"
    ^^^^^^ at this point, the host machine side is OK

    docker run --rm -it centos /bin/bash
    docker run --rm -it -d centos /bin/bash
    docker run --rm centos echo "hello world!"
    docker run centos echo "hello world!"
    docker run --cap-add=NET_ADMIN --cap-add=SYS_ADMIN centos echo "hello world!"
    docker run --privileged  -ti -v /sys/fs/cgroup:/sys/fs/cgroup centos echo "hello world!"
    docker run --privileged  -ti -e "container=docker"  -v /sys/fs/cgroup:/sys/fs/cgroup  centos  /usr/sbin/init

    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^problem & resolve
    [root@localhost ~]# systemctl start docker.service
    Job for docker.service failed because the control process exited with error code. See "systemctl status docker.service" and "journalctl -xe" for details.

    systemctl status docker -l

    rm /var/lib/docker/overlay/ -rf
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^OK OK
    [root@localhost core]# systemctl daemon-reload
    [root@localhost core]# systemctl start core-daemon.service
    [root@localhost core]# core-gui

    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^OK OK in CORE node
    [root@n6 n6.conf]#
    docker daemon &
    docker images
    docker run centos echo "hello world!"

    docker run --rm -it centos /bin/bash


    docker run --rm -it busybox:core /bin/sh


    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

    ---------------------------------------------------------------------------
    At this point, Docker runs successfully inside the CORE virtual node.
    ---------------------------------------------------------------------------


    NOTE:

    step 1. start /root/.core/configs/m-MPE-manet.imn
    step 2. . iptables_core.sh
    step 3. systemctl restart firewalld.service
    step 4. stop /root/.core/configs/m-MPE-manet.imn
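
    Steps 2 and 3 can be kept in one small host-side helper; this is only a convenience sketch of the commands already listed above (steps 1 and 4 are done in core-gui):
    # host-side part of the workflow; run after starting m-MPE-manet.imn in core-gui
    . iptables_core.sh
    systemctl restart firewalld.service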




    The commands below are historical notes; reading them is optional.


    # Docker's storage mechanism uses a flexible, modular design. Five storage engines are currently supported: aufs, btrfs, device mapper, vfs, and overlay. What they have in common is that each implements the graphdriver.Driver interface.
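    To see which graphdriver a running daemon actually picked, the "Storage Driver" line of docker info is enough (the full docker info output from the CORE node appears further below):
    docker info | grep 'Storage Driver'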


    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^no use
    (need) warning: file /usr/lib/systemd/system/docker.service: remove failed: No such file or directory
    (not need) ls: cannot access /etc/systemd/system/docker.service: No such file or directory

    [root@localhost ~]# dnf install docker
    [root@localhost ~]# dnf install docker-registry
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

    systemctl daemon-reload

    systemctl start docker.service
    systemctl restart docker.service
    systemctl stop docker.service
    systemctl status docker.service

    docker daemon --storage-opt dm.basesize=3G

    docker daemon -D -s overlay &
    docker daemon -s overlay &
    docker daemon -s devicemapper &

    docker tag 0e0217391d41 docker.io/centos:core

    docker info
    docker version
    uname -a

    docker images
    docker search centos
    docker pull centos
    docker pull stuartmarsden/dockercoreemu
    docker images
    docker run --rm -it centos /bin/bash
    docker run --rm -it -d centos /bin/bash
    docker run --rm centos echo "hello world!"
    docker run centos echo "hello world!"

    docker run --cap-add=NET_ADMIN --cap-add=SYS_ADMIN centos echo "hello world!"

    docker run --privileged  -ti -v /sys/fs/cgroup:/sys/fs/cgroup centos echo "hello world!"

    docker run --privileged  -ti -e "container=docker"  -v /sys/fs/cgroup:/sys/fs/cgroup  centos  /usr/sbin/init

    ---------------------------------------------------------------------------

    cat /proc/self/cgroup
    cat /proc/self/mountinfo

    cat /proc/cgroups
    cat /proc/cmdline

    mount -t cgroup        # or use the lssubsys command: lssubsys -m (install the "libcgroup-tools" package to get lssubsys)
    mount -t cgroup -o memory memory /sys/fs/cgroup/memory/

    iotop    # shows per-process I/O rates (install the "iotop" package to get iotop)

    ps and top read the /proc filesystem
    ldd /bin/ls        ldd - print shared library dependencies

    ip link show    or    ip addr show    # view the host machine's current network configuration


    [root@n6 n6.conf]# docker ps && docker inspect     # look up the container process's cid (container ID)
    [root@n6 n6.conf]# pscap | grep cid

    [root@n6 n6.conf]# top
    TERM environment variable not set.
    [root@n6 n6.conf]# top -bcn 1            or        export TERM=linux

    docker tag c8a648134623 docker.io/centos:core

    rm -rf /var/lib/docker
    rm /etc/docker/key.json    // on startup, the Docker daemon assigns itself an ID; the ID is derived from the trust key, which is stored in key.json
    systemctl start docker.service
    systemctl stop docker.service
    systemctl status docker.service

    SUID and SGID binaries. Binaries of these types can be found on a Linux system with the following commands:
      find / -perm -4000 -exec ls -l {} \; 2>/dev/null
      find / -perm -2000 -exec ls -l {} \; 2>/dev/null
      The SUID/SGID permission bits can then be removed with commands similar to the following [11]:
      sudo chmod u-s filename
      sudo chmod -R g-s directory

    The /proc and sysfs filesystems have a 4 KB size limit per individual file.

    ---------------------------------------------------------------------------
    Created the overlay network as:
    docker network create -d overlay --subnet="192.168.252.0/16" --gateway="192.168.252.1" hcf
    ---------------------------------------------------------------------------
    Ran the container as:
    docker run -d --restart=always --net=hcf -p=8000:80 --name=testhttp -t nginx
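    Assuming the overlay network and the -p=8000:80 mapping above come up cleanly, the published port can be checked from the host; this check is an illustration, not part of the original log:
    curl -I http://localhost:8000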
    ---------------------------------------------------------------------------

    Having used CentOS 6 for a long time, this took a moment to adjust to. After configuring the firewall, running "service iptables save" fails with "Failed to restart iptables.service: Unit iptables.service failed to load: No such file or directory." On CentOS 7, RHEL 7 and Fedora the firewall is managed by firewalld; you can either restore the traditional management style or use the new commands.
    To restore the traditional style, run the following commands:
    systemctl stop firewalld
    systemctl mask firewalld
    Then install iptables-services:
    yum install iptables-services
    Enable it at boot:
    systemctl enable iptables
    systemctl [stop|start|restart] iptables
    #or
    service iptables [stop|start|restart]

    service iptables save
    #or
    /usr/libexec/iptables/iptables.init save
    ---------------------------------------------------------------------------
    systemctl stop firewalld
    systemctl start firewalld
    systemctl restart firewalld

    [root@localhost core]#
    ip link add gt0 type gretap remote 192.168.0.100 local 192.168.0.111 key 1
    ip addr add 10.0.0.2/24 dev gt0
    ip link set dev gt0 up
    ip link set dev gt0 down
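    The gretap commands above set up one end of the tunnel (run on 192.168.0.111, pointing at the CORE host 192.168.0.100). A matching endpoint on the CORE host would mirror the remote/local addresses; the 10.0.0.254/24 address below is only a placeholder assumption and must not collide with the emulated nodes' addresses.
    ip link add gt0 type gretap remote 192.168.0.111 local 192.168.0.100 key 1
    ip addr add 10.0.0.254/24 dev gt0
    ip link set dev gt0 up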

    route add default dev eth0
    route add default gw 192.168.1.1

    route del default gw 10.108.160.1

    brctl show
    ifconfig b.2.ac down
    brctl delbr b.2.ac


    systemctl daemon-reload
    systemctl start core-daemon.service
    systemctl stop core-daemon.service

    iptables -F

    service docker start

    systemctl --system
    busctl --system

    netstat -nr

    docker daemon -D -s overlay -H fd:// &


    +++++++++++++++++++++++++++++++++++++++++++++++++
    http://ju.outofmemory.cn/entry/114344
    --------------------------------------
    In the default configuration, Docker images and containers are stored under $DOCKROOT = /var/lib/docker. If the aufs filesystem is chosen as the storage engine, its subdirectory tree (based on Docker 1.4.1) looks like this:

    # tree .
    ├── aufs
    │   ├── diff      per-layer differences for images and containers
    │   ├── layers    per-layer parent/child relationships for images and containers
    │   └── mnt       container mount points
    ├── containers    container configuration, environment variables and log files
    ├── graph         image details, sizes, etc.
    └── repositories-aufs    image repository summary
    +++++++++++++++++++++++++++++++++++++++++++++++++
    [root@n6 n6.conf]# service docker start
    Redirecting to /bin/systemctl start  docker.service
    Failed to get D-Bus connection: Operation not permitted

    [root@n6 n6.conf]# iptables -L    

    [root@n6 n6.conf]# docker daemon
    [root@n6 n6.conf]# docker daemon -D &

    // The Docker daemon supports several different image-layer storage drivers: aufs, devicemapper, btrfs, zfs and overlay
    // OK OK OK
    [root@n6 n6.conf]# docker daemon -D -s overlay &

    [root@n6 n6.conf]#
    ping 10.108.162.164
    ping www.baidu.com
    ping 10.0.0.1

    [root@n6 n6.conf]# netstat -nr

    pkill docker
    iptables -t nat -F
    ifconfig docker0 down
    brctl delbr docker0
    sudo service docker start


    [root@n6 n6.conf]# docker daemon -D -s overlay -H fd:// &

    [root@n6 n6.conf]# ps -ef
    [root@n6 n6.conf]# lsof -i -P


    [root@n6 n6.conf]# docker run -i -t centos /bin/bash
    [root@n6 n6.conf]# docker run -it --dns=10.3.9.4 centos ping -w1 www.baidu.com
    [root@n6 n6.conf]# docker run --dns 10.3.9.4 --dns 10.3.9.5 --dns 10.3.9.6 -i -t centos /bin/bash

    [root@n6 n6.conf]# tshark -i eth0 -i docker0
    [root@n6 n6.conf]# ip addr


    docker network inspect bridge
    tcpdump -i docker0 icmp [-v]
    iptables -t filter -nvL

    [root@n6 n6.conf]# fg

    --------------------------------
    problem:

    [root@n6 n6.conf]# docker info
    DEBU[0482] Calling GET /v1.21/info                      
    DEBU[0482] GET /v1.21/info                              
    Containers: 0
    Images: 0
    Server Version: 1.9.1-fc23
    Storage Driver: overlay
     Backing Filesystem: extfs
    Execution Driver: native-0.2
    Logging Driver: json-file
    Kernel Version: 4.3.3-301.fc23.x86_64
    Operating System: Fedora 23 (Twenty Three) (containerized)
    CPUs: 4
    Total Memory: 7.527 GiB
    Name: n6
    ID: NGHW:BCHL:KXI7:FFL7:F3WM:T5ZQ:MYXU:IPW5:FJX7:YW5U:4OV2:VYZJ
    Debug mode (server): true
     File Descriptors: 11
     Goroutines: 23
     System Time: 2016-01-30T18:01:23.793648708+08:00
     EventsListeners: 0
     Init SHA1: 3fd06f588b90413408e5a41309d802ca71feeea6
     Init Path: /usr/libexec/docker/dockerinit
     Docker Root Dir: /var/lib/docker
    WARNING: No swap limit support
    WARNING: bridge-nf-call-iptables is disabled
    WARNING: bridge-nf-call-ip6tables is disabled
    [root@n6 n6.conf]#

    resolve:
    [root@localhost core]# iptables -F
    [root@localhost core]# ip6tables -F

    --------------------------------
    problem:
    [root@localhost ~]# systemctl restart docker
    Job for docker.service failed because the control process exited with error code. See "systemctl status docker.service" and "journalctl -xe" for details.
    [root@localhost ~]# docker daemon
    WARN[0000] Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.
    FATA[0000] Error starting daemon: error initializing graphdriver: "/var/lib/docker" contains other graphdrivers: overlay; Please cleanup or explicitly choose storage driver (-s <DRIVER>)

    resolve:
    # rm /var/lib/docker/overlay/ -rf

    --------------------------------
    Docker not starting: "could not delete the default bridge network: network bridge has active endpoints"

    Run
    sudo mv /var/lib/docker/network/files/ /tmp/dn-bak

    to reset your networks. Then restart docker (sudo systemctl restart docker or sudo service docker restart depending on your OS). If everything works again you can delete the dn-bak directory.

    --------------------------------

    [root@localhost ~]# gedit /etc/sysconfig/docker
    DOCKER_OPTS="--dns 8.8.8.8 --dns 75.75.75.76"
    DOCKER_OPTS="--iptables=true --dns=10.20.100.1 --dns=8.8.8.8"

    --------------------------------

    On Arch Linux I needed
    ip link set down docker0 instead of ifconfig docker0 down, and
    systemctl restart docker instead of service docker start.
    To delete all images, I did
    docker rmi $(docker images -q)

    ++++++++++++++++
    docker tag c8a648134623 docker.io/centos:core

    The configuration file /etc/sysconfig/docker-storage:
    DOCKER_STORAGE_OPTIONS="--storage-opt dm.no_warn_on_loop_devices=true"
    or
    DOCKER_STORAGE_OPTIONS="-s overlay"


    ------------------------------------------------------------------------------------------------------
    https://github.com/docker/docker/blob/master/CONTRIBUTING.md#reporting-other-issues
    ------------------------------------------------------------------------------------------------------
    Description of problem:

    `docker version`:

    `docker info`:

    `uname -a`:

    Environment details (AWS, VirtualBox, physical, etc.):

    How reproducible:

    Steps to Reproduce:
    1.
    2.
    3.

    Actual Results:

    Expected Results:

    Additional info:
    ------------------------------------------------------------------------------------------------------



