• openshift 3.11安装部署


    openshift 3.11 安装部署

    1 环境准备(所有节点)

    openshift 版本 v3.11
    1.1 机器环境
    ip              cpu  mem   hostname  OSsystem
    192.168.1.130    4    16   master  CentOS7.6
    192.168.1.132    2    4    node01  CentOS7.6
    192.168.1.135    2    4    node02  CentOS7.6
    1.2 免密码ssh登陆
    ssh-keygen 
    ssh-copy-id 192.168.1.130
    ssh-copy-id 192.168.1.132
    ssh-copy-id 192.168.1.135
    1.3 hosts解析
    vim /etc/hosts
    192.168.1.130 master
    192.168.1.132 node01
    192.168.1.135 node02
    ---------------------
    scp -rp /etc/hosts 192.168.1.132:/etc/hosts
    scp -rp /etc/hosts 192.168.1.135:/etc/hosts
    1.4 selinux和关闭防火墙

    #sed -i 's/SELINUX=.*/SELINUX=enforcing/' /etc/selinux/config
    #sed -i 's/SELINUXTYPE=.*/SELINUXTYPE=targeted/' /etc/selinux/config

    开放8443端口给openshift,api使用
    /sbin/iptables -I INPUT -p tcp --dport 8443 -j ACCEPT && service iptables save

    1.2.3 安装需要的软件包

    yum install -y wget git ntp net-tools bind-utils iptables-services bridge-utils bash-completion kexec-tools sos psacct nfs-utils yum-utils docker NetworkManager

    1.2.4 其他
    sysctl net.ipv4.ip_forward=1
    yum install pyOpenSSL httpd-tools -y 
    systemctl start NetworkManager 
    systemctl enable NetworkManager
    
    配置镜像加速器
    echo '{ "insecure-registries": ["172.30.0.0/16"], "registry-mirrors": ["https://xxxxx.mirror.aliyuncs.com"] }' >/etc/docker/daemon.json && systemctl daemon-reload && systemctl enable docker && systemctl restart docker
    1.2.5 镜像下载
    #master镜像列表(主节点)
    echo 'docker.io/cockpit/kubernetes
    docker.io/openshift/origin-haproxy-router
    docker.io/openshift/origin-service-catalog
    docker.io/openshift/origin-node
    docker.io/openshift/origin-deployer
    docker.io/openshift/origin-control-plane
    docker.io/openshift/origin-template-service-broker
    docker.io/openshift/origin-pod
    docker.io/openshift/origin-web-console
    quay.io/coreos/etcd' >image.txt && 
    while read line; do docker pull $line ; done<image.txt
    
    
    #node镜像列表(两个node节点)
    echo 'docker.io/openshift/origin-haproxy-router
    docker.io/openshift/origin-node
    docker.io/openshift/origin-deployer
    docker.io/openshift/origin-pod
    docker.io/ansibleplaybookbundle/origin-ansible-service-broker
    docker.io/openshift/origin-docker-registry' >image.txt && 
    while read line; do docker pull $line ; done<image.txt

    2 配置ansible(主节点)

    2.1 下载openshift-ansible代码

    需要下载2.6.5版本的ansible

    git clone -b release-3.11 https://github.com/openshift/openshift-ansible.git
    
    wget https://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin311/ansible-2.6.5-1.el7.noarch.rpm &&
    yum localinstall ansible-2.6.5-1.el7.noarch.rpm -y &&
    yum install -y etcd &&
    systemctl enable etcd &&
    systemctl start etcd
    2.2 配置文件
    [root@master ~]# cat /etc/ansible/hosts
    [all]
    # all下放所有机器节点的名称
    master
    node01
    node02

    [OSEv3:children]
    #这里放openshift的角色,这里有三个角色:masters,nodes,etcd
    masters
    nodes
    etcd

    [OSEv3:vars]
    #这里是openshift的安装参数
    #指定ansible使用ssh的用户为root
    ansible_ssh_user=root
    #指定方式为origin
    openshift_deployment_type=origin
    #指定版本为3.11
    openshift_release=3.11
    openshift_enable_service_catalog=false
    openshift_clock_enabled=true
    openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
    openshift_disable_check=disk_availability,docker_storage,memory_availability,docker_image_availability

    [masters]
    #master角色的机器名称
    master

    [etcd]
    #etcd角色的机器名称
    master

    [nodes]
    #node角色的机器名称
    master openshift_node_group_name='node-config-all-in-one'
    node01 openshift_node_group_name='node-config-compute'
    node02 openshift_node_group_name='node-config-compute'

    #以下为可选配置,按需取消注释:
    #openshift_hosted_registry_storage_kind=nfs
    #openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
    #openshift_hosted_registry_storage_nfs_directory=/data/docker
    #openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
    #openshift_hosted_registry_storage_volume_name=registry
    #openshift_hosted_registry_storage_volume_size=20Gi
    #openshift_clock_enabled=true
    #ansible_service_broker_install=false

    3 使用ansible来进行安装

    #安装前检查
    ansible-playbook ~/openshift-ansible/playbooks/prerequisites.yml
    #安装
    ansible-playbook ~/openshift-ansible/playbooks/deploy_cluster.yml

    #如需重新安装,先卸载
    ansible-playbook ~/openshift-ansible/playbooks/adhoc/uninstall.yml

    4 安装后配置(主节点)

    4.1 配置nfs持久卷
    yum install nfs-utils rpcbind -y 
    mkdir -p /data/v0{01..20} /data/{docker,volume,registry}
    chmod -R 777 /data 
    vim /etc/exports
    /data 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
    /data/v001 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
    /data/v002 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
    /data/v003 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
    /data/v004 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
    /data/v005 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
    /data/v006 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
    /data/v007 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
    /data/v008 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
    /data/v009 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
    /data/v010 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
    /data/docker *(rw,sync,no_all_squash,no_root_squash)
    
    systemctl restart rpcbind &&
    systemctl restart nfs && 
    systemctl enable rpcbind &&
    systemctl enable nfs
    exportfs -r
    kubectl apply -f pv-01-10.yaml
    配置文件参考章节最后 pv-01-10.yaml
    4.2 创建openshift用户
    oc login -u system:admin                                ##使用系统管理员用户登录
    htpasswd -b /etc/origin/master/htpasswd admin 123456    ##创建用户
    htpasswd -b /etc/origin/master/htpasswd dev dev         ##创建用户
    oc login -u admin                                       ##使用用户登录
    oc logout                                               ##退出当前用户
    4.3 赋予创建的用户集群管理员权限
    oc login -u system:admin &&                            
    oc adm policy add-cluster-role-to-user cluster-admin xxxxx
    4.4 访问测试

    需要添加hosts解析到本地电脑

    192.168.1.130 master
    192.168.1.132 node01
    192.168.1.135 node02

    账号密码是上面创建用户的账号密码
    http://master:8443 admin/123456

    5 其他配置

    5.1 部署集群节点管理cockpit
    yum install -y cockpit cockpit-docker cockpit-kubernetes &&
    systemctl start cockpit &&
    systemctl enable cockpit.socket &&
    iptables -A INPUT -p tcp -m state --state NEW -m tcp --dport 9090 -j ACCEPT

    https://192.168.1.130:9090 账号密码是机器的ssh账号密码

    5.2 命令补全
    #kubectl 命令补全  
    mkdir -p /usr/share/bash-completion/kubernetes
    kubectl completion bash >/usr/share/bash-completion/kubernetes/bash_completion
    echo 'source /usr/share/bash-completion/kubernetes/bash_completion' >>~/.bash_profile
    
    #oc 自动补全
    mkdir -p /usr/share/bash-completion/openshift
    oc completion bash >/usr/share/bash-completion/openshift/bash_completion
    echo "source /usr/share/bash-completion/openshift/bash_completion" >> ~/.bash_profile
    
    source ~/.bash_profile
    5.3 openshift登录
    #dev用户登录openshift:用户名:dev 密码:dev
    oc login -n openshift
    
    oc get svc -n default|grep docker-registry|awk '{print $3}'
    #查看admin用户的token
    oc whoami -t
    #登录docker私库
    docker login -u admin -p `oc whoami -t` docker-registry.default.svc:5000
    通过观察service的docker-registry的IP
    
    将svc添加每台主机的hosts做对应的解析
    5.4 常用命令行操作
    #master-restart api
    #master-restart controllers
    oc whoami -t                                            ###查看当前用户token
    oc login https://master:8443 --token=`oc whoami -t`     ###使用用户token登录
    oc get nodes                                            ###查看当前node节点状态

    6 其他

    6.1 pv-01-10.yaml文件
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: nfs-pv001
      labels:
        name: pv001
        type: nfs
    spec:
      nfs:
        path: /data/v001
        server: 192.168.1.130
      capacity:
        storage: 50Gi
      accessModes:
        - ReadWriteMany
        - ReadWriteOnce
        - ReadOnlyMany
      persistentVolumeReclaimPolicy: Retain
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: nfs-pv002
      labels:
        name: nfs-pv002
        type: nfs
    spec:
      nfs:
        path: /data/v002
        server: 192.168.1.130
      capacity:
        storage: 50Gi
      accessModes:
        - ReadWriteMany
        - ReadWriteOnce
        - ReadOnlyMany
      persistentVolumeReclaimPolicy: Retain
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: nfs-pv003
      labels:
        name: nfs-pv003
        type: nfs
    spec:
      nfs:
        path: /data/v003
        server: 192.168.1.130
      capacity:
        storage: 30Gi
      accessModes:
        - ReadWriteMany
        - ReadWriteOnce
        - ReadOnlyMany
      persistentVolumeReclaimPolicy: Retain
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: nfs-pv004
      labels:
        name: nfs-pv004
        type: nfs
    spec:
      nfs:
        path: /data/v004
        server: 192.168.1.130
      capacity:
        storage: 30Gi
      accessModes:
        - ReadWriteMany
        - ReadWriteOnce
        - ReadOnlyMany
      persistentVolumeReclaimPolicy: Retain
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: nfs-pv005
      labels:
        name: nfs-pv005
        type: nfs
    spec:
      nfs:
        path: /data/v005
        server: 192.168.1.130
      capacity:
        storage: 10Gi
      accessModes:
        - ReadWriteMany
        - ReadWriteOnce
        - ReadOnlyMany
      persistentVolumeReclaimPolicy: Retain
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: nfs-pv006
      labels:
        name: nfs-pv006
        type: nfs
    spec:
      nfs:
        path: /data/v006
        server: 192.168.1.130
      capacity:
        storage: 10Gi
      accessModes:
        - ReadWriteMany
        - ReadWriteOnce
        - ReadOnlyMany
      persistentVolumeReclaimPolicy: Retain
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: nfs-pv007
      labels:
        name: nfs-pv007
        type: nfs
    spec:
      nfs:
        path: /data/v007
        server: 192.168.1.130
      capacity:
        storage: 5Gi
      accessModes:
        - ReadWriteMany
        - ReadWriteOnce
        - ReadOnlyMany
      persistentVolumeReclaimPolicy: Retain
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: nfs-pv008
      labels:
        name: nfs-pv008
        type: nfs
    spec:
      nfs:
        path: /data/v008
        server: 192.168.1.130
      capacity:
        storage: 5Gi
      accessModes:
        - ReadWriteMany
        - ReadWriteOnce
        - ReadOnlyMany
      persistentVolumeReclaimPolicy: Retain
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: nfs-pv009
      labels:
        name: nfs-pv009
        type: nfs
    spec:
      nfs:
        path: /data/v009
        server: 192.168.1.130
      capacity:
        storage: 2Gi
      accessModes:
        - ReadWriteMany
        - ReadWriteOnce
        - ReadOnlyMany
      persistentVolumeReclaimPolicy: Retain
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: nfs-pv010
      labels:
        name: nfs-pv010
        type: nfs
    spec:
      nfs:
        path: /data/v010
        server: 192.168.1.130
      capacity:
        storage: 2Gi
      accessModes:
        - ReadWriteMany
        - ReadWriteOnce
        - ReadOnlyMany
      persistentVolumeReclaimPolicy: Retain
  • 相关阅读:
    vue vmodel input type=checkbox的问题
    springboot配置文件优先级
    原生js实现复制功能
    Long.valueOf和Long.parseLong的区别
    程序员学习参考
    国外开源项目
    .NET快速入门教程
    Microsoft Update Catalog 离线安装包下载
    php header示例代码
    CentOS下iptables设置
  • 原文地址:https://www.cnblogs.com/wzstudy/p/11492782.html
Copyright © 2020-2023  润新知