CentOS 6.5: Installing Oracle 11g R2 RAC on VirtualBox


    The previous RAC post relied heavily on screenshots, so this one is written as a plain-text walkthrough. That post is also fairly old; the cluster described here was recently reinstalled from scratch.

    --2018-10-29

    1 OS initialization

    [root@rac1 yum.repos.d]# lsb_release -a
    LSB Version: :base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:printing-4.0-amd64:printing-4.0-noarch
    Distributor ID: CentOS
    Description: CentOS release 6.5 (Final)
    Release: 6.5
    Codename: Final
    [root@rac1 yum.repos.d]# uname -r
    2.6.32-431.el6.x86_64
    [root@rac1 ~]# ls -l /dev/cdrom |grep cdrom
    lrwxrwxrwx. 1 root root 3 Oct 26 2018 /dev/cdrom -> sr0
    [root@rac1 ~]# mount -t iso9660 /dev/cdrom /mnt/
    mount: block device /dev/sr0 is write-protected, mounting read-only
    [root@rac1 ~]# cd /etc/yum.repos.d/
    [root@rac1 yum.repos.d]# mv CentOS-Base.repo CentOS-Base.repo.bk
    [root@rac1 yum.repos.d]# vim public-yum-ol6.repo
    [root@rac1 yum.repos.d]# cat public-yum-ol6.repo
    [ol6_latest]
    name=CentOS6 $releasever Latest ($basearch)
    gpgkey=file:///mnt/RPM-GPG-KEY-CentOS-6
    baseurl=file:///mnt
    gpgcheck=1
    enabled=1
    [root@rac1 yum.repos.d]# yum clean all
    [root@rac1 yum.repos.d]# yum makecache
    [root@rac1 yum.repos.d]# yum install lrzsz -y

    [root@rac1 opt]# vi /etc/sysconfig/selinux
    [root@rac1 opt]# setenforce 0
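    The selinux file edit itself is not shown above; the usual change, so the setenforce 0 setting survives a reboot, is (disabled or permissive both work for the install):
    # /etc/sysconfig/selinux
    SELINUX=disabled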
    [root@rac1 opt]# service iptables stop
    iptables: Setting chains to policy ACCEPT: filter [ OK ]
    iptables: Flushing firewall rules: [ OK ]
    iptables: Unloading modules: [ OK ]
    [root@rac1 opt]# chkconfig iptables off

    2 Oracle environment setup

    [root@rac1 opt]# /usr/sbin/groupadd -g 1000 oinstall
    [root@rac1 opt]# /usr/sbin/groupadd -g 1020 asmadmin
    [root@rac1 opt]# /usr/sbin/groupadd -g 1021 asmdba
    [root@rac1 opt]# /usr/sbin/groupadd -g 1022 asmoper
    [root@rac1 opt]# /usr/sbin/groupadd -g 1031 dba
    [root@rac1 opt]# /usr/sbin/groupadd -g 1032 oper
    [root@rac1 opt]# useradd -u 1100 -g oinstall -G asmadmin,asmdba,asmoper,oper,dba grid
    [root@rac1 opt]# useradd -u 1101 -g oinstall -G dba,asmdba,oper oracle
    [root@rac1 opt]# mkdir -p /u01/app/11.2.0/grid
    [root@rac1 opt]# mkdir -p /u01/app/grid
    [root@rac1 opt]# mkdir /u01/app/oracle
    [root@rac1 opt]# chown -R grid:oinstall /u01
    [root@rac1 opt]# chown oracle:oinstall /u01/app/oracle
    [root@rac1 opt]# chmod -R 775 /u01/
    [root@rac1 ~]# passwd grid
    [root@rac1 ~]# passwd oracle
    [root@rac1 opt]# grep MemTotal /proc/meminfo
    MemTotal: 3088656 kB
    [root@rac1 opt]# vi /etc/sysctl.conf
    [root@rac1 opt]# sysctl -p
    [root@rac1 opt]# vim /etc/security/limits.conf
    [root@rac1 opt]# vi /etc/pam.d/login
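    The contents written to these three files are not captured above (the sysctl.conf entries themselves are shown in section 8 below). A typical set for 11gR2, following the standard install guide minimums:
    # /etc/security/limits.conf
    grid   soft nproc  2047
    grid   hard nproc  16384
    grid   soft nofile 1024
    grid   hard nofile 65536
    oracle soft nproc  2047
    oracle hard nproc  16384
    oracle soft nofile 1024
    oracle hard nofile 65536
    # /etc/pam.d/login
    session required pam_limits.so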

    [root@rac1 yum.repos.d]# echo "nameserver 8.8.8.8" >> /etc/resolv.conf
    [root@rac1 yum.repos.d]# echo "DNS1=8.8.8.8" >> /etc/sysconfig/network-scripts/ifcfg-eth0

    yum install gcc gcc-c++ libaio* glibc* glibc-devel* ksh libgcc* libstdc++* libstdc++-devel* make sysstat \
    unixODBC* compat-libstdc++-33.x86_64 elfutils-libelf-devel glibc.i686 compat-libcap1 smartmontools unzip openssh* parted cvuqdisk -y
    [root@rac1 ~]# yum install ntpdate -y
    [root@rac1 ~]# ntpdate time.windows.com
    [root@rac1 ~]# date
    yum install xterm xclock -y
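    ntpdate is only a one-shot sync. On 11gR2, if ntpd is left unconfigured, Oracle's Cluster Time Synchronization Service (CTSS) runs in active mode and keeps the nodes in sync itself, so one common setup is to disable ntpd entirely:
    service ntpd stop
    chkconfig ntpd off
    mv /etc/ntp.conf /etc/ntp.conf.bak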

    3 IP planning

    [root@rac1 ~]# vim /etc/hosts
    #Public IP
    10.15.7.11 rac1
    10.15.7.12 rac2
    #Private IP
    172.168.1.18 rac1priv
    172.168.1.19 rac2priv
    #Virtual IP
    10.15.7.13 rac1vip
    10.15.7.14 rac2vip
    #Scan IP
    10.15.7.15 racscan
    [root@rac1 ~]# cat /etc/sysconfig/network
    NETWORKING=yes
    HOSTNAME=rac1
    GATEWAY=10.15.4.1
    [root@rac1 ~]# vim .bash_profile
    umask 022
    DISPLAY=10.15.7.115:0.0 ; export DISPLAY
    HISTTIMEFORMAT="%Y-%m-%d %H:%M:%S "
    export HISTTIMEFORMAT
    [root@rac1 ~]# source .bash_profile

    4 bash profiles for grid and oracle

    [grid@rac1 ~]$ cat .bash_profile
    # .bash_profile

    # Get the aliases and functions
    if [ -f ~/.bashrc ]; then
    . ~/.bashrc
    fi

    # User specific environment and startup programs

    PATH=$PATH:$HOME/bin

    export PATH
    export TMP=/tmp
    export TMPDIR=$TMP
    export ORACLE_SID=+ASM1
    export ORACLE_BASE=/u01/app/grid
    export ORACLE_HOME=/u01/app/11.2.0/grid
    export PATH=/usr/sbin:$PATH
    export PATH=$ORACLE_HOME/bin:$PATH
    export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
    export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
    umask 022
    [root@rac1 ~]# su - oracle
    [oracle@rac1 ~]$ vi .bash_profile
    [root@rac1 ~]# cat /home/oracle/.bash_profile
    # .bash_profile

    # Get the aliases and functions
    if [ -f ~/.bashrc ]; then
    . ~/.bashrc
    fi

    # User specific environment and startup programs

    PATH=$PATH:$HOME/bin

    export PATH

    export TMP=/tmp
    export TMPDIR=$TMP
    export ORACLE_SID=bol1
    export ORACLE_UNQNAME=bol
    export ORACLE_BASE=/u01/app/oracle
    export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
    export TNS_ADMIN=$ORACLE_HOME/network/admin
    export PATH=/usr/sbin:$PATH
    export PATH=$ORACLE_HOME/bin:$PATH
    export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib

    Source each .bash_profile so the settings take effect:
    [root@rac1 ~]# source /home/grid/.bash_profile
    [root@rac1 ~]# source /home/oracle/.bash_profile

    5 Clone rac1 and adjust the copy

    Clone rac1 into a new VM rac2 and adjust it (or configure rac2 from scratch).
    [root@rac2 ~]# cat /home/grid/.bash_profile
    # .bash_profile

    # Get the aliases and functions
    if [ -f ~/.bashrc ]; then
    . ~/.bashrc
    fi

    # User specific environment and startup programs

    PATH=$PATH:$HOME/bin
    export PATH
    export TMP=/tmp
    export TMPDIR=$TMP
    export ORACLE_SID=+ASM2
    export ORACLE_BASE=/u01/app/grid
    export ORACLE_HOME=/u01/app/11.2.0/grid
    export PATH=/usr/sbin:$PATH
    export PATH=$ORACLE_HOME/bin:$PATH
    export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
    export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
    umask 022
    export PATH
    [root@rac2 ~]# cat /home/oracle/.bash_profile
    # .bash_profile

    # Get the aliases and functions
    if [ -f ~/.bashrc ]; then
    . ~/.bashrc
    fi

    # User specific environment and startup programs

    PATH=$PATH:$HOME/bin
    export TMP=/tmp
    export TMPDIR=$TMP
    export ORACLE_SID=bol2
    export ORACLE_UNQNAME=bol
    export ORACLE_BASE=/u01/app/oracle
    export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
    export TNS_ADMIN=$ORACLE_HOME/network/admin
    export PATH=/usr/sbin:$PATH
    export PATH=$ORACLE_HOME/bin:$PATH
    export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
    export PATH

    6 Configure SSH user equivalence

    Generate keys on each node:
    [root@rac1 ~]# su - oracle
    [oracle@rac1 ~]$ mkdir ~/.ssh
    [oracle@rac1 ~]$ chmod 700 ~/.ssh
    [oracle@rac1 ~]$ ssh-keygen -t rsa
    [oracle@rac1 ~]$ ssh-keygen -t dsa
    [root@rac2 ~]# su - oracle
    [oracle@rac2 ~]$ mkdir ~/.ssh
    [oracle@rac2 ~]$ chmod 700 ~/.ssh
    [oracle@rac2 ~]$ ssh-keygen -t rsa
    [oracle@rac2 ~]$ ssh-keygen -t dsa
    Build the mutual-trust file on node 1:
    [oracle@rac1 ~]$ touch ~/.ssh/authorized_keys
    [oracle@rac1 ~]$ cd ~/.ssh
    [oracle@rac1 .ssh]$ ssh rac1 cat ~/.ssh/id_rsa.pub >> authorized_keys
    [oracle@rac1 .ssh]$ ssh rac2 cat ~/.ssh/id_rsa.pub >> authorized_keys
    [oracle@rac1 .ssh]$ ssh rac1 cat ~/.ssh/id_dsa.pub >> authorized_keys
    [oracle@rac1 .ssh]$ ssh rac2 cat ~/.ssh/id_dsa.pub >> authorized_keys
    Copy the authorized_keys file holding the public keys from rac1 to rac2:
    [oracle@rac1 .ssh]$ pwd
    /home/oracle/.ssh
    [oracle@rac1 .ssh]$ scp authorized_keys oracle@10.15.7.12:/home/oracle/.ssh/.

    Set permissions on the key file. On every node run:
    $ chmod 600 ~/.ssh/authorized_keys

    Enable user equivalence for this session.
    On the node where OUI will run (rac1 here), as the oracle user:
    [oracle@rac1 .ssh]$ exec /usr/bin/ssh-agent $SHELL
    [oracle@rac1 .ssh]$ ssh-add
    Identity added: /home/oracle/.ssh/id_rsa (/home/oracle/.ssh/id_rsa)
    Identity added: /home/oracle/.ssh/id_dsa (/home/oracle/.ssh/id_dsa)

    Verify the SSH setup.
    As the oracle user, run on every node:
    ssh rac1 date
    ssh rac2 date
    ssh rac1priv date
    ssh rac2priv date
    ===========
    [oracle@rac1 .ssh]$ su - grid
    Password:
    [grid@rac1 ~]$ mkdir ~/.ssh
    [grid@rac1 ~]$ ssh-keygen -t rsa
    [grid@rac1 ~]$ ssh-keygen -t dsa
    [grid@rac1 ~]$ chmod 700 ~/.ssh

    [oracle@rac2 .ssh]$ su - grid
    Password:
    [grid@rac2 ~]$ mkdir ~/.ssh
    [grid@rac2 ~]$ ssh-keygen -t rsa
    [grid@rac2 ~]$ ssh-keygen -t dsa
    [grid@rac2 ~]$ chmod 700 ~/.ssh

    [grid@rac1 ~]$ touch ~/.ssh/authorized_keys
    [grid@rac1 ~]$ cd ~/.ssh
    [grid@rac1 .ssh]$ ssh rac1 cat ~/.ssh/id_rsa.pub >> authorized_keys
    [grid@rac1 .ssh]$ ssh rac2 cat ~/.ssh/id_rsa.pub >> authorized_keys
    [grid@rac1 .ssh]$ ssh rac1 cat ~/.ssh/id_dsa.pub >> authorized_keys
    [grid@rac1 .ssh]$ ssh rac2 cat ~/.ssh/id_dsa.pub >> authorized_keys
    [grid@rac1 .ssh]$ scp authorized_keys grid@rac2:/home/grid/.ssh/.
    [grid@rac1 .ssh]$ exec /usr/bin/ssh-agent $SHELL
    [grid@rac1 .ssh]$ ssh-add
    Identity added: /home/grid/.ssh/id_rsa (/home/grid/.ssh/id_rsa)
    Identity added: /home/grid/.ssh/id_dsa (/home/grid/.ssh/id_dsa)

    [grid@rac1 .ssh]$
    ssh rac1 date
    ssh rac2 date
    ssh rac1priv date
    ssh rac2priv date

    [grid@rac2 .ssh]$
    ssh rac1 date
    ssh rac2 date
    ssh rac1priv date
    ssh rac2priv date
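    The same four checks can be scripted so that any leftover password or host-key prompt stands out (run as both oracle and grid on each node):
    for h in rac1 rac2 rac1priv rac2priv; do ssh "$h" date; done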

    7 Configure shared storage

    Using udev:
    [root@rac1 ~]# cat /etc/issue
    CentOS release 6.5 (Final)
    Kernel \r on an \m
    [root@rac1 ~]# echo "options=--whitelisted --replace-whitespace" >> /etc/scsi_id.config
    [root@rac1 ~]# for i in b c d e f g;
    > do
    > echo "KERNEL==\"sd*\", BUS==\"scsi\", PROGRAM==\"/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\", RESULT==\"`/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", NAME=\"asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\"" >> /etc/udev/rules.d/99-oracle-asmdevices.rules
    > done
    ===
    for i in b c d e f g;
    do
    echo "KERNEL==\"sd*\", BUS==\"scsi\", PROGRAM==\"/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\", RESULT==\"`/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", NAME=\"asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\"" >> /etc/udev/rules.d/99-oracle-asmdevices.rules
    done
    ===
    [root@rac1 ~]# cat /etc/udev/rules.d/99-oracle-asmdevices.rules
    KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VBae01435a-3e52303e", NAME="asm-diskb", OWNER="grid", GROUP="asmadmin", MODE="0660"
    KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VB714cf53b-64b30d94", NAME="asm-diskc", OWNER="grid", GROUP="asmadmin", MODE="0660"
    KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VBdbf678d2-9ce1dd68", NAME="asm-diskd", OWNER="grid", GROUP="asmadmin", MODE="0660"
    KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VBfe506a5f-89a411c1", NAME="asm-diske", OWNER="grid", GROUP="asmadmin", MODE="0660"
    KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VB0b346c6a-a4a60e78", NAME="asm-diskf", OWNER="grid", GROUP="asmadmin", MODE="0660"
    KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VB9a904921-a5958d54", NAME="asm-diskg", OWNER="grid", GROUP="asmadmin", MODE="0660"
    [root@rac1 ~]# /sbin/start_udev
    Starting udev: [ OK ]
    [root@rac1 ~]# ls -l /dev/asm*
    brw-rw---- 1 grid asmadmin 8, 16 Oct 29 08:15 /dev/asm-diskb
    brw-rw---- 1 grid asmadmin 8, 32 Oct 29 08:15 /dev/asm-diskc
    brw-rw---- 1 grid asmadmin 8, 48 Oct 29 08:15 /dev/asm-diskd
    brw-rw---- 1 grid asmadmin 8, 64 Oct 29 08:15 /dev/asm-diske
    brw-rw---- 1 grid asmadmin 8, 80 Oct 29 08:15 /dev/asm-diskf
    brw-rw---- 1 grid asmadmin 8, 96 Oct 29 08:15 /dev/asm-diskg
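
    rac2 needs the identical rules file; since the disks are shared, the scsi_id values are the same on both nodes. One way to replicate it (assuming root SSH between the nodes):
    scp /etc/udev/rules.d/99-oracle-asmdevices.rules rac2:/etc/udev/rules.d/
    ssh rac2 /sbin/start_udev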

    [root@rac2 ~]# ls -l /dev/asm*
    brw-rw---- 1 grid asmadmin 8, 16 Oct 29 08:19 /dev/asm-diskb
    brw-rw---- 1 grid asmadmin 8, 32 Oct 29 08:19 /dev/asm-diskc
    brw-rw---- 1 grid asmadmin 8, 48 Oct 29 08:19 /dev/asm-diskd
    brw-rw---- 1 grid asmadmin 8, 64 Oct 29 08:19 /dev/asm-diske
    brw-rw---- 1 grid asmadmin 8, 80 Oct 29 08:19 /dev/asm-diskf
    brw-rw---- 1 grid asmadmin 8, 96 Oct 29 08:19 /dev/asm-diskg
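
    To double-check the mapping on either node, compare each disk's scsi_id against the RESULT== strings in the rules file (a quick loop over the same devices as above):
    for i in b c d e f g; do
      printf 'sd%s %s\n' "$i" "$(/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i)"
    done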

    8 Install Grid Infrastructure

    [root@rac1 opt]# unzip p13390677_112040_Linux-x86-64_3of7.zip

    /usr/bin/xterm -ls -display $DISPLAY
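
    With the X display confirmed, the installer is launched as the grid user. The path below assumes the zip above was extracted in /opt (it unpacks into a grid/ subdirectory):
    [root@rac1 opt]# su - grid
    [grid@rac1 ~]$ cd /opt/grid
    [grid@rac1 grid]$ ./runInstaller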
    Hit [INS-41104]: the installer could not find the private network interface.
    After several attempts, the fix was: delete eth1, reboot, get eth1's HWADDR from ifconfig, copy ifcfg-eth0 to ifcfg-eth1, then edit HWADDR, IPADDR, DEVICE and NAME and comment out the UUID.
    The resulting files:
    ===
    [root@rac1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
    DEVICE=eth0
    TYPE=Ethernet
    UUID=7e047ed1-c530-478f-8704-65565ac802ab
    ONBOOT=yes
    NM_CONTROLLED=yes
    BOOTPROTO=none
    HWADDR=08:00:27:0E:DE:0E
    IPADDR=10.15.7.11
    PREFIX=22
    GATEWAY=10.15.4.1
    DNS1=8.8.8.8
    DEFROUTE=yes
    IPV4_FAILURE_FATAL=yes
    IPV6INIT=no
    NAME="System eth0"

    [root@rac1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
    DEVICE=eth1
    TYPE=Ethernet
    #UUID=7e047ed1-c530-478f-8704-65565ac802ab
    ONBOOT=yes
    NM_CONTROLLED=yes
    BOOTPROTO=none
    HWADDR=08:00:27:8A:F5:38
    IPADDR=1.1.1.1
    PREFIX=22
    GATEWAY=10.15.4.1
    DNS1=8.8.8.8
    DEFROUTE=yes
    IPV4_FAILURE_FATAL=yes
    IPV6INIT=no
    NAME="System eth1"

    [root@rac2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
    DEVICE=eth0
    TYPE=Ethernet
    UUID=288aa282-1d1f-4578-bd6a-1b45a48892b1
    ONBOOT=yes
    NM_CONTROLLED=yes
    BOOTPROTO=none
    HWADDR=08:00:27:33:6D:5A
    IPADDR=10.15.7.12
    PREFIX=22
    GATEWAY=10.15.4.1
    DEFROUTE=yes
    IPV4_FAILURE_FATAL=yes
    IPV6INIT=no
    NAME="System eth0"
    DNS1=8.8.8.8
    [root@rac2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
    DEVICE=eth1
    TYPE=Ethernet
    #UUID=288aa282-1d1f-4578-bd6a-1b45a48892b1
    ONBOOT=yes
    NM_CONTROLLED=yes
    BOOTPROTO=none
    HWADDR=08:00:27:DC:E4:01
    IPADDR=1.1.1.2
    PREFIX=22
    GATEWAY=10.15.4.1
    DEFROUTE=yes
    IPV4_FAILURE_FATAL=yes
    IPV6INIT=no
    NAME="System eth1"
    DNS1=8.8.8.8

    =======
    [root@rac1 ~]# cat /etc/hosts
    127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
    10.15.7.11 rac1
    1.1.1.1 rac1priv
    10.15.7.13 rac1vip
    10.15.7.12 rac2
    1.1.1.2 rac2priv
    10.15.7.14 rac2vip
    10.15.7.15 scanip

    Ping test from both nodes: ping the peer's public and private IPs. Once all pings succeed, continue as follows:
    [root@rac1 ~]# ping 1.1.1.2
    [root@rac1 ~]# ping 10.15.7.12
    [root@rac1 ~]# ping rac2priv
    [root@rac2 ~]# ping 1.1.1.1
    [root@rac2 ~]# ping 10.15.7.11
    [root@rac2 ~]# ping rac1priv

    ssh rac1 date
    ssh rac2 date
    ssh rac1priv date
    ssh rac2priv date

    # yum install -y ksh
    [root@rac2 opt]# rpm -ivh --nodeps pdksh-5.2.14-36.el5.i386.rpm
    [root@rac2 ~]# yum list|grep ksh
    ksh.x86_64 20120801-37.el6_9 @base
    pdksh.i386 5.2.14-36.el5 installed
    mksh.x86_64 39-7.el6_4.1 l6_latest

    [root@rac1 rpm]# rpm -ivh cvuqdisk-1.0.9-1.rpm
    [root@rac1 ~]# vi /etc/sysctl.conf
    #ORACLE SETTING
    fs.aio-max-nr = 1048576
    fs.file-max = 6815744
    kernel.shmmax = 68719476736
    kernel.shmall = 4294967296
    kernel.shmmni = 4096
    kernel.sem = 250 32000 100 128
    net.ipv4.ip_local_port_range = 9000 65500
    net.core.rmem_default = 262144
    net.core.rmem_max = 4194304
    net.core.wmem_default = 262144
    net.core.wmem_max = 1048576
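
    These settings take effect after sysctl -p and must be present on both nodes; one quick way to replicate them (assuming root SSH to rac2):
    [root@rac1 ~]# sysctl -p
    [root@rac1 ~]# scp /etc/sysctl.conf rac2:/etc/sysctl.conf
    [root@rac1 ~]# ssh rac2 sysctl -p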

    Run the scripts on rac1:
    [root@rac1 rpm]# /u01/app/oraInventory/orainstRoot.sh
    [root@rac1 rpm]# /u01/app/11.2.0/grid/root.sh
    Then on rac2:
    [root@rac2 grid]# /u01/app/oraInventory/orainstRoot.sh
    [root@rac2 grid]# /u01/app/11.2.0/grid/root.sh

    [root@rac1 ~]# /u01/app/oraInventory/orainstRoot.sh
    Changing permissions of /u01/app/oraInventory.
    Adding read,write permissions for group.
    Removing read,write,execute permissions for world.

    Changing groupname of /u01/app/oraInventory to oinstall.
    The execution of the script is complete.
    [root@rac1 ~]# /u01/app/11.2.0/grid/root.sh
    Performing root user operation for Oracle 11g

    The following environment variables are set as:
    ORACLE_OWNER= grid
    ORACLE_HOME= /u01/app/11.2.0/grid

    Enter the full pathname of the local bin directory: [/usr/local/bin]:
    Copying dbhome to /usr/local/bin ...
    Copying oraenv to /usr/local/bin ...
    Copying coraenv to /usr/local/bin ...


    Creating /etc/oratab file...
    Entries will be added to the /etc/oratab file as needed by
    Database Configuration Assistant when a database is created
    Finished running generic part of root script.
    Now product-specific root actions will be performed.
    Using configuration parameter file: /u01/app/11.2.0/grid/crs/install/crsconfig_params
    Creating trace directory
    User ignored Prerequisites during installation
    Installing Trace File Analyzer
    OLR initialization - successful
    root wallet
    root wallet cert
    root cert export
    peer wallet
    profile reader wallet
    pa wallet
    peer wallet keys
    pa wallet keys
    peer cert request
    pa cert request
    peer cert
    pa cert
    peer root cert TP
    profile reader root cert TP
    pa root cert TP
    peer pa cert TP
    pa peer cert TP
    profile reader pa cert TP
    profile reader peer cert TP
    peer user cert
    pa user cert
    Adding Clusterware entries to upstart
    CRS-2672: Attempting to start 'ora.mdnsd' on 'rac1'
    CRS-2676: Start of 'ora.mdnsd' on 'rac1' succeeded
    CRS-2672: Attempting to start 'ora.gpnpd' on 'rac1'
    CRS-2676: Start of 'ora.gpnpd' on 'rac1' succeeded
    CRS-2672: Attempting to start 'ora.cssdmonitor' on 'rac1'
    CRS-2672: Attempting to start 'ora.gipcd' on 'rac1'
    CRS-2676: Start of 'ora.cssdmonitor' on 'rac1' succeeded
    CRS-2676: Start of 'ora.gipcd' on 'rac1' succeeded
    CRS-2672: Attempting to start 'ora.cssd' on 'rac1'
    CRS-2672: Attempting to start 'ora.diskmon' on 'rac1'
    CRS-2676: Start of 'ora.diskmon' on 'rac1' succeeded
    CRS-2676: Start of 'ora.cssd' on 'rac1' succeeded

    ASM created and started successfully.

    Disk Group OCR created successfully.

    clscfg: -install mode specified
    Successfully accumulated necessary OCR keys.
    Creating OCR keys for user 'root', privgrp 'root'..
    Operation successful.
    CRS-4256: Updating the profile
    Successful addition of voting disk 62cb5cc42bdc4ff8bfd1069706eeed4b.
    Successfully replaced voting disk group with +OCR.
    CRS-4256: Updating the profile
    CRS-4266: Voting file(s) successfully replaced
    ## STATE File Universal Id File Name Disk group
    -- ----- ----------------- --------- ---------
    1. ONLINE 62cb5cc42bdc4ff8bfd1069706eeed4b (/dev/asm-diskb) [OCR]
    Located 1 voting disk(s).
    CRS-2672: Attempting to start 'ora.asm' on 'rac1'
    CRS-2676: Start of 'ora.asm' on 'rac1' succeeded
    CRS-2672: Attempting to start 'ora.OCR.dg' on 'rac1'
    CRS-2676: Start of 'ora.OCR.dg' on 'rac1' succeeded
    Configure Oracle Grid Infrastructure for a Cluster ... succeeded

    [root@rac1 ~]# su - grid
    [grid@rac1 ~]$ crsctl check crs
    CRS-4638: Oracle High Availability Services is online
    CRS-4537: Cluster Ready Services is online
    CRS-4529: Cluster Synchronization Services is online
    CRS-4533: Event Manager is online
    [grid@rac1 ~]$ crs_stat -t -v
    Name Type R/RA F/FT Target State Host
    ----------------------------------------------------------------------
    ora....ER.lsnr ora....er.type 0/5 0/ ONLINE ONLINE rac1
    ora....N1.lsnr ora....er.type 0/5 0/0 ONLINE ONLINE rac1
    ora.OCR.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1
    ora.asm ora.asm.type 0/5 0/ ONLINE ONLINE rac1
    ora.cvu ora.cvu.type 0/5 0/0 ONLINE ONLINE rac1
    ora.gsd ora.gsd.type 0/5 0/ OFFLINE OFFLINE
    ora....network ora....rk.type 0/5 0/ ONLINE ONLINE rac1
    ora.oc4j ora.oc4j.type 0/1 0/2 ONLINE ONLINE rac1
    ora.ons ora.ons.type 0/3 0/ ONLINE ONLINE rac1
    ora....SM1.asm application 0/5 0/0 ONLINE ONLINE rac1
    ora....C1.lsnr application 0/5 0/0 ONLINE ONLINE rac1
    ora.rac1.gsd application 0/5 0/0 OFFLINE OFFLINE
    ora.rac1.ons application 0/3 0/0 ONLINE ONLINE rac1
    ora.rac1.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac1
    ora....SM2.asm application 0/5 0/0 ONLINE ONLINE rac2
    ora....C2.lsnr application 0/5 0/0 ONLINE ONLINE rac2
    ora.rac2.gsd application 0/5 0/0 OFFLINE OFFLINE
    ora.rac2.ons application 0/3 0/0 ONLINE ONLINE rac2
    ora.rac2.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac2
    ora.scan1.vip ora....ip.type 0/0 0/0 ONLINE ONLINE rac1

    [grid@rac1 ~]$ olsnodes -n # list the nodes configured in the cluster
    rac1 1
    rac2 2
    [grid@rac1 ~]$ olsnodes -n -i -s -t
    rac1 1 rac1vip Active Unpinned
    rac2 2 rac2vip Active Unpinned
    [grid@rac1 ~]$ ps -ef|grep lsnr|grep -v 'grep'|grep -v 'ocfs'|awk '{print$9}'
    LISTENER_SCAN1
    LISTENER
    [grid@rac1 ~]$ srvctl status asm -a
    ASM is running on rac2,rac1
    ASM is enabled.
    [grid@rac1 ~]$ crsctl query css votedisk # show the clusterware voting disk info
    ## STATE File Universal Id File Name Disk group
    -- ----- ----------------- --------- ---------
    1. ONLINE 62cb5cc42bdc4ff8bfd1069706eeed4b (/dev/asm-diskb) [OCR]
    Located 1 voting disk(s).
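
    A fuller post-install check can be run with the cluster verification utility shipped in the grid home (output omitted here):
    [grid@rac1 ~]$ cluvfy stage -post crsinst -n rac1,rac2 -verbose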

    9 asmca

    Run on node rac1 only. Switch to the grid user and launch asmca:
    [root@rac1 ~]# su - grid
    [grid@rac1 ~]$ asmca

    See the previous RAC post for the step-by-step screenshots.
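
    If the GUI is not available, disk groups can also be created in SQL*Plus against the ASM instance. A sketch only; the disk paths and redundancy below are illustrative, not the values asmca actually recorded:
    [grid@rac1 ~]$ sqlplus / as sysasm <<'EOF'
    CREATE DISKGROUP DATA EXTERNAL REDUNDANCY DISK '/dev/asm-diskc', '/dev/asm-diskd';
    CREATE DISKGROUP FRA EXTERNAL REDUNDANCY DISK '/dev/asm-diske';
    EOF
    A group created this way is only mounted on the local node; rac2 would need ALTER DISKGROUP ... MOUNT (asmca takes care of that automatically).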

    10 Install the database software
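
    The database software comes from parts 1 and 2 of the same 11.2.0.4 patch set (part 3 was the grid zip used in section 8). A likely sequence, assuming the same /opt staging area; the root.sh scripts below are run when OUI prompts for them:
    [root@rac1 opt]# unzip p13390677_112040_Linux-x86-64_1of7.zip
    [root@rac1 opt]# unzip p13390677_112040_Linux-x86-64_2of7.zip
    [root@rac1 opt]# su - oracle
    [oracle@rac1 ~]$ cd /opt/database
    [oracle@rac1 database]$ ./runInstaller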

    [root@rac1 ~]# /u01/app/oracle/product/11.2.0/db_1/root.sh
    [root@rac2 ~]# /u01/app/oracle/product/11.2.0/db_1/root.sh

    11 dbca

    [root@rac1 ~]# su - oracle
    [oracle@rac1 ~]$ dbca
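
    dbca can also run non-interactively. A silent-mode sketch with placeholder passwords and the storage choices used in this post (check dbca -help for the exact flags on your version):
    dbca -silent -createDatabase \
      -templateName General_Purpose.dbc \
      -gdbName bol -sid bol \
      -sysPassword change_me -systemPassword change_me \
      -storageType ASM -diskGroupName DATA -recoveryGroupName FRA \
      -nodelist rac1,rac2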

    12 RAC maintenance

    [grid@rac1 ~]$ crs_stat -t
    [grid@rac1 ~]$ crsctl check cluster
    [grid@rac1 ~]$ crsctl check crs
    [grid@rac1 ~]$ srvctl status database -d bol
    $ olsnodes
    $ olsnodes -n
    $ olsnodes -n -i -s -t
    $ crsctl query css votedisk
    $ srvctl config scan # show the cluster SCAN VIP info
    [grid@rac1 ~]$ srvctl config scan
    SCAN name: scanip, Network: 1/10.15.4.0/255.255.252.0/eth0
    SCAN VIP name: scan1, IP: /scanip/10.15.7.15
    [grid@rac1 ~]$ srvctl config scan_listener # show the cluster SCAN listener info
    SCAN Listener LISTENER_SCAN1 exists. Port: TCP:1521

    12.1 Start and stop the cluster database


    Start and stop the database across the whole cluster, as the grid user:
    [grid@rac1 ~]$ srvctl stop database -d bol
    [grid@rac1 ~]$ srvctl start database -d bol
    [grid@rac1 ~]$ srvctl status database -d bol
    To shut down all nodes, switch to root:
    [root@rac1 bin]# pwd
    /u01/app/11.2.0/grid/bin
    [root@rac1 bin]# ./crs_stat -t -v # confirm cluster resources and services
    [root@rac1 bin]# ./crsctl stop crs # note: this only stops the local node
    [root@rac1 bin]# ./crsctl stop cluster -all # stop the whole cluster
    [root@rac1 bin]# ./crs_stat -t -v
    Start up again:
    [root@rac1 bin]# ./crs_stat -t -v
    [root@rac1 bin]# ./crsctl start cluster -all
    [root@rac1 bin]# ./srvctl status database -d bol
    [root@rac1 bin]# ./srvctl start database -d bol
    [oracle@rac1 ~]$ emctl start dbconsole # start OEM Database Control
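
    A common ordering for a full planned shutdown is database first, then clusterware, using the same commands as above:
    [root@rac1 ~]# su - oracle -c 'srvctl stop database -d bol'
    [root@rac1 ~]# /u01/app/11.2.0/grid/bin/crsctl stop cluster -all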

    Status of all instances and services:
    [oracle@rac1 ~]$ srvctl status database -d bol
    Instance bol1 is running on node rac1
    Instance bol2 is running on node rac2
    Status of a single instance:
    [oracle@rac1 ~]$ srvctl status instance -d bol -i bol1
    Instance bol1 is running on node rac1
    List all configured databases:
    [oracle@rac1 ~]$ srvctl config database
    bol
    Status of the node applications on a particular node:
    [oracle@rac1 ~]$ srvctl status nodeapps -n rac1
    VIP rac1vip is enabled
    VIP rac1vip is running on node: rac1
    Network is enabled
    Network is running on node: rac1
    GSD is disabled
    GSD is not running on node: rac1
    ONS is enabled
    ONS daemon is running on node: rac1
    Status of the ASM instance:
    [oracle@rac1 ~]$ srvctl status asm -n rac1
    ASM is running on rac1
    Show the RAC database configuration:
    [oracle@rac1 ~]$ srvctl config database -d bol
    Database unique name: bol
    Database name: bol
    Oracle home: /u01/app/oracle/product/11.2.0/db_1
    Oracle user: oracle
    Spfile: +DATA/bol/spfilebol.ora
    Domain:
    Start options: open
    Stop options: immediate
    Database role: PRIMARY
    Management policy: AUTOMATIC
    Server pools: bol
    Database instances: bol1,bol2
    Disk Groups: DATA,FRA
    Mount point paths:
    Services:
    Type: RAC
    Database is administrator managed
    Show the node application configuration (VIP, GSD, ONS, listener):
    [oracle@rac1 ~]$ srvctl config nodeapps -n rac1 -a -g -s -l
    -n <node_name> option has been deprecated.
    Warning:-l option has been deprecated and will be ignored.
    Network exists: 1/10.15.4.0/255.255.252.0/eth0, type static
    VIP exists: /rac1vip/10.15.7.13/10.15.4.0/255.255.252.0/eth0, hosting node rac1
    GSD exists
    ONS exists: Local port 6100, remote port 6200, EM port 2016
    Warning:-n option has been deprecated and will be ignored.
    Name: LISTENER
    Network: 1, Owner: grid
    Home: <CRS home>
    /u01/app/11.2.0/grid on node(s) rac1,rac2
    End points: TCP:1521
    Show the ASM instance configuration:
    [oracle@rac1 ~]$ srvctl config asm -n rac1
    Warning:-n option has been deprecated and will be ignored.
    ASM home: /u01/app/11.2.0/grid
    ASM listener: LISTENER

    All running instances in the cluster:
    SELECT
    inst_id
    , instance_number inst_no
    , instance_name inst_name
    , parallel
    , status
    , database_status db_status
    , active_state state
    , host_name host
    FROM gv$instance
    ORDER BY inst_id;

    1 1 bol1 YES OPEN ACTIVE NORMAL rac1
    2 2 bol2 YES OPEN ACTIVE NORMAL rac2

    All database files stored in the disk groups:
    select name from v$datafile
    union
    select member from v$logfile
    union
    select name from v$controlfile
    union
    select name from v$tempfile;

    All ASM disks belonging to the OCR disk group:
    SELECT path
    FROM v$asm_disk
    WHERE group_number IN (select group_number
    from v$asm_diskgroup
    where name = 'OCR');

    /dev/asm-diskc
    /dev/asm-diskd
    /dev/asm-diskb

    12.2 Create a tablespace and user


    [grid@rac1 ~]$ asmcmd
    ASMCMD> ls
    DATA/
    FRA/
    OCR/
    ASMCMD> pwd
    +DATA/BOL/DATAFILE

    # connect via the SCAN IP
    $ sqlplus "/as sysdba"
    create tablespace test datafile '+DATA/BOL/DATAFILE/test01.dbf' size 50m ;
    create tablespace SDE_TBS
    logging
    datafile '+DATA/BOL/DATAFILE/SDE_TBS.dbf'
    size 500m
    autoextend on
    next 200m maxsize 20480m
    extent management local;
    create user SDE identified by sde default tablespace SDE_TBS;
    grant connect,resource,dba to sde;
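
    A quick check that the tablespaces and user exist (same session or a new one):
    sqlplus / as sysdba <<'EOF'
    select tablespace_name, status from dba_tablespaces where tablespace_name in ('TEST','SDE_TBS');
    select username, default_tablespace from dba_users where username = 'SDE';
    EOF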

    12.3 Stop and start a single node

    SQL> show parameter name;

    NAME TYPE VALUE
    ------------------------------------ ----------- ------------------------------
    cell_offloadgroup_name string
    db_file_name_convert string
    db_name string bol
    db_unique_name string bol
    global_names boolean FALSE
    instance_name string bol1
    lock_name_space string
    log_file_name_convert string
    processor_group_name string
    service_names string bol
    SQL> select instance_name,status from gv$instance;

    INSTANCE_NAME STATUS
    ---------------- ------------
    bol1 OPEN
    bol2 OPEN

    [grid@rac1 ~]$ srvctl stop listener -n rac1 # stop the listener
    [grid@rac1 ~]$ srvctl status listener -n rac1 # check listener status
    Listener LISTENER is enabled on node(s): rac1
    Listener LISTENER is not running on node(s): rac1
    [grid@rac1 ~]$ ps -ef |grep -i local=no |wc -l
    8
    [grid@rac1 ~]$ srvctl stop instance -o immediate -d bol -i bol1 # stop the node 1 database instance
    [grid@rac1 ~]$ srvctl status database -d bol # check database status
    Instance bol1 is not running on node rac1
    Instance bol2 is running on node rac2
    [grid@rac1 ~]$ srvctl status instance -d bol -i bol1 # check the node 1 instance status
    Instance bol1 is not running on node rac1

    [root@rac1 ~]# cd /u01/app/11.2.0/grid/bin/
    [root@rac1 bin]# ./crsctl stop crs
    [root@rac1 bin]# ./crs_stat -t -v # check status
    CRS-0184: Cannot communicate with the CRS daemon.
    [root@rac1 bin]# ./srvctl status asm -n rac1 # run on the rac1 node
    PRKH-1010 : Unable to communicate with CRS services.
    PRKH-3003 : An attempt to communicate with the CSS daemon failed
    [root@rac2 bin]# ./srvctl status asm -n rac1 # run on the rac2 node
    ASM is not running on rac1

    $ ps -ef |grep -i ora
    $ ps -ef |grep -i asm
    # connect via the SCAN IP and check which instances are up
    select instance_name,status from gv$instance;

    # bring rac1 back up
    [root@rac1 bin]# ./crs_stat -t
    CRS-0184: Cannot communicate with the CRS daemon.
    [root@rac1 bin]# ./crsctl start crs
    CRS-4123: Oracle High Availability Services has been started.
    [root@rac1 bin]# ./srvctl status asm # confirm ASM is running on both nodes
    ASM is running on rac2,rac1
    [root@rac1 bin]# ./crs_stat -t -v
    Name Type R/RA F/FT Target State Host
    ----------------------------------------------------------------------
    ora.DATA.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1
    ora.FRA.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1
    ora....ER.lsnr ora....er.type 0/5 0/ ONLINE ONLINE rac2
    ora....N1.lsnr ora....er.type 0/5 0/0 ONLINE ONLINE rac2
    ora.OCR.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1
    ora.asm ora.asm.type 0/5 0/ ONLINE ONLINE rac1
    ora.bol.db ora....se.type 0/2 0/1 ONLINE ONLINE rac2
    ora.cvu ora.cvu.type 0/5 0/0 ONLINE ONLINE rac2
    ora.gsd ora.gsd.type 0/5 0/ OFFLINE OFFLINE
    ora....network ora....rk.type 0/5 0/ ONLINE ONLINE rac1
    ora.oc4j ora.oc4j.type 0/1 0/2 ONLINE ONLINE rac2
    ora.ons ora.ons.type 0/3 0/ ONLINE ONLINE rac1
    ora....SM1.asm application 0/5 0/0 ONLINE ONLINE rac1
    ora....C1.lsnr application 0/5 0/0 OFFLINE OFFLINE
    ora.rac1.gsd application 0/5 0/0 OFFLINE OFFLINE
    ora.rac1.ons application 0/3 0/0 ONLINE ONLINE rac1
    ora.rac1.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac1
    ora....SM2.asm application 0/5 0/0 ONLINE ONLINE rac2
    ora....C2.lsnr application 0/5 0/0 ONLINE ONLINE rac2
    ora.rac2.gsd application 0/5 0/0 OFFLINE OFFLINE
    ora.rac2.ons application 0/3 0/0 ONLINE ONLINE rac2
    ora.rac2.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac2
    ora.scan1.vip ora....ip.type 0/0 0/0 ONLINE ONLINE rac2
    [grid@rac1 ~]$ srvctl start instance -d bol -i bol1 # start the node 1 database instance
    [grid@rac1 ~]$ srvctl status database -d bol
    Instance bol1 is running on node rac1
    Instance bol2 is running on node rac2
    [grid@rac1 ~]$ crs_stat -t
    Name Type Target State Host
    ------------------------------------------------------------
    ora.DATA.dg ora....up.type ONLINE ONLINE rac1
    ora.FRA.dg ora....up.type ONLINE ONLINE rac1
    ora....ER.lsnr ora....er.type ONLINE ONLINE rac1
    ora....N1.lsnr ora....er.type ONLINE ONLINE rac2
    ora.OCR.dg ora....up.type ONLINE ONLINE rac1
    ora.asm ora.asm.type ONLINE ONLINE rac1
    ora.bol.db ora....se.type ONLINE ONLINE rac1
    ora.cvu ora.cvu.type ONLINE ONLINE rac2
    ora.gsd ora.gsd.type OFFLINE OFFLINE
    ora....network ora....rk.type ONLINE ONLINE rac1
    ora.oc4j ora.oc4j.type ONLINE ONLINE rac2
    ora.ons ora.ons.type ONLINE ONLINE rac1
    ora....SM1.asm application ONLINE ONLINE rac1
    ora....C1.lsnr application ONLINE ONLINE rac1
    ora.rac1.gsd application OFFLINE OFFLINE
    ora.rac1.ons application ONLINE ONLINE rac1
    ora.rac1.vip ora....t1.type ONLINE ONLINE rac1
    ora....SM2.asm application ONLINE ONLINE rac2
    ora....C2.lsnr application ONLINE ONLINE rac2
    ora.rac2.gsd application OFFLINE OFFLINE
    ora.rac2.ons application ONLINE ONLINE rac2
    ora.rac2.vip ora....t1.type ONLINE ONLINE rac2
    ora.scan1.vip ora....ip.type ONLINE ONLINE rac2

    select value from v$parameter where name='processes';

    12.4 Network interfaces

    [root@rac1 bin]# ifconfig
    eth0 Link encap:Ethernet HWaddr 08:00:27:0E:DE:0E
    inet addr:10.15.7.11 Bcast:10.15.7.255 Mask:255.255.252.0
    inet6 addr: fe80::a00:27ff:fe0e:de0e/64 Scope:Link
    UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
    RX packets:3907156 errors:0 dropped:0 overruns:0 frame:0
    TX packets:7161180 errors:0 dropped:0 overruns:0 carrier:0
    collisions:0 txqueuelen:1000
    RX bytes:536184469 (511.3 MiB) TX bytes:9769872694 (9.0 GiB)

    eth0:1 Link encap:Ethernet HWaddr 08:00:27:0E:DE:0E
    inet addr:10.15.7.13 Bcast:10.15.7.255 Mask:255.255.252.0
    UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

    eth1 Link encap:Ethernet HWaddr 08:00:27:8A:F5:38
    inet addr:1.1.1.1 Bcast:1.1.3.255 Mask:255.255.252.0
    inet6 addr: fe80::a00:27ff:fe8a:f538/64 Scope:Link
    UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
    RX packets:3636608 errors:0 dropped:0 overruns:0 frame:0
    TX packets:3017320 errors:0 dropped:0 overruns:0 carrier:0
    collisions:0 txqueuelen:1000
    RX bytes:2524862742 (2.3 GiB) TX bytes:1669614220 (1.5 GiB)

    eth1:1 Link encap:Ethernet HWaddr 08:00:27:8A:F5:38
    inet addr:169.254.204.165 Bcast:169.254.255.255 Mask:255.255.0.0
    UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

    lo Link encap:Local Loopback
    inet addr:127.0.0.1 Mask:255.0.0.0
    inet6 addr: ::1/128 Scope:Host
    UP LOOPBACK RUNNING MTU:16436 Metric:1
    RX packets:2272848 errors:0 dropped:0 overruns:0 frame:0
    TX packets:2272848 errors:0 dropped:0 overruns:0 carrier:0
    collisions:0 txqueuelen:0
    RX bytes:1524528859 (1.4 GiB) TX bytes:1524528859 (1.4 GiB)
    [root@rac2 bin]# ifconfig
    eth0 Link encap:Ethernet HWaddr 08:00:27:33:6D:5A
    inet addr:10.15.7.12 Bcast:10.15.7.255 Mask:255.255.252.0
    inet6 addr: fe80::a00:27ff:fe33:6d5a/64 Scope:Link
    UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
    RX packets:8414792 errors:0 dropped:0 overruns:0 frame:0
    TX packets:1577797 errors:0 dropped:0 overruns:0 carrier:0
    collisions:0 txqueuelen:1000
    RX bytes:9466367813 (8.8 GiB) TX bytes:172203059 (164.2 MiB)

    eth0:1 Link encap:Ethernet HWaddr 08:00:27:33:6D:5A
    inet addr:10.15.7.14 Bcast:10.15.7.255 Mask:255.255.252.0
    UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

    eth0:3 Link encap:Ethernet HWaddr 08:00:27:33:6D:5A
    inet addr:10.15.7.15 Bcast:10.15.7.255 Mask:255.255.252.0
    UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

    eth1 Link encap:Ethernet HWaddr 08:00:27:DC:E4:01
    inet addr:1.1.1.2 Bcast:1.1.3.255 Mask:255.255.252.0
    inet6 addr: fe80::a00:27ff:fedc:e401/64 Scope:Link
    UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
    RX packets:3017568 errors:0 dropped:0 overruns:0 frame:0
    TX packets:3637072 errors:0 dropped:0 overruns:0 carrier:0
    collisions:0 txqueuelen:1000
    RX bytes:1669901139 (1.5 GiB) TX bytes:2525032018 (2.3 GiB)

    eth1:1 Link encap:Ethernet HWaddr 08:00:27:DC:E4:01
    inet addr:169.254.202.95 Bcast:169.254.255.255 Mask:255.255.0.0
    UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

    lo Link encap:Local Loopback
    inet addr:127.0.0.1 Mask:255.0.0.0
    inet6 addr: ::1/128 Scope:Host
    UP LOOPBACK RUNNING MTU:16436 Metric:1
    RX packets:848199 errors:0 dropped:0 overruns:0 frame:0
    TX packets:848199 errors:0 dropped:0 overruns:0 carrier:0
    collisions:0 txqueuelen:0
    RX bytes:472739764 (450.8 MiB) TX bytes:472739764 (450.8 MiB)
