• Oracle 10g RAC node configuration and deployment guide


    Before writing this article, xxx had already written several posts on this node-configuration topic; readers who want more background can look back through the earlier articles.

        Host network configuration notes:

        Use static configuration for the IP addresses: static

        The gateway must be specified

        The hostname must not appear on the loopback address!

        If the single-instance ASM service has been started, stop it first

        ------------------------------------------------------------------------------------------

        1. Configure the IPs on both machines so they can ping each other

        2. Change the machine hostnames:

        vi /etc/sysconfig/network

        3. Configure the static IP: vi /etc/sysconfig/network-scripts/ifcfg-eth0 (a sketch follows)
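        A minimal static ifcfg-eth0 might look like the sketch below (the IP matches the hosts file later in this section; NETMASK and GATEWAY are assumptions, so adjust them to your network):

        ----------------------------------------

        DEVICE=eth0

        BOOTPROTO=static

        IPADDR=192.168.10.11

        NETMASK=255.255.255.0

        GATEWAY=192.168.10.1

        ONBOOT=yes

        ----------------------------------------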

        4. Copy the NIC config as eth0:2 --heartbeat IP

        cp ifcfg-eth0 ifcfg-eth0:2

        Change the following fields in it:

        DEVICE=eth0:2

        BOOTPROTO=static

        IPADDR=10.10.10.11

        GATEWAY=10.10.10.1

        After editing, restart the network: service network restart

        5. Configure the HOSTS file; it must be identical on machine A and machine B

        192.168.10.11 oracle1     --host public static IP

        192.168.10.12 oracle2     --host public static IP

      

        192.168.10.21 oracle1-vip --floating (virtual) IP

        192.168.10.22 oracle2-vip --floating (virtual) IP

        10.10.10.11 oracle1-priv  --heartbeat IP

        10.10.10.12 oracle2-priv  --heartbeat IP

        6. Configure hangcheck-timer:

        vi /etc/rc.local

        7. Set the oracle user's password

        8. Generate the private and public keys

        9. Send A's keys to B; B appends them to authorized_keys and sends the file back to A, so both machines hold the merged key file

        10. Verify the SSH connections: each node verifies twice, once against itself (public and heartbeat names) and once against the peer (public and heartbeat names); see the sketch below.

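        A sketch of that verification loop (hostnames are the ones defined in the HOSTS file above; run as the oracle user on each node and accept each host key on first connect):

        for h in oracle1 oracle2 oracle1-priv oracle2-priv; do

            ssh $h date

        done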

        ------------------------------------------------------------------------------------------

        Run install.sh on node1 & node2

        Set a password for the oracle user

        Edit the oracle user's .bashrc file:

        export ORA_CRS_HOME=/u01/app/crs_1

        export ORACLE_SID=racdb#
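        In addition to the two exports above, a typical oracle .bashrc for this layout would also set ORACLE_BASE, ORACLE_HOME and PATH. A sketch, assuming the /u01/app layout and the .../10.2.0/db_1 home mentioned elsewhere in this document (the "#" in racdb# stands for the node number):

        export ORACLE_BASE=/u01/app/oracle

        export ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1

        export ORA_CRS_HOME=/u01/app/crs_1

        export ORACLE_SID=racdb1    # racdb2 on the second node

        export PATH=$ORACLE_HOME/bin:$ORA_CRS_HOME/bin:$PATH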

        su -

        chown oracle.oinstall /u01/app -R

        Configure the network:

        vi /etc/hosts

        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        127.0.0.1  localhost.localdomain localhost

        ::1        localhost6.localdomain6 localhost6

        # Public Network - (eth0)

        192.168.3.50 stu50

        192.168.3.52 stu52

        # Public Virtual IP (eth0:1)

        192.168.3.51 stu50-vip

        192.168.3.53 stu52-vip

        # Private Interconnect - (eth1 -> eth0:2)

        10.0.0.50 stu50-priv

        10.0.0.52 stu52-priv

        Configure eth0:2:

        cd /etc/sysconfig/network-scripts/

        cp ifcfg-eth0 ifcfg-eth0:2

        DEVICE=eth0:2

        BOOTPROTO=static

        HWADDR=00:E0:4D:3B:0C:B2

        IPADDR=10.0.0.50

        IPV6INIT=yes

        IPV6_AUTOCONF=yes

        NETMASK=255.255.255.0

        GATEWAY=10.0.0.1

        ONBOOT=yes

        Configure hangcheck-timer: it monitors whether the Linux kernel has hung (hangcheck_tick is the check interval in seconds; hangcheck_margin is the maximum hang time tolerated before the node is reset):

        vi /etc/modprobe.conf

        options hangcheck-timer hangcheck_tick=30 hangcheck_margin=180

        Load the hangcheck-timer module automatically at boot (ORACLE_HOME here is /u01/app/oracle/product/10.2.0/db_1):

        vi /etc/rc.local

        modprobe hangcheck-timer    --after adding this line, also run it once by hand

        Check whether the hangcheck-timer module has been loaded:

        lsmod | grep hangcheck_timer

        Configure the SSH trust relationship:

        node1:192.168.3.50

        su - oracle

        ssh-keygen -t rsa

        ssh-keygen -t dsa

        cd .ssh

        cat *.pub > authorized_keys

        node2:192.168.3.52

        su - oracle

        ssh-keygen -t rsa

        ssh-keygen -t dsa

        cd .ssh

        cat *.pub > authorized_keys

        node1:192.168.3.50

        scp authorized_keys oracle@192.168.3.52:/home/oracle/.ssh/keys_dbs

        node2:192.168.3.52

        cat keys_dbs >> authorized_keys

        scp authorized_keys oracle@192.168.3.50:/home/oracle/.ssh/

        Test the trust relationship; each of the following logins should complete without a password prompt. Run the full set on both nodes:

        node1:192.168.3.50

        node2:192.168.3.52

        ssh stu50

        ssh stu52

        ssh stu50-priv

        ssh stu52-priv
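        If any of these still prompts for a password, the usual culprit is permissions; a hedged note (sshd ignores keys when .ssh or authorized_keys is group-writable). Run on both nodes as oracle:

        chmod 700 ~/.ssh

        chmod 600 ~/.ssh/authorized_keys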

        Prepare the shared volume: iscsi

        rpm -ivh compat-db-4.2.52-5.1.i386.rpm

        rpm -ivh libXp-1.0.0-8.1.el5.i386.rpm

        rpm -ivh openmotif22-2.2.3-18.i386.rpm

        node1 : 192.168.3.50 stu50 (iscsi server)

        Set aside a ~10 GB partition to act as the iscsi shared disk:

        Partition: /dev/sda5  5889        7105     9775521   83  Linux

        iscsi server packages, from the ClusterStorage directory:

        rpm -ivh perl-Config-General-2.40-1.el5.noarch.rpm

        rpm -ivh scsi-target-utils-0.0-5.20080917snap.el5.x86_64.rpm

        From the Server directory:

        rpm -ivh iscsi-initiator-utils-6.2.0.871-0.16.el5.i386.rpm

        vi /etc/tgt/targets.conf

        ----------------------------------------

     <target iqn.2011-01.com.oracle.blues:luns1>

            backing-store /dev/sda9

           initiator-address 10.1.1.0/24

     </target>

        ----------------------------------------

        vi /etc/udev/scripts/iscsidev.sh

        ----------------------------------------

        #!/bin/bash

     # udev helper: name an iSCSI block device after the last field of its target IQN.

     BUS=${1}

     HOST=${BUS%%:*}    # SCSI host number, e.g. "2" from a bus id like "2:0:0:1"

     [ -e /sys/class/iscsi_host ] || exit 1

     file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/targetname"

     target_name=$(cat ${file})

     if [ -z "${target_name}" ] ; then

            exit 1

     fi

     # Print the text after the last ':' in the IQN (e.g. "luns1"); udev substitutes it as %c.

     echo "${target_name##*:}"

        ----------------------------------------

        chmod +x /etc/udev/scripts/iscsidev.sh

        chkconfig iscsi on

        chkconfig iscsid on

        chkconfig tgtd on

        service iscsi start

        service iscsid start

        service tgtd start

        tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL --open the target to initiators

        tgtadm --lld iscsi --op show --mode target      --list the LUNs

        iscsiadm -m discovery -t sendtargets -p 10.1.1.103

        service iscsi start

        fdisk -l

        Rescan from the server (log out of the session, then rediscover):

        iscsiadm -m session -u

        iscsiadm -m discovery -t sendtargets -p 10.1.1.103

        vi /etc/rc.local    --add the following so the target survives a reboot

        tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL

        service iscsi start

        iscsi client configuration, client: 10.1.1.103

        rpm -ivh iscsi-initiator-utils-6.2.0.871-0.16.el5.i386.rpm

        vi /etc/udev/rules.d/55-openiscsi.rules

        -----------------------------------------------

        KERNEL=="sd*",BUS=="scsi",PROGRAM="/etc/udev/scripts/iscsidev.sh %b",SYMLINK+="iscsi/%c"

        -----------------------------------------------

        vi /etc/udev/scripts/iscsidev.sh

        ----------------------------------------

        (same content as the server-side iscsidev.sh listed above)

        ----------------------------------------

        chmod +x /etc/udev/scripts/iscsidev.sh

        service iscsi start

        iscsiadm -m discovery -t sendtargets -p 10.1.1.18 -l

        service iscsi start

        fdisk -l

        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        Partition the iscsi shared disk (a sketch follows):
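        The partitioning step itself, sketched (the device name sdb and partitions sdb1/sdb2/sdb3/sdb5 are taken from the udev raw rules below; sizes are up to you):

        fdisk /dev/sdb      --create sdb1, sdb2, sdb3 (primary) and sdb5 (logical)

        partprobe /dev/sdb  --re-read the partition table; repeat on the other node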

        Turn the iscsi shared partitions into raw devices:

        vi /etc/udev/rules.d/60-raw.rules

        ACTION=="add", KERNEL=="sdb1", RUN+="/bin/raw /dev/raw/raw1 %N"

        ACTION=="add", KERNEL=="sdb2", RUN+="/bin/raw /dev/raw/raw2 %N"

        ACTION=="add", KERNEL=="sdb3", RUN+="/bin/raw /dev/raw/raw3 %N"

        ACTION=="add", KERNEL=="sdb5", RUN+="/bin/raw /dev/raw/raw4 %N"

        KERNEL=="raw[1]", MODE="0660", GROUP="oinstall", OWNER="oracle"

        KERNEL=="raw[2]", MODE="0660", GROUP="oinstall", OWNER="oracle"

        KERNEL=="raw[3]", MODE="0660", GROUP="oinstall", OWNER="oracle"

        KERNEL=="raw[4]", MODE="0660", GROUP="oinstall", OWNER="oracle"

        Start udev on node1 & node2 respectively:

        start_udev


        On node1 & node2, confirm the raw devices have been created:

        [root@stu50 ~]# ll /dev/raw

        total 0

        crw-rw---- 1 root   oinstall 162, 1 01-11 12:44 raw1

        crw-rw---- 1 oracle oinstall 162, 2 01-11 12:44 raw2

        crw-rw---- 1 oracle oinstall 162, 3 01-11 12:44 raw3

        crw-rw---- 1 oracle oinstall 162, 4 01-11 12:44 raw4

        Use the CVU to verify that the cluster is ready for installation:

        ./runcluvfy.sh stage -pre crsinst -n rac1,rac2 -verbose

        Install the clusterware software (this only needs to be done on one node, but the other nodes must be added to the cluster manually in the installer):

        /mnt/clusterware/runInstaller

        Note: when the dialog asking you to run root.sh pops up, do not run root.sh yet. First modify the vipca and srvctl scripts,

        otherwise the java invocation during the script run will fail!

        su - oracle

        vi +123 $CRS_HOME/bin/vipca

        After the fi on line 123, add a new line:

        unset LD_ASSUME_KERNEL 

        vi + $CRS_HOME/bin/srvctl

        After the export LD_ASSUME_KERNEL line, add:

        unset LD_ASSUME_KERNEL
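        For reference, on 10.2.0.1 the patched region of vipca typically ends up looking like the sketch below (the exact line number varies by release; only the final unset line is newly added):

        if [ "$arch" = "i686" -o "$arch" = "ia64" -o "$arch" = "x86_64" ]

        then

             LD_ASSUME_KERNEL=2.4.19

             export LD_ASSUME_KERNEL

        fi

        unset LD_ASSUME_KERNEL    # newly added line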

        When root.sh runs on the last node, if the error below appears, resolve it with the steps that follow!

        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        Running vipca(silent) for configuring nodeapps

        Error 0(Native: listNetInterfaces:[3])

      [Error 0(Native: listNetInterfaces:[3])]

        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        Note: adapt the following to your own network configuration (watch the network adapter names and IP addresses; do not copy blindly!):

        cd /u01/app/crs_1/bin

        #./oifcfg iflist

        #./oifcfg setif -global eth0/10.1.1.0:public

        #./oifcfg setif -global eth0:2/10.0.0.0:cluster_interconnect

        #./oifcfg getif

        If the NICs on the two lab machines differ (eth0 on one, eth1 on the other), adjust as follows:

        ./oifcfg setif -node node1 eth0/10.1.1.0:public

        ./oifcfg setif -node node1 eth0:0/172.20.1.0:cluster_interconnect

        ./oifcfg setif -node node1 eth0:1/172.20.1.0:cluster_interconnect

        ./oifcfg setif -node node2 eth1/10.1.1.0:public

        ./oifcfg setif -node node2 eth1:0/172.20.1.0:cluster_interconnect

        ./oifcfg setif -node node2 eth1:1/172.20.1.0:cluster_interconnect

        ############ result ########

        [root@server bin]# ./oifcfg getif

        eth0  10.1.1.0  node1  public

        eth0:0  172.20.1.0  node1  cluster_interconnect

        eth0:1  172.20.1.0  node1  cluster_interconnect

        eth1  10.1.1.0  node2  public

        eth1:0  172.20.1.0  node2  cluster_interconnect

        eth1:1  172.20.1.0  node2  cluster_interconnect

        ############ result ########

        After setting the network interfaces, run vipca manually on the last node:

        unset LANG

        ./vipca

        After the vipca wizard has started the resources, check the status of each resource:

        cd $ORA_CRS_HOME/bin

        ./crs_stat -t

        View detailed information for each resource:

        ./crs_stat

        ./crs_stat -p

        After the clusterware software installs successfully, back up the OCR!

        ./ocrconfig -export /home/oracle/ocr.bak

        Install the database software (this also only needs to be done on one node; a multi-node selection screen will appear). During installation choose software-only; do not create a database yet.

        /mnt/database/runInstaller

        Clusterware administration:

        View the voting disk location:

        #./crsctl query css votedisk

        Back up the voting disk:

        dd if=voting_disk_name of=backup_file_name bs=4k
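        For example, with the raw-device layout used earlier in this document (assuming the voting disk sits on /dev/raw/raw2; confirm the real path with crsctl query css votedisk first):

        dd if=/dev/raw/raw2 of=/home/oracle/votedisk.bak bs=4k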

        Restore the voting disk:

        dd if=backup_file_name of=voting_disk_name bs=4k

        Add a new voting disk:

        # crsctl add css votedisk <new voting disk path>

        Delete a voting disk:

        # crsctl delete css votedisk <old voting disk path>

        If Oracle Clusterware is shut down on all nodes, use the -force option:

        # crsctl add css votedisk <new voting disk path> -force

        # crsctl delete css votedisk <old voting disk path> -force

        View the OCR location:

        #./ocrcheck

        Locate the physical backups:

        $ocrconfig -showbackup

        Inspect the contents of an OCR backup:

        # ocrdump -backupfile file_name 

        Check OCR integrity: 

        $ cluvfy comp ocr -n all 

        The OCR is backed up automatically on the following schedule:

        Every 4 hours: CRS keeps the last 3 copies. 

        At the end of each day: CRS keeps the last 2 copies. 

        At the end of each week: CRS keeps the last 2 copies.

        Change the default location for automatic backups:

        # ocrconfig -backuploc /shared/bak

        Restore an OCR physical backup:

        # crsctl stop crs 

        # ocrconfig -restore <CRS HOME>/cdata/jfv_clus/day.ocr  

        # crsctl start crs 

        Manual (logical) backup:

        /data/oracle/crs/bin/ocrconfig -export /data/backup/rac/ocrdisk.bak

        Restore a logical OCR backup:

        # crsctl stop crs 

        # ocrconfig -import /shared/export/ocrback.dmp  

        # crsctl start crs 

        Check OCR integrity: 

        $ cluvfy comp ocr -n all 

        Stop CRS:

        /etc/init.d/init.crs stop

        Start CRS:

        /etc/init.d/init.crs start

        Watch system activity:

        tail -f /var/log/messages

        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        Manually uninstalling clusterware: if the clusterware installation failed, the steps below remove it. If the installation succeeded, do not uninstall! Don't run these scripts casually!

        cd /u01/app/crs_1/install

        ./rootdelete.sh

        ./rootdeinstall.sh

        rm -fr /etc/ora*

        rm -fr /etc/init.d/*.crs

        rm -fr /etc/init.d/*.crsd

        rm -fr /etc/init.d/*.css

        rm -fr /etc/init.d/*.cssd

        su - oracle

        rm -fr $ORACLE_BASE/*

        [node1.up.com]      [node2.up.com]

     [storage.up.com]

        node1.up.com:

        192.168.0.7

        node2.up.com:

        192.168.0.8

        storage.up.com:

        192.168.0.123

        Storage configuration (storage):

        1. Check the scsi target package:

        [root@storage ~]# rpm -qa | grep scsi

        scsi-target-utils-0.0-5.20080917snap.el5

        2. Prepare >5 GB of backing space:

        dd if=/dev/zero of=/tmp/disk.img bs=1G count=5

        3. Export this space:

        /etc/tgt/targets.conf

        ----------------------------

        <target iqn.2010-04-07.com.up.storage:sharedisk>

            backing-store /tmp/disk.img

            initiator-address 192.168.0.7

            initiator-address 192.168.0.8

        </target>

        -----------------------------

        service  tgtd start

        Verify:

        tgtadm --lld iscsi --op show --mode target

        4. Enable at boot:

        chkconfig tgtd on

        Node configuration (nodeX):

        1. iscsi client:

        [root@node1 cluster]# rpm -qa | grep iscsi

        iscsi-initiator-utils-6.2.0.868-0.18.el5

        [root@node1 cluster]# ls arb/iscsi/

        ifaces  isns  nodes  send_targets  slp  static

        If the target has been replaced, be sure to clear the old discovery records:

        [root@node1 cluster]# rm -rf arb/iscsi/*

        !!! Recommended: start iscsid first:

        service iscsid start

        [root@node1 cluster]# iscsiadm -m discovery -t sendtargets -p 192.168.0.123:3260

        192.168.0.123:3260,1 iqn.2010-04-07.com.up.storage:sharedisk

        2. Log in to the storage:

        /etc/init.d/iscsi start

        3. Udev policy

        Method:

        udevinfo -a -p  /sys/block/sdX

        Base the rule on the attributes in this output, as in the example below.
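        For instance, to pull out the two attributes used in the rule that follows (assuming the imported disk appeared as sdb, matching the /dev/iscsi listing above):

        udevinfo -a -p /sys/block/sdb | egrep 'model|size'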

        Then:

        [root@ rules.d]# cat /etc/udev/rules.d/55-iscsi.rules 

        SUBSYSTEM=="block",SYSFS{size}=="19551042",SYSFS{model}=="VIRTUAL-DISK",SYMLINK="iscsidisk"

        Reload udev:

        start_udev

        5. Import the storage:

        [root@node1 cluster]# /etc/init.d/iscsi start

        iscsid (pid 5714 5713) is running...

        Setting up iSCSI targets: Logging in to [iface: default, target: iqn.2010-04-07.com.up.storage:sharedisk, portal: 192.168.0.123,3260]

        Login to [iface: default, target: iqn.2010-04-07.com.up.storage:sharedisk, portal: 192.168.0.123,3260]: successful

                                                               [  OK  ]

        [root@node1 cluster]# ls /dev/iscsi/ -l

        total 0

        lrwxrwxrwx 1 root root 6 04-07 10:34 sharedisk -> ../sdb

        6. Switch LVM to cluster mode (cluster support):

        yum install lvm2-cluster

        [root@node1 cluster]# lvmconf --enable-cluster

        [root@node1 cluster]# ls /etc/lvm/lvm.conf

        /etc/lvm/lvm.conf

        7. Start clvmd (requires cman to be running):

        /etc/init.d/clvmd start

        8. Configure your LVM as usual:

        pvcreate /dev/iscsidisk 

        vgcreate vg /dev/iscsidisk 

        lvcreate -L 4G -n lv01 vg
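        A quick sanity check that the clustered volume is visible from both nodes (a sketch; run as root on node1 and node2):

        pvs /dev/iscsidisk   --the PV should be reported on both nodes

        lvs vg               --lv01 (4.00G) should appear on both nodes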

     tar -xzvf ora.tar.gz    --unpack the archive

        alter system set LOCAL_LISTENER='(ADDRESS=(PROTOCOL=TCP)(HOST=<VIP_address>)(PORT=1521))' scope=both sid='instance';
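        For example, for instance 1 with the naming used earlier in this document (stu50-vip and the racdb1 SID are assumptions carried over from the hosts file and the .bashrc sketch above):

        alter system set LOCAL_LISTENER='(ADDRESS=(PROTOCOL=TCP)(HOST=stu50-vip)(PORT=1521))' scope=both sid='racdb1';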


  • Original article: https://www.cnblogs.com/jiangu66/p/3067729.html