Lab Environment
Category | Before                | After
---------|-----------------------|----------------------
PUBLIC   | 172.18.4.182 rac1     | 192.168.56.10 rac1
         | 172.18.4.184 rac2     | 192.168.56.20 rac2
PRIVATE  | 10.10.10.10 rac1priv  | 20.20.20.10 rac1priv
         | 10.10.10.20 rac2priv  | 20.20.20.20 rac2priv
VIP      | 172.18.4.186 rac1vip  | 192.168.56.30 rac1vip
         | 172.18.4.187 rac2vip  | 192.168.56.40 rac2vip
SCAN     | 172.18.4.172 scanip   | 192.168.56.50 scanip
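Before changing anything, it helps to record the current state of the cluster. A minimal check as the grid user (olsnodes and srvctl are standard 11.2 tools; the output should match the "Before" column above):
[grid@rac1 ~]$ olsnodes -n -i      # node names, node numbers, and current VIPs
[grid@rac1 ~]$ srvctl config scan  # current SCAN name and address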
Modify the Public IP
- 1) Check the current interface configuration:
[grid@rac1 ~]$ su root
Password:
[root@rac1 grid]# oifcfg getif
eth1 10.10.10.0 global cluster_interconnect
eth0 172.18.4.0 global public
- 2) Use the oifcfg command to delete the old public interface and register the new one:
[grid@rac1 ~]$ oifcfg delif -global eth0/172.18.4.0
[grid@rac1 ~]$ oifcfg setif -global eth0/192.168.56.0:public
[grid@rac1 ~]$ oifcfg getif
eth1 10.10.10.0 global cluster_interconnect
eth0 192.168.56.0 global public
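Because oifcfg stores this setting globally, the same result should be visible from every node. A quick cross-check, assuming passwordless SSH between the nodes (standard in a RAC install) and the Grid home used in this article:
[grid@rac1 ~]$ ssh rac2 /u01/11.2.0/grid/bin/oifcfg getif  # should now list eth0 192.168.56.0 as public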
- 3) At the OS level, update the /etc/hosts and ifcfg-eth0 files (on all nodes):
# vi /etc/hosts
# Do not remove the following line, or various programs
# that require network functionality will fail.
127.0.0.1 localhost.localdomain localhost
::1 localhost6.localdomain6 localhost6
# PUBLIC
192.168.56.10 rac1
192.168.56.20 rac2
--------------------------------------------------
# vi /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
BOOTPROTO=none
ONBOOT=yes
HWADDR=08:00:27:2e:0e:4e
IPADDR=192.168.56.10
NETMASK=255.255.255.0
--------------------------------------------------
Restart the network service:
# service network restart
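Confirm that the interface came up with the new address and that the nodes can reach each other (the "inet addr" line shown here is the EL5-style ifconfig format; it varies by OS release):
[root@rac1 ~]# ifconfig eth0 | grep 'inet addr'  # expect 192.168.56.10
[root@rac1 ~]# ping -c 3 rac2                    # the other node's new public address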
Modify the VIP
- 1) Stop the instance and the VIP on the node being changed, then check the current VIP configuration:
[grid@rac1 ~]$ srvctl stop instance -d orcl -n rac1
[grid@rac1 ~]$ srvctl stop vip -n rac1 -f
[grid@rac1 ~]$ srvctl config nodeapps -a
VIP exists.:rac1
VIP exists.: /rac1vip/192.168.56.30/255.255.255.0/eth0
VIP exists.:rac2
VIP exists.: /rac2vip/192.168.56.40/255.255.255.0/eth0
- 2) Update the VIP entries in /etc/hosts (on all nodes):
# VIP
192.168.56.30 rac1vip
192.168.56.40 rac2vip
- 3) Modify the VIP configuration with srvctl. As the grid user this fails with PRCN-2018, so it must be run as root:
[grid@rac1 ~]$ srvctl modify nodeapps -n rac1 -A 192.168.56.30/255.255.255.0/eth0
PRCN-2018 : Current user grid is not a privileged user
[grid@rac1 ~]$ su root
Password:
[root@rac1 grid]# srvctl modify nodeapps -n rac1 -A 192.168.56.30/255.255.255.0/eth0
- 4) Restart the VIP, the listener, and the instance:
[grid@rac1 ~]$ srvctl start vip -n rac1
[grid@rac1 ~]$ srvctl start listener -n rac1
[grid@rac1 ~]$ srvctl start instance -d orcl -n rac1
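To confirm the node came back cleanly on the new VIP, the resource status can be checked with standard 11.2 srvctl/crsctl calls:
[grid@rac1 ~]$ srvctl status vip -n rac1       # VIP should be running on rac1
[grid@rac1 ~]$ srvctl status listener -n rac1  # local listener should be running
[grid@rac1 ~]$ crsctl stat res ora.rac1.vip    # detailed state of the VIP resource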
Modify the SCAN IP
- 1) Stop the SCAN listener and the SCAN VIP:
[grid@rac1 ~]$ srvctl stop scan_listener
[grid@rac1 ~]$ srvctl stop scan
- 2) Update the SCAN IP in /etc/hosts (on all nodes):
#SCAN IP
192.168.56.50 scanip
- 3) As root, update the SCAN VIP to the new address, then refresh and restart the SCAN listener:
[grid@rac1 ~]$ su root
Password:
[root@rac1 grid]# srvctl modify scan -n scanip
[grid@rac1 ~]$ srvctl modify scan_listener -u
[grid@rac1 ~]$ srvctl start scan_listener
- 4) Verify the new SCAN configuration:
[grid@rac1 ~]$ srvctl config scan
SCAN name: scanip, Network: 1/192.168.56.0/255.255.255.0/eth0
SCAN VIP name: scan1, IP: /scanip/192.168.56.50
[grid@rac1 ~]$ srvctl config scan_listener
SCAN Listener LISTENER_SCAN1 exists. Port: TCP:1521
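An end-to-end test is to connect through the new SCAN. A sketch using EZConnect; the orcl service name comes from this setup, while the system password is a placeholder:
[grid@rac1 ~]$ ping -c 1 scanip  # the SCAN name must resolve to 192.168.56.50
[grid@rac1 ~]$ sqlplus system/<password>@//scanip:1521/orcl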
Modify the Private IP
Before operating on any node of the cluster, back up the profile.xml configuration file first. Run the following as the grid user:
[grid@rac1 ~]$ cd /u01/11.2.0/grid/gpnp/rac1/profiles/peer/
[grid@rac1 peer]$ cp -p profile.xml profile.xml.bak
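To see what the profile currently records for the interconnect before changing it, the XML can be pretty-printed and filtered; a sketch assuming xmllint (from libxml2) is installed:
[grid@rac1 peer]$ xmllint --format profile.xml | grep -i network  # shows the Network elements, including the cluster_interconnect entry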
- 1) Make sure all nodes in the cluster are up and running normally.
- 2) As the grid user, check the current interface configuration, for example:
[grid@rac1 peer]$ oifcfg getif
eth1 10.10.10.0 global cluster_interconnect
eth0 192.168.56.0 global public
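Before registering the new subnet, oifcfg can also list the interfaces and subnets the OS itself exposes, which helps catch typos in the subnet address:
[grid@rac1 peer]$ oifcfg iflist -p -n  # interface, subnet, heuristic type, and netmask as seen by the OS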
- 3) Register the new private network subnet:
[grid@rac1 peer]$ oifcfg setif -global eth1/20.20.20.0:cluster_interconnect
[grid@rac1 peer]$ oifcfg getif
eth1 10.10.10.0 global cluster_interconnect
eth0 192.168.56.0 global public
eth1 20.20.20.0 global cluster_interconnect
- 4) As root, shut down Clusterware and disable autostart on every node:
[grid@rac1 peer]$ su root
Password:
[root@rac1 peer]# crsctl stop crs
CRS-2791: Starting shutdown of Oracle High Availability Services-managed resources on 'rac1'
CRS-2673: Attempting to stop 'ora.crsd' on 'rac1'
CRS-2790: Starting shutdown of Cluster Ready Services-managed resources on 'rac1'
CRS-2673: Attempting to stop 'ora.CRSDG.dg' on 'rac1'
CRS-2677: Stop of 'ora.LISTENER_SCAN1.lsnr' on 'rac1' succeeded
CRS-2673: Attempting to stop 'ora.scan1.vip' on 'rac1'
CRS-2677: Stop of 'ora.LISTENER.lsnr' on 'rac1' succeeded
…
CRS-4133: Oracle High Availability Services has been stopped.
[root@rac1 peer]# crsctl disable crs
CRS-4621: Oracle High Availability Services autostart is disabled.
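Before editing the network files, it is worth confirming on each node that the stack is really down and autostart is off; both are standard crsctl checks:
[root@rac1 peer]# crsctl check crs   # should fail to contact the stack, since it is stopped
[root@rac1 peer]# crsctl config crs  # should report that autostart is disabled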
- 5) Update /etc/hosts and the ifcfg-eth1 file (on both nodes):
# vi /etc/hosts
# PRIVATE
20.20.20.10 rac1priv
20.20.20.20 rac2priv
--------------------------------------------------
# vi /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE=eth1
BOOTPROTO=none
ONBOOT=yes
HWADDR=08:00:27:b1:33:73
IPADDR=20.20.20.10
NETMASK=255.255.255.0
Restart the network service and make sure the nodes can ping each other over the new private addresses:
[root@rac1 ~]# service network restart
[root@rac1 ~]# ping -c 3 rac2priv
- 6) As root, re-enable Clusterware autostart and start it on every node:
[grid@rac1 ~]$ su root
Password:
[root@rac1 grid]# crsctl enable crs
CRS-4622: Oracle High Availability Services autostart is enabled.
[root@rac1 grid]# crsctl start crs
CRS-4123: Oracle High Availability Services has been started.
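Once every node is back, confirm the stack and resources are healthy before removing the old interface definition:
[root@rac1 grid]# crsctl check crs  # CRS, CSS, and EVM should all report online
[grid@rac1 ~]$ crsctl stat res -t   # cluster-wide resource status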
- 7) After the cluster is back up, delete the old private interface definition and verify:
[grid@rac2 ~]$ oifcfg delif -global eth1/10.10.10.0
[grid@rac2 ~]$ oifcfg getif
eth0 192.168.56.0 global public
eth1 20.20.20.0 global cluster_interconnect
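As a final check, the database itself reports which address each instance actually uses for the interconnect. A sketch run as the oracle user with the database environment set; gv$cluster_interconnects is a standard dynamic view, and each instance should show an address on the 20.20.20.0 subnet:
[oracle@rac1 ~]$ sqlplus -S / as sysdba <<'EOF'
select inst_id, name, ip_address from gv$cluster_interconnects;
EOF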