Linux -- ZooKeeper Java API (Part 2)
A custom HA (high availability) implementation built on a ZooKeeper cluster.
It consists of clients plus a monitor: the monitor watches the state of every registered node and acts on the changes it sees.
Watcher (the monitor):
package com.huhu.zookeeper;

import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class AUTIManager {

    // ZooKeeper client object
    static ZooKeeper zk;
    // Latch that keeps the main thread alive so the watchers can keep running
    static CountDownLatch cdl = new CountDownLatch(1);

    String connection = "hu-hadoop1:2181,hu-hadoop2:2181,hu-hadoop3:2181";
    int TIMEOUT = 2000;

    // Watcher for session/connection events
    Watcher connectionWatch = new Watcher() {
        @Override
        public void process(WatchedEvent event) {
            System.out.println("zk notification -------------------- " + event.toString());
        }
    };

    // Watcher for the monitored nodes
    Watcher existsWatch = new Watcher() {
        @Override
        public void process(WatchedEvent event) {
            if (event.getType().toString().equals("None")) {
                System.out.println("none");
            } else if (event.getType().toString().equals("NodeCreated")) {
                System.out.println(event.getPath() + " node is online");
            } else if (event.getType().toString().equals("NodeDeleted")) {
                System.out.println(event.getPath() + " node is offline");
            } else if (event.getType().toString().equals("NodeDataChanged")) {
                System.out.println(event.getPath() + " node data has been modified");
            } else if (event.getType().toString().equals("NodeChildrenChanged")) {
                System.out.println(event.getPath() + " node's children have been modified");
            } else {
                System.out.println(event.toString());
            }
            // Re-register: a watch is a one-time trigger and must be set again after it fires
            try {
                zk.exists("/1708a1/node1", existsWatch);
                zk.exists("/1708a1/node2", existsWatch);
                zk.exists("/1708a1/node3", existsWatch);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    };

    // Initialize the ZooKeeper session
    public void init() {
        try {
            zk = new ZooKeeper(connection, TIMEOUT, connectionWatch);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Register an exists watch on each of /1708a1/node1..node3
    public void lookExists() {
        try {
            zk.exists("/1708a1/node1", existsWatch);
            zk.exists("/1708a1/node2", existsWatch);
            zk.exists("/1708a1/node3", existsWatch);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) throws Exception {
        AUTIManager m = new AUTIManager();
        m.init();
        m.lookExists();
        cdl.await();
    }
}
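The monitor above registers an exists watch on each child path one by one. A lighter variant, just my own sketch rather than anything from the original code, is to put a single getChildren watch on the parent /1708a1 and diff the child list each time it changes; the ChildrenManager class name and the diff logic are my assumptions here:

package com.huhu.zookeeper;

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch: watch all children of /1708a1 with a single getChildren watch.
public class ChildrenManager {

    static ZooKeeper zk;
    static CountDownLatch cdl = new CountDownLatch(1);
    // Children we have already seen, used to tell "online" from "offline"
    static Set<String> known = new HashSet<>();

    static Watcher childrenWatch = new Watcher() {
        @Override
        public void process(WatchedEvent event) {
            try {
                // Re-register the watch (watches are one-shot) and fetch the current children
                List<String> children = zk.getChildren("/1708a1", childrenWatch);
                for (String c : children) {
                    if (known.add(c)) {
                        System.out.println("/1708a1/" + c + " node is online");
                    }
                }
                for (String c : new HashSet<>(known)) {
                    if (!children.contains(c)) {
                        System.out.println("/1708a1/" + c + " node is offline");
                        known.remove(c);
                    }
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    };

    public static void main(String[] args) throws Exception {
        zk = new ZooKeeper("hu-hadoop1:2181,hu-hadoop2:2181,hu-hadoop3:2181", 2000,
                event -> System.out.println("zk notification: " + event));
        Thread.sleep(1000); // crude wait for the session to connect
        // Prime the watch and the initial child list; assumes /1708a1 already exists
        known.addAll(zk.getChildren("/1708a1", childrenWatch));
        cdl.await();
    }
}

One getChildren watch on the parent fires whenever any child is created or deleted, so the monitor no longer has to know the child names in advance.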
client1:
package com.huhu.zookeeper;

import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class Client1 {

    static CountDownLatch cdl = new CountDownLatch(1);
    static String connection = "hu-hadoop1:2181,hu-hadoop2:2181,hu-hadoop3:2181";
    static int TIMEOUT = 2000;
    static ZooKeeper zk;

    static Watcher watcher = new Watcher() {
        @Override
        public void process(WatchedEvent event) {
            System.out.println("Received zk notification, session established");
            System.out.println("zk is: " + zk);
            try {
                Stat s = zk.exists("/1708a1", true);
                if (s == null) {
                    System.out.println("Parent node does not exist, creating it");
                    zk.create("/1708a1", "123".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
                    System.out.println("/1708a1 registered and created");
                    zk.create("/1708a1/node1", "123".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
                    System.out.println("node1 created OK!!");
                } else {
                    zk.create("/1708a1/node1", "123".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
                    System.out.println("node1 created OK!!");
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    };

    public static void main(String[] args) throws Exception {
        System.out.println("Client 1");
        zk = new ZooKeeper(connection, TIMEOUT, watcher);
        System.out.println("zk object: " + zk);
        cdl.await();
    }
}
client2:
package com.huhu.zookeeper;

import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class Client2 {

    static CountDownLatch cdl = new CountDownLatch(1);
    static String connection = "hu-hadoop1:2181,hu-hadoop2:2181,hu-hadoop3:2181";
    static int TIMEOUT = 2000;
    static ZooKeeper zk;

    static Watcher watcher = new Watcher() {
        @Override
        public void process(WatchedEvent event) {
            System.out.println("Received zk notification, session established");
            System.out.println("zk is: " + zk);
            try {
                Stat s = zk.exists("/1708a1", true);
                if (s == null) {
                    System.out.println("Parent node does not exist, creating it");
                    zk.create("/1708a1", "123".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
                    System.out.println("/1708a1 registered and created");
                    zk.create("/1708a1/node2", "123".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
                    System.out.println("node2 created OK!!");
                } else {
                    zk.create("/1708a1/node2", "123".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
                    System.out.println("node2 created OK!!");
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    };

    public static void main(String[] args) throws Exception {
        System.out.println("Client 2");
        zk = new ZooKeeper(connection, TIMEOUT, watcher);
        System.out.println("zk object: " + zk);
        cdl.await();
    }
}
client3:
package com.huhu.zookeeper;

import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class Client3 {

    static CountDownLatch cdl = new CountDownLatch(1);
    static String connection = "hu-hadoop1:2181,hu-hadoop2:2181,hu-hadoop3:2181";
    static int TIMEOUT = 2000;
    static ZooKeeper zk;

    static Watcher watcher = new Watcher() {
        @Override
        public void process(WatchedEvent event) {
            System.out.println("Received zk notification, session established");
            System.out.println("zk is: " + zk);
            try {
                Stat s = zk.exists("/1708a1", true);
                if (s == null) {
                    System.out.println("Parent node does not exist, creating it");
                    zk.create("/1708a1", "123".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
                    System.out.println("/1708a1 registered and created");
                    zk.create("/1708a1/node3", "123".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
                    System.out.println("node3 created OK!!");
                } else {
                    zk.create("/1708a1/node3", "123".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
                    System.out.println("node3 created OK!!");
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    };

    public static void main(String[] args) throws Exception {
        System.out.println("Client 3");
        zk = new ZooKeeper(connection, TIMEOUT, watcher);
        System.out.println("zk object: " + zk);
        cdl.await();
    }
}
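Client1, Client2 and Client3 differ only in the child node they create. Just as a sketch of my own (the GenericClient name and the args[0] convention are made up here), the three classes could be collapsed into one that takes the node name as a program argument:

package com.huhu.zookeeper;

import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;

// Sketch: one client class; pass "node1", "node2" or "node3" as args[0].
public class GenericClient {

    static CountDownLatch connected = new CountDownLatch(1);

    public static void main(String[] args) throws Exception {
        String node = args.length > 0 ? args[0] : "node1";
        ZooKeeper zk = new ZooKeeper("hu-hadoop1:2181,hu-hadoop2:2181,hu-hadoop3:2181", 2000,
                event -> {
                    // Release the latch once the session is established
                    if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                        connected.countDown();
                    }
                });
        connected.await();

        // Make sure the persistent parent exists, then register this client as an ephemeral child
        if (zk.exists("/1708a1", false) == null) {
            zk.create("/1708a1", "123".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }
        zk.create("/1708a1/" + node, "123".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        System.out.println(node + " created OK");

        // Keep the process alive so the ephemeral node stays registered until we are stopped
        Thread.sleep(Long.MAX_VALUE);
    }
}

Running it three times with node1, node2 and node3 as the argument should behave like the three separate classes above.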
Start the monitor:
At this point the cluster does not yet contain /1708a1.
Start the three clients:
Then take them offline one by one (stop their processes):
The effect is easy to see: the monitor reports each node coming online and then going offline.
A few words on MySQL and ZooKeeper:
Just my own rambling here...
MySQL and ZooKeeper both store data, but they store it in somewhat different ways: MySQL stores data in tables, while ZooKeeper stores it in a tree structure. ZooKeeper also tackles a problem that MySQL has never really solved, namely concurrency, and that is ZooKeeper's advantage: it deals with concurrency by running as a cluster (this is putting it a bit too absolutely).
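To make the tree point concrete, here is a small sketch of my own (it assumes the /1708a1 subtree from the example above already exists) that walks a znode subtree recursively, something a flat MySQL table has no direct analogue for:

package com.huhu.zookeeper;

import org.apache.zookeeper.ZooKeeper;

// Sketch: print a znode subtree to show ZooKeeper's tree-shaped storage.
public class TreeDump {

    static void dump(ZooKeeper zk, String path, String indent) throws Exception {
        byte[] data = zk.getData(path, false, null);
        System.out.println(indent + path + " = " + (data == null ? "" : new String(data)));
        for (String child : zk.getChildren(path, false)) {
            String childPath = path.equals("/") ? "/" + child : path + "/" + child;
            dump(zk, childPath, indent + "  ");
        }
    }

    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("hu-hadoop1:2181,hu-hadoop2:2181,hu-hadoop3:2181", 2000,
                event -> { /* connection events ignored in this sketch */ });
        Thread.sleep(1000); // crude wait for the session to connect
        dump(zk, "/1708a1", "");
        zk.close();
    }
}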
With MySQL's master-slave mechanism, writes can only go to the master. ZooKeeper has no such restriction: you can write through any server, and the data is synchronized with the leader in real time, so the followers and the leader stay consistent.
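A quick way to see what that means in practice, again only a sketch under the assumption that the hu-hadoopX servers above are all running and /demo is a throwaway path: write through a session connected to one server and read the value back through a session connected to another.

package com.huhu.zookeeper;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;

// Sketch: a write accepted by one server in the ensemble is visible through the others.
public class CrossServerWrite {

    public static void main(String[] args) throws Exception {
        ZooKeeper writer = new ZooKeeper("hu-hadoop2:2181", 2000, e -> {});
        ZooKeeper reader = new ZooKeeper("hu-hadoop3:2181", 2000, e -> {});
        Thread.sleep(1000); // crude wait for both sessions to connect

        if (writer.exists("/demo", false) == null) {
            writer.create("/demo", "hello".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        } else {
            writer.setData("/demo", "hello".getBytes(), -1);
        }

        // The write went in through hu-hadoop2 but can be read back through hu-hadoop3
        System.out.println(new String(reader.getData("/demo", false, null)));

        writer.close();
        reader.close();
    }
}

Internally the write is forwarded to the leader and replicated to the other servers, which is what keeps them consistent.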
Because MySQL does have concurrency problems, mainly between reads and writes, you run into the classic threading concerns (atomicity, visibility, ordering) and isolation levels. That is why solutions like RabbitMQ and Solr get pulled in: they take read load off the database so that it can read and write more reliably. ZooKeeper, for its part, can also act as a monitor, or a big blackboard, that lowers the coupling between services and handles the concurrency problem.
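As an illustration of that "blackboard" idea, here is a sketch of my own (the /config/app path and the Blackboard class are made up): one side writes a value to a znode, and anything watching that znode reacts, without the two sides calling each other directly.

package com.huhu.zookeeper;

import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;

// Sketch: use a znode as a shared "blackboard" that decouples a publisher from its readers.
public class Blackboard {

    static ZooKeeper zk;
    static CountDownLatch cdl = new CountDownLatch(1);

    static Watcher configWatch = new Watcher() {
        @Override
        public void process(WatchedEvent event) {
            try {
                // Re-read the value and re-register the watch (watches are one-shot)
                byte[] data = zk.getData("/config/app", configWatch, null);
                System.out.println("config changed: " + new String(data));
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    };

    public static void main(String[] args) throws Exception {
        zk = new ZooKeeper("hu-hadoop1:2181,hu-hadoop2:2181,hu-hadoop3:2181", 2000, e -> {});
        Thread.sleep(1000); // crude wait for the session to connect

        if (zk.exists("/config", false) == null) {
            zk.create("/config", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }
        if (zk.exists("/config/app", false) == null) {
            zk.create("/config/app", "v1".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }

        // Reader side: watch the blackboard
        zk.getData("/config/app", configWatch, null);

        // Publisher side: write a new value; every watcher is notified
        zk.setData("/config/app", "v2".getBytes(), -1);

        cdl.await();
    }
}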
None of this is necessarily correct; it is just my own feeling about how the two compare.