

    Hadoop 2.0 Restart Script

    To make restarting the HA-enabled cluster easier, I wrote this script:

    #!/bin/bash

    # restart ZooKeeper on this node
    sh /opt/zookeeper-3.4.5-cdh4.4.0/bin/zkServer.sh restart

    # make sure the local JournalNode is up before HDFS comes back
    sh /opt/hadoop-2.0.0-cdh4.5.0/sbin/hadoop-daemon.sh start journalnode

    # stop and restart HDFS and the HA MRv1 daemons
    sh /opt/hadoop-2.0.0-cdh4.5.0/sbin/stop-dfs.sh
    sh /opt/hadoop-2.0.0-cdh4.5.0/bin-mapreduce1/stop-mapred-ha.sh
    sh /opt/hadoop-2.0.0-cdh4.5.0/sbin/start-dfs.sh
    sh /opt/hadoop-2.0.0-cdh4.5.0/bin-mapreduce1/start-mapred-ha.sh

    # fail the NameNode over from nn1 back to nn0
    sh /opt/hadoop-2.0.0-cdh4.5.0/bin/hdfs haadmin -failover nn1 nn0

    # optionally pause before the JobTracker failover:
    # sleep 30

    # fail the JobTracker over from jt2 back to jt1
    sh /opt/hadoop-2.0.0-cdh4.5.0/bin-mapreduce1/hadoop mrhaadmin -failover jt2 jt1
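    After the restart it is worth confirming that the failovers actually landed. A quick check, assuming the same nn0/nn1 and jt1/jt2 service IDs as above (haadmin's -getServiceState is standard; mrhaadmin mirroring it is an assumption about the CDH4 MRv1 HA tooling):

    # each command prints "active" or "standby" for the given service ID
    sh /opt/hadoop-2.0.0-cdh4.5.0/bin/hdfs haadmin -getServiceState nn0
    sh /opt/hadoop-2.0.0-cdh4.5.0/bin-mapreduce1/hadoop mrhaadmin -getServiceState jt1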

    Appendix:

    Starting the HA JobTracker on its own

    hadoop-cdh4.5-ha-start-up

    Configuration:

    File                              Location
    File 1: start-mapred-ha.sh        $HADOOP_HOME/bin-mapreduce1/start-mapred-ha.sh
    File 2: stop-mapred-ha.sh         $HADOOP_HOME/bin-mapreduce1/stop-mapred-ha.sh
    File 3: hadoop-daemons-mrha.sh    $HADOOP_HOME/bin-mapreduce1/hadoop-daemons-mrha.sh
    File 4: hagroups.sh               $HADOOP_HOME/bin-mapreduce1/hagroups.sh
    File 5: hagroups                  $HADOOP_CONF_DIR/hagroups
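    To install them, copy the four scripts into bin-mapreduce1 and the host list into the config directory. A sketch, assuming the files are in the current directory and the CDH4.5 layout used above:

    HADOOP_HOME=/opt/hadoop-2.0.0-cdh4.5.0
    # scripts go next to the other MRv1 helpers and must be executable
    cp start-mapred-ha.sh stop-mapred-ha.sh hadoop-daemons-mrha.sh hagroups.sh "$HADOOP_HOME/bin-mapreduce1/"
    chmod +x "$HADOOP_HOME"/bin-mapreduce1/{start-mapred-ha,stop-mapred-ha,hadoop-daemons-mrha,hagroups}.sh
    # the host list lives in the Hadoop configuration directory
    cp hagroups "$HADOOP_CONF_DIR/"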

    Usage:

    Start: $HADOOP_HOME/bin-mapreduce1/start-mapred-ha.sh

    Stop: $HADOOP_HOME/bin-mapreduce1/stop-mapred-ha.sh
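    Both scripts are meant to be run from one master node and reach every host in $HADOOP_CONF_DIR/hagroups over SSH, so passwordless SSH from that node is assumed. A quick pre-flight check (host names taken from the hagroups file below):

    # BatchMode makes ssh fail fast instead of prompting for a password
    for h in h2master1 h2master2; do
      ssh -o BatchMode=yes "$h" true && echo "$h ok"
    done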

    File 1: start-mapred-ha.sh

    #!/usr/bin/env bash

    bin=`dirname "$0"`

    bin=`cd "$bin"; pwd`

    . "$bin"/hadoop-config.sh

    # start the zkfc and JobTracker HA daemons before the tasktrackers to minimize connection errors at startup

    "$bin"/hadoop-daemons-mrha.sh --config $HADOOP_CONF_DIR start mrzkfc

    "$bin"/hadoop-daemons-mrha.sh --config $HADOOP_CONF_DIR start jobtrackerha

    "$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR start tasktracker

    File 2: stop-mapred-ha.sh

    #!/usr/bin/env bash

    # Stop hadoop map reduce daemons.  Run this on master node.

    bin=`dirname "$0"`

    bin=`cd "$bin"; pwd`

    . "$bin"/hadoop-config.sh

    "$bin"/hadoop-daemons-mrha.sh --config $HADOOP_CONF_DIR stop jobtrackerha

    "$bin"/hadoop-daemons-mrha.sh --config $HADOOP_CONF_DIR stop mrzkfc

    "$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR stop tasktracker

    File 3: hadoop-daemons-mrha.sh

    #!/usr/bin/env bash

    usage="Usage: hadoop-daemons-mrha.sh [--config confdir] [--hosts hostlistfile] [start|stop] command args..."

    # if no args specified, show usage

    if [ $# -le 1 ]; then

      echo $usage

      exit 1

    fi

    bin=`dirname "$0"`

    bin=`cd "$bin"; pwd`

    . "$bin"/hadoop-config.sh

    # the escaped ';' is passed to hagroups.sh as a literal argument, so each
    # remote host runs: cd $HADOOP_HOME ; hadoop-daemon.sh --config ... <cmd>
    exec "$bin/hagroups.sh" --config $HADOOP_CONF_DIR cd "$HADOOP_HOME" \; "$bin/hadoop-daemon.sh" --config $HADOOP_CONF_DIR "$@"

    File 4: hagroups.sh

    #!/usr/bin/env bash

    usage="Usage: hagroups.sh [--config confdir] command..."

    # if no args specified, show usage

    if [ $# -le 0 ]; then

      echo $usage

      exit 1

    fi

    bin=`dirname "$0"`

    bin=`cd "$bin"; pwd`

    . "$bin"/hadoop-config.sh

    # If the slaves file is specified in the command line,

    # then it takes precedence over the definition in 

    # hadoop-env.sh. Save it here.

    HOSTLIST=$HADOOP_SLAVES

    if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then

      . "${HADOOP_CONF_DIR}/hadoop-env.sh"

    fi

    if [ "$HOSTLIST" = "" ]; then

      if [ "$HADOOP_SLAVES" = "" ]; then

        export HOSTLIST="${HADOOP_CONF_DIR}/hagroups"

      else

        export HOSTLIST="${HADOOP_SLAVES}"

      fi

    fi

    for slave in `cat "$HOSTLIST"|sed  "s/#.*$//;/^$/d"`; do
     ssh $HADOOP_SSH_OPTS $slave $"${@// /\\ }" \
       2>&1 | sed "s/^/$slave: /" &

     if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then

       sleep $HADOOP_SLAVE_SLEEP

     fi

    done

    wait
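    Since hagroups.sh is essentially Hadoop's stock slaves.sh pointed at a different host list, it can also run ad-hoc commands on every JobTracker host, which makes a handy smoke test:

    # runs 'uptime' on every host in $HADOOP_CONF_DIR/hagroups;
    # each output line comes back prefixed with its host name
    $HADOOP_HOME/bin-mapreduce1/hagroups.sh uptime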

    File 5: hagroups (one master hostname per line)

    h2master1

    h2master2
