• Environment Setup


    When working with cloud servers I often need to set up an environment from scratch, so here is a record of the commands I use most.

    Common commands

    Check whether the system is 32-bit or 64-bit:
    getconf LONG_BIT

    Keep SSH sessions from being disconnected (server-side keepalive):
    vim /etc/ssh/sshd_config

    ClientAliveInterval 60
    ClientAliveCountMax 600
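
    Before restarting sshd it is worth validating the edited file, since a broken config can lock you out; sshd -t prints nothing when the configuration is valid:
    sshd -t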

    systemctl restart sshd.service
    Create software directories
    mkdir /opt/hadoop
    mkdir /opt/hive
    mkdir /opt/pkg
    mkdir /opt/mysql
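
    The same four directories can also be created in one line with bash brace expansion (assuming a bash shell):
    mkdir -p /opt/{hadoop,hive,pkg,mysql}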

    JAVA

    mkdir /opt/java
    cd /opt/java
    wget https://mirrors.tuna.tsinghua.edu.cn/Adoptium/8/jdk/x64/linux/OpenJDK8U-jdk_x64_linux_hotspot_8u345b01.tar.gz
    mv OpenJDK8U-jdk_x64_linux_hotspot_8u345b01.tar.gz jdk.tar.gz
    tar -xvf jdk.tar.gz
    mv jdk8u345-b01 jdk
    mv jdk.tar.gz /opt/pkg/
    Environment variables:
    vim /root/.bashrc

    alias l='ls -l'

    export JAVA_HOME=/opt/java/jdk
    export JRE_HOME=/opt/java/jdk/jre
    export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib:$CLASSPATH
    export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH

    source /root/.bashrc

    java -version

    HADOOP

    cd /opt/hadoop/
    wget https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-3.3.2/hadoop-3.3.2.tar.gz
    tar -xvf hadoop-3.3.2.tar.gz
    mv hadoop-3.3.2 hadoop
    mv hadoop-3.3.2.tar.gz hadoop.tar.gz
    mv hadoop.tar.gz /opt/pkg/
    Environment variables:
    vim /root/.bashrc

    export HADOOP_HOME=/opt/hadoop/hadoop/
    export PATH=${PATH}:${HADOOP_HOME}/bin

    Edit the configuration:
    vim /opt/hadoop/hadoop/etc/hadoop/hadoop-env.sh
    export JAVA_HOME=/opt/java/jdk

    source /root/.bashrc

    hadoop version
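
    As a further smoke test, a sketch assuming the default standalone (local) mode and the bundled examples jar, the pi example can be run:
    hadoop jar /opt/hadoop/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.3.2.jar pi 2 5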

    MYSQL

    mkdir /opt/pkg
    mkdir /opt/mysql

    cd /opt/mysql
    wget https://mirrors.tuna.tsinghua.edu.cn/mysql/downloads/MySQL-8.0/mysql-8.0.27-el7-x86_64.tar.gz
    tar -xvf mysql-8.0.27-el7-x86_64.tar.gz
    mv mysql-8.0.27-el7-x86_64.tar.gz mysql.tar.gz
    mv mysql.tar.gz /opt/pkg/
    mv mysql-8.0.27-el7-x86_64 mysql

    groupadd mysql && useradd -r -g mysql mysql
    mkdir -p /opt/mysql/mysql/data/mysql
    chown mysql:mysql -R /opt/mysql

    vim /etc/my.cnf


    [mysqld]
    bind-address=0.0.0.0
    port=3306
    user=mysql
    basedir=/opt/mysql/mysql
    datadir=/opt/mysql/mysql/data/mysql
    socket=/tmp/mysql.sock
    log-error=/opt/mysql/mysql/data/mysql/mysql.err
    pid-file=/opt/mysql/mysql/data/mysql/mysql.pid
    character_set_server=utf8mb4
    symbolic-links=0
    explicit_defaults_for_timestamp=true


    yum install -y libaio
    cd /opt/mysql/mysql/bin
    ./mysqld --defaults-file=/etc/my.cnf --basedir=/opt/mysql/mysql --datadir=/opt/mysql/mysql/data/mysql --user=mysql --initialize

    Find the temporary root password in the error log:
    cat /opt/mysql/mysql/data/mysql/mysql.err

    root zEJ<S3qZflE,
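
    Rather than reading the whole log, the password line can be filtered directly, since mysqld logs it as "A temporary password is generated for root@localhost":
    grep 'temporary password' /opt/mysql/mysql/data/mysql/mysql.err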

    cp /opt/mysql/mysql/support-files/mysql.server /etc/init.d/mysql
    service mysql start

    cd /opt/mysql/mysql/bin
    ./mysql -u root -p    # log in with the temporary password, then change it below
    use mysql;
    alter user 'root'@'localhost' identified by '123456';
    update user set host = '%' where user = 'root';
    flush privileges;
    -- show the port
    show global variables like 'port';
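
    With the host set to '%', remote access can be verified from another machine (a sketch; replace the placeholder with the server's public IP and make sure port 3306 is reachable):
    mysql -h <server IP> -P 3306 -u root -p'123456'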



    create database test;
    use test;
    CREATE TABLE IF NOT EXISTS student (
        id INT(20) NOT NULL AUTO_INCREMENT COMMENT 'student ID',
        name VARCHAR(30) NOT NULL COMMENT 'name',
        score INT(2) NOT NULL COMMENT 'score',
        PRIMARY KEY(id)
    )ENGINE=INNODB DEFAULT CHARSET=utf8;
    insert into student values(1,'zhangsan',1);
    insert into student values(2,'lisi',2);
    insert into student values(3,'wangwu',3);
    insert into student values(4,'sunliu',4);
    insert into student values(5,'liuqi',5);


    vim /root/.bashrc

    export MYSQL_HOME=/opt/mysql/mysql
    export PATH=$PATH:$MYSQL_HOME/bin


    Common MySQL commands:
    ALTER TABLE table_name CHANGE COLUMN diff dif FLOAT(10,4);  -- rename/redefine a column


    set global local_infile=1;
    show global variables like 'local_infile';
    load data local infile '/root/data/test.sql'  -- MySQL bulk-import command
    into table test                    -- target table
    character set utf8                 -- encoding
    fields terminated by ','           -- field delimiter
    lines terminated by '\r\n'         -- line terminator; \r\n on Windows
    ignore 1 lines;                    -- skip the header row, since the table already exists
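
    Note that local_infile must also be enabled on the client side or the load is rejected; the standard client flag is:
    mysql --local-infile=1 -u root -p'123456'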


    select trx_mysql_thread_id from information_schema.innodb_trx;  -- list threads of running InnoDB transactions
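
    A blocking transaction found this way can then be terminated by its thread id (12345 below is a hypothetical id):
    mysql -u root -p'123456' -e 'kill 12345;'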

    HIVE

    cd /opt/hive
    wget https://mirrors.tuna.tsinghua.edu.cn/apache/hive/hive-3.1.3/apache-hive-3.1.3-bin.tar.gz
    tar -xvf apache-hive-3.1.3-bin.tar.gz
    mv apache-hive-3.1.3-bin hive
    mv apache-hive-3.1.3-bin.tar.gz hive.tar.gz 
    mv hive.tar.gz /opt/pkg/

    Environment variables:
    vim /root/.bashrc

    export HIVE_HOME=/opt/hive/hive
    export PATH=$PATH:$HIVE_HOME/bin

    source /root/.bashrc
    hive --version

    cd /opt/hive/hive/conf
    cp hive-env.sh.template hive-env.sh

    vim hive-env.sh

    export HADOOP_HOME=/opt/hadoop/hadoop/
    export HIVE_CONF_DIR=/opt/hive/hive/conf

    vim hive-site.xml

    <configuration>
      <property>
<!-- Show column headers in query results -->
         <name>hive.cli.print.header</name>
         <value>true</value>
      </property>
      <property>
<!-- Show the current database in the CLI prompt -->
         <name>hive.cli.print.current.db</name>
         <value>true</value>
      </property>
      <property>
<!-- Default warehouse location; this is a path on HDFS -->
         <name>hive.metastore.warehouse.dir</name>
         <value>/opt/hive/hive/data/</value>
      </property>
      <!-- connection settings for MySQL 8.x -->
      <property>
         <name>javax.jdo.option.ConnectionURL</name>
         <value>jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false&amp;serverTimezone=GMT&amp;allowPublicKeyRetrieval=true</value>
      </property>
      <!-- MySQL 8.x driver (Connector/J 8) -->
      <property>
         <name>javax.jdo.option.ConnectionDriverName</name>
         <value>com.mysql.cj.jdbc.Driver</value>
      </property>
      <property>
         <name>javax.jdo.option.ConnectionUserName</name>
         <value>root</value>
      </property>
      <property>
         <name>javax.jdo.option.ConnectionPassword</name>
         <value>123456</value>
      </property>
      <!-- Port and bind host for the HiveServer2 service -->
      <property>
         <name>hive.server2.thrift.port</name>
         <value>10000</value>
      </property>
      <property>
         <name>hive.server2.thrift.bind.host</name>
         <value>localhost</value>
      </property>
    </configuration>



    hadoop fs -mkdir -p /opt/hive/hive/data/
    hadoop fs -chmod g+w /tmp
    hadoop fs -chmod g+w /opt/hive/hive/data/
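
    schematool needs the MySQL JDBC driver on Hive's classpath before the schema can be initialized; a sketch assuming Connector/J 8.0.27 fetched from Maven Central:
    wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.27/mysql-connector-java-8.0.27.jar
    cp mysql-connector-java-8.0.27.jar /opt/hive/hive/lib/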
    schematool -dbType mysql -initSchema


    If schematool fails, the metastore database can be reset in MySQL and the initialization re-run:
    drop database hive;
    create database hive;
    alter database hive character set latin1;

    Debug mode:
    hive --debug -hiveconf hive.root.logger=DEBUG,console

    create table data (id bigint, user_id string, `desc` string)
    partitioned by (dt string)
    row format delimited fields terminated by '|';

    insert into table data partition (dt='20220801')
    select 1 as id, '1' as user_id, '1' as `desc`
    union all
    select 2 as id, '2' as user_id, '2' as `desc`
    union all
    select 3 as id, '3' as user_id, '3' as `desc`
    union all
    select 4 as id, '4' as user_id, '4' as `desc`
    union all
    select 5 as id, '5' as user_id, '5' as `desc`;
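
    The partition and rows can be checked non-interactively afterwards:
    hive -e "show partitions data; select * from data where dt='20220801';"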

    Spark

            mkdir /opt/spark/
            cd /opt/spark
            wget https://mirrors.tuna.tsinghua.edu.cn/apache/spark/spark-3.1.3/spark-3.1.3-bin-hadoop3.2.tgz
            tar -xvf spark-3.1.3-bin-hadoop3.2.tgz
            mv spark-3.1.3-bin-hadoop3.2 spark
            mv spark-3.1.3-bin-hadoop3.2.tgz spark.tgz
            mv spark.tgz /opt/pkg/

            cd /opt/spark/spark/conf
            cp spark-env.sh.template spark-env.sh
            vim spark-env.sh
            ---------------------------------------------
            export JAVA_HOME=/opt/java/jdk
            export JRE_HOME=/opt/java/jdk/jre
            export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib:$CLASSPATH
            export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH
            export HADOOP_HOME=/opt/hadoop/hadoop
            export PATH=${PATH}:${HADOOP_HOME}/bin

            cp spark-defaults.conf.template spark-defaults.conf
            mkdir /opt/spark/spark/dbdata
            mkdir /opt/spark/spark/dbdata/warehouse
            vim spark-defaults.conf
            --------------------------------------------
            spark.driver.extraJavaOptions -Dderby.system.home=/opt/spark/spark/dbdata
            spark.sql.warehouse.dir=/opt/spark/spark/dbdata/warehouse

            vim /root/.bashrc
            --------------------------------------------
            export SPARK_HOME=/opt/spark/spark
            export PATH=$PATH:$SPARK_HOME/bin
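
            After reloading the shell, the bundled SparkPi example makes a quick smoke test:
            source /root/.bashrc
            run-example SparkPi 10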

    ZK

        mkdir /opt/zookeeper
        cd /opt/zookeeper
        wget https://mirrors.tuna.tsinghua.edu.cn/apache/zookeeper/zookeeper-3.6.3/apache-zookeeper-3.6.3-bin.tar.gz
        tar -xvf apache-zookeeper-3.6.3-bin.tar.gz
        mv apache-zookeeper-3.6.3-bin zookeeper
        mv apache-zookeeper-3.6.3-bin.tar.gz zookeeper.tar.gz
        mv zookeeper.tar.gz /opt/pkg/
        cd /opt/zookeeper/zookeeper/conf/
        mv zoo_sample.cfg zoo.cfg
        mkdir /opt/zookeeper/zookeeper/data
        vim zoo.cfg
            dataDir=/opt/zookeeper/zookeeper/data/

        cd /opt/zookeeper/zookeeper/bin

        Start/stop the server process:
        ./zkServer.sh start
        ./zkServer.sh stop

        Start the client:
        ./zkCli.sh
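
        A few sanity checks can also be run non-interactively, since zkCli accepts a command after the connection options:
        ./zkCli.sh -server 127.0.0.1:2181 create /test hello
        ./zkCli.sh -server 127.0.0.1:2181 get /test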

    hbase

        A separate ZooKeeper install is not required; in standalone mode HBase manages an embedded one.
        mkdir /opt/hbase
        cd /opt/hbase
        wget https://mirrors.tuna.tsinghua.edu.cn/apache/hbase/2.5.0/hbase-2.5.0-bin.tar.gz
        tar -xvf hbase-2.5.0-bin.tar.gz
        mv hbase-2.5.0 hbase
        mv hbase-2.5.0-bin.tar.gz hbase.tar.gz
        mv hbase.tar.gz /opt/pkg/
        mkdir /opt/hbase/hbase/data

        cd /opt/hbase/hbase/conf/
        vim hbase-env.sh
            export JAVA_HOME=/opt/java/jdk/
        vim hbase-site.xml

        <configuration>
           <property>
             <name>hbase.cluster.distributed</name>
             <value>false</value>
           </property>
           <property>
             <name>hbase.tmp.dir</name>
             <value>/tmp/hbase/</value>
           </property>
           <property>
             <name>hbase.unsafe.stream.capability.enforce</name>
             <value>false</value>
           </property>
           <property>
             <name>hbase.rootdir</name>
             <value>file:///opt/hbase/hbase/data</value>
           </property>
           <property>
             <name>hbase.zookeeper.property.dataDir</name>
             <value>/root/data/hbase/zookeeper</value>
           </property>
           <property>
             <name>hbase.master.info.port</name>
             <value>60010</value>
           </property>
        </configuration>

        vim /root/.bashrc
        export HBASE_HOME=/opt/hbase/hbase
        export PATH=$PATH:$HBASE_HOME/bin

        Start/stop commands:
        cd /opt/hbase/hbase/bin
        ./start-hbase.sh
        ./stop-hbase.sh
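
        A quick check from the HBase shell, which also accepts piped commands (the table and column-family names below are arbitrary examples):
        echo "create 't1','cf'" | ./hbase shell
        echo "put 't1','r1','cf:a','v1'" | ./hbase shell
        echo "scan 't1'" | ./hbase shell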

    Flink

        mkdir /opt/flink
        cd /opt/flink/
        wget https://mirrors.tuna.tsinghua.edu.cn/apache/flink/flink-1.15.2/flink-1.15.2-bin-scala_2.12.tgz
        tar -xvf flink-1.15.2-bin-scala_2.12.tgz
        mv flink-1.15.2-bin-scala_2.12.tgz flink_scala_2.12.tgz
        mv flink_scala_2.12.tgz /opt/pkg/
        mv flink-1.15.2 flink

        vim /root/.bashrc
        export FLINK_HOME=/opt/flink/flink
        export PATH=$FLINK_HOME/bin:$PATH

        cd /opt/flink/flink/bin/
        ./start-cluster.sh

        flink run ../examples/batch/WordCount.jar --input /opt/flink/flink/README.txt --output /opt/flink/flink/result
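
        If the job succeeds, the counts land in the output path given above (with parallelism 1 it is a single file):
        cat /opt/flink/flink/result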

    redis

    mkdir /opt/redis/
    cd /opt/redis/
    wget https://download.redis.io/releases/redis-6.2.6.tar.gz
    mv redis-6.2.6.tar.gz redis.tar.gz
    tar -xvf redis.tar.gz 
    mv redis.tar.gz /opt/pkg/
    mv redis-6.2.6 redis


    cd /opt/redis/redis
    yum -y install gcc automake autoconf libtool make
    make install PREFIX=/opt/redis/redis

    vim redis.conf
    daemonize yes
    bind 0.0.0.0

    ./bin/redis-server redis.conf


    ./bin/redis-cli
    set test hello
    get test
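
    ping works as a liveness check, and the daemonized server can be stopped through the CLI as well:
    ./bin/redis-cli ping
    ./bin/redis-cli shutdown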

    Kafka

        mkdir /opt/kafka/
        cd /opt/kafka/
        wget https://mirrors.tuna.tsinghua.edu.cn/apache/kafka/3.1.2/kafka_2.13-3.1.2.tgz
        mv kafka_2.13-3.1.2.tgz kafka.tgz
        tar -xvf kafka.tgz
        mv kafka.tgz /opt/pkg/
        mv kafka_2.13-3.1.2 kafka

        cd /opt/kafka/kafka/config

        vim server.properties

        broker.id=0
        # address advertised to clients; use the server's public IP
        advertised.listeners=PLAINTEXT://<public IP>:9092
        # log directory; changing it is optional
        log.dirs=/opt/kafka/kafka/log/
        # use the private IP here; allow the public IP, private IP, and 127.0.0.1 through the security group
        zookeeper.connect=<private IP>:2181

        cd /opt/kafka/kafka/bin/
        ## start ZooKeeper
        ./zookeeper-server-start.sh -daemon /opt/kafka/kafka/config/zookeeper.properties
        ## start Kafka
        ./kafka-server-start.sh -daemon /opt/kafka/kafka/config/server.properties

        ## delete a topic
        ./kafka-topics.sh --delete --bootstrap-server localhost:9092 --topic events
        ## create a topic
        ./kafka-topics.sh --create --topic events --bootstrap-server localhost:9092
        ## describe a topic
        ./kafka-topics.sh --describe --topic events --bootstrap-server localhost:9092


        ## start a console producer
        ./kafka-console-producer.sh --topic events --bootstrap-server localhost:9092
        ## consume and print the topic from the beginning
        ./kafka-console-consumer.sh --topic events --from-beginning --bootstrap-server localhost:9092
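
        Listing all topics is a quick way to confirm the broker is reachable:
        ./kafka-topics.sh --list --bootstrap-server localhost:9092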


    datax

        mkdir  /opt/datax
        cd   /opt/datax
        wget https://datax-opensource.oss-cn-hangzhou.aliyuncs.com/202209/datax.tar.gz
        tar -xvf datax.tar.gz 
        mv datax.tar.gz /opt/pkg/
        cd /opt/datax/datax/bin/
        chmod 700 ./datax.py
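
        DataX ships with a stream-to-stream sample job that works as a self-check (datax.py requires Python on the PATH):
        python ./datax.py ../job/job.json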