
Modify the SSH configuration (the sudoers file, so that `ssh host sudo ...` can run without a TTY)

cd /etc
chmod 777 sudoers
vim sudoers
# Comment out the `Defaults requiretty` line    ------ around line 56
chmod 440 sudoers    # restore the default permissions on sudoers
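
If you would rather not open the file by hand, the same edit can be made non-interactively. A minimal sketch, assuming GNU sed and that the directive is written as `Defaults requiretty`:

# back up sudoers first, then comment out the requiretty directive in place
cp /etc/sudoers /etc/sudoers.bak
sed -i 's/^Defaults\s\+requiretty/# &/' /etc/sudoers
# check that the file still parses before relying on it
visudo -c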

Environment variables

cd /etc/profile.d
vim myenv.sh
# JAVA
export JAVA_HOME=/opt/modules/java
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/jre/lib/rt.jar
export PATH=$PATH:$JAVA_HOME/bin

# spark
export SPARK_HOME=/opt/modules/spark-3.1.2
export PATH=$PATH:$SPARK_HOME/bin

# scala
export SCALA_HOME=/opt/modules/scala-2.12.15
export PATH=$PATH:$SCALA_HOME/bin

# minio
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123

# HADOOP_HOME
export HADOOP_HOME=/opt/modules/hadoop-3.1.3
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin

# HIVE_HOME
export HIVE_HOME=/opt/modules/hive
export PATH=$PATH:$HIVE_HOME/bin

# ZOOKEEPER_HOME
export ZOOKEEPER_HOME=/opt/modules/zookeeper-3.4.14
export PATH=$PATH:$ZOOKEEPER_HOME/bin

# KAFKA_HOME
export KAFKA_HOME=/opt/modules/kafka_2.11-2.1.1
export PATH=$PATH:$KAFKA_HOME/bin

# MYSQL_HOME
export MYSQL_HOME=/opt/modules/mysql
export PATH=$PATH:$MYSQL_HOME/bin

# HBASE_HOME
export HBASE_HOME=/opt/modules/hbase-2.2.6
export PATH=$PATH:$HBASE_HOME/bin
export HBASE_LIBRARY_PATH=/opt/modules/hbase-2.2.6/lib/native/Linux-amd64-64

Don't forget to source it! Don't forget to source it! Don't forget to source it!

source /etc/profile
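
The start script below assumes these variables exist on all three machines, so myenv.sh has to be copied to every node. A minimal sketch for pushing it out and sanity-checking it, assuming the other hosts are lab2 and lab3 (as in the scripts below) and that root SSH access already works:

# copy the environment file to the other nodes and verify it takes effect there
for host in lab2 lab3; do
        scp /etc/profile.d/myenv.sh root@$host:/etc/profile.d/
        ssh root@$host 'source /etc/profile && java -version && hadoop version'
done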

Startup script --- (name it whatever you like, but it must end in .sh, e.g. start.sh)

#! /bin/bash

## Prerequisite: the environment variables are set on all three machines; otherwise replace the commands below with full paths
## start zookeeper cluster 
ssh lab1 zkServer.sh start
ssh lab2 zkServer.sh start
ssh lab3 zkServer.sh start

## start hdfs
start-dfs.sh

## start yarn
ssh lab2 start-yarn.sh

## start hbase
start-hbase.sh

## start kafka
ssh lab1 kafka-server-start.sh -daemon /opt/modules/kafka_2.11-2.1.1/config/server.properties
ssh lab2 kafka-server-start.sh -daemon /opt/modules/kafka_2.11-2.1.1/config/server.properties
ssh lab3 kafka-server-start.sh -daemon /opt/modules/kafka_2.11-2.1.1/config/server.properties

## start solr --------- requires the SSH/sudoers change above
ssh lab1 sudo -i -u solr /opt/modules/solr/bin/solr start
ssh lab2 sudo -i -u solr /opt/modules/solr/bin/solr start
ssh lab3 sudo -i -u solr /opt/modules/solr/bin/solr start

# start atlas
ssh lab2 /opt/modules/atlas-2.2.0/bin/atlas_start.py
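
Every `ssh labN ...` line in these scripts relies on passwordless root SSH from the machine running the script to lab1, lab2 and lab3. A minimal sketch of setting that up, assuming no key exists yet for root:

# generate a key once, then push it to all three nodes
ssh-keygen -t rsa -b 4096 -N '' -f ~/.ssh/id_rsa
for host in lab1 lab2 lab3; do
        ssh-copy-id root@$host
done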

Shutdown script --- (name it whatever you like, but it must end in .sh, e.g. stop.sh)

#! /bin/bash
## Prerequisite: the environment variables are set on all three machines; otherwise replace the commands below with full paths

# stop atlas
ssh lab2 /opt/modules/atlas-2.2.0/bin/atlas_stop.py

## stop solr
ssh lab1 sudo -i -u solr /opt/modules/solr/bin/solr stop
ssh lab2 sudo -i -u solr /opt/modules/solr/bin/solr stop
ssh lab3 sudo -i -u solr /opt/modules/solr/bin/solr stop


## stop kafka
ssh lab1 kafka-server-stop.sh
ssh lab2 kafka-server-stop.sh
ssh lab3 kafka-server-stop.sh

## stop hbase
stop-hbase.sh

## stop yarn
ssh lab2 stop-yarn.sh

## stop hdfs
stop-dfs.sh

## stop zookeeper cluster 
ssh lab1 zkServer.sh stop
ssh lab2 zkServer.sh stop
ssh lab3 zkServer.sh stop
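
Once saved, the two scripts only need to be made executable. A usage sketch, assuming they were named start.sh and stop.sh as suggested above:

chmod +x start.sh stop.sh
./start.sh   # bring the whole cluster up
./stop.sh    # shut it down again, in reverse order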

Monitoring script

cd /usr/local/bin
touch jpsall
chmod +x jpsall
vim jpsall

#!/bin/bash
# Run jps on every server to check the status of its nodes
echo ====================== Cluster node status ====================

for i in lab1 lab2 lab3
do
        echo ====================== $i ====================
        ssh root@$i '/opt/modules/java/bin/jps'
done
echo ====================== Done ====================
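
Since jpsall sits in /usr/local/bin and is executable, it can be run directly from any shell on that node. A quick usage example (the grep line is just an illustration, assuming you want to confirm one daemon is up everywhere):

jpsall                   # list the Java processes on lab1, lab2 and lab3
jpsall | grep -i kafka   # e.g. confirm the Kafka broker is running on every node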