
Shell Script One-Click Install (Simplified Version)

Preparation:


  • A machine that can reach the Internet and accept an Xshell connection, running a CentOS 7 image
  • mkdir /opt/download, and upload the following software versions into the download directory:
    apache-flume-1.8.0-bin.tar.gz
    jdk-8u111-linux-x64.tar.gz
    hadoop-2.6.0-cdh5.14.2.tar.gz
    kafka_2.11-0.11.0.2.gz
    hadoop-native-64-2.6.0.tar
    mysql-connector-java-5.1.32.jar
    hbase-1.2.0-cdh5.14.2.tar.gz
    sqoop-1.4.6.bin__hadoop-2.0.4-alpha.tar.gz
    hive-1.1.0-cdh5.14.2.tar.gz
    zookeeper-3.4.5-cdh5.14.2.tar.gz
  • Create a .sh script, grant it execute permission (x), change the IP and hostname mapping in the code below, then run it (a minimal usage sketch follows this list)
  • During the Hadoop format step you currently have to type yes manually three times in a row; this has not been solved here yet
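A minimal usage sketch for the last two steps, assuming the script below is saved as install.sh (the file name is only an example) and that the IP 192.168.221.205 and hostname single11 used throughout have already been replaced with your own:

# save the script below as install.sh (example name), adjust IP/hostname mapping, then:
chmod +x install.sh
./install.sh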

#!/bin/bash
mkdir /opt/software

#mysql
RST=`rpm -qa | grep mariadb`
if [ -n "$RST" ]; then
    yum -y remove $RST
fi
yum -y install wget
wget http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm
rpm -ivh mysql-community-release-el7-5.noarch.rpm
yum -y install mysql-server
cat >/etc/my.cnf<<EOF
[client]
default-character-set=utf8

[mysqld]
default-storage-engine=INNODB
character-set-server=utf8
collation-server=utf8_general_ci

[mysql]
default-character-set=utf8
EOF
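# The my.cnf written above forces UTF-8 for the client, the server and the mysql CLI, and InnoDB as the default storage engine.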

systemctl restart mysql
systemctl status mysql
# Temporarily skip MySQL's login check via the config file; the setting is removed again below
sed -i '/sql_mode=/i\skip-grant-tables' /etc/my.cnf
systemctl restart mysql
#modify password
mysql -uroot -e "use mysql;update user set password=password('root') where user='root';flush privileges;grant all on *.* to root@'%' identified by 'root';"
sed -i '/skip-grant-tables/d' /etc/my.cnf

#jdk
cd /opt/software
tar -zxf /opt/download/jdk-8u111-linux-x64.tar.gz
mv jdk1.8.0_111 jdk180

echo '#java'>>/etc/profile.d/my.sh
echo "export JAVA_HOME&#61;/opt/software/jdk180">>/etc/profile.d/my.sh
echo &#39;export PATH&#61;$JAVA_HOME/bin:$PATH&#39;>>/etc/profile.d/my.sh
echo &#39;export CLASS_PATH&#61;.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar&#39;>>/etc/profile.d/my.shsource /etc/profile
echo $JAVA_HOME
java -version

#passwordless ssh
echo "192.168.221.205 single11">>/etc/hosts
yum -y install expect
expect << EOF
spawn ssh-keygen -t rsa
expect {"save the key" {send "\r";exp_continue}"Overwrite" {send "y\r";exp_continue}"Enter passphrase" {send "\r";exp_continue}"same passphrase" {send "\r"}
}
expect eof
EOF
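# The expect block above answers ssh-keygen's interactive prompts (key location, overwrite, empty passphrase)
# so the RSA key pair is generated without manual input.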

cd /root/.ssh
cat id_rsa.pub >> authorized_keys

#hadoop
#configure clock synchronization
yum -y install ntp
echo '*/1 * * * * /usr/sbin/ntpdate -u pool.ntp.org;clock -w'>/var/spool/cron/root
systemctl stop ntpd
systemctl start ntpd
systemctl status ntpd
systemctl enable ntpd.service
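# The cron entry written above re-syncs the system clock from pool.ntp.org every minute and saves it to the hardware clock.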
#install hadoop
cd /opt/software
tar -zxf /opt/download/hadoop-2.6.0-cdh5.14.2.tar.gz
mv hadoop-2.6.0-cdh5.14.2 hadoop260
cd /opt/software/hadoop260/lib/native/
tar -xf /opt/download/hadoop-native-64-2.6.0.tar
cd /opt/software
chown -R root:root hadoop260

#configure environment variables
echo '#hadoop'>>/etc/profile.d/my.sh
echo "export HADOOP_HOME=/opt/software/hadoop260">>/etc/profile.d/my.sh
echo 'export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/lib:$PATH'>>/etc/profile.d/my.sh
source /etc/profile
echo $HADOOP_HOME
#configure hadoop-env.sh
cd /opt/software/hadoop260/etc/hadoop
sed -i '/export JAVA_HOME=${JAVA_HOME}/ s/${JAVA_HOME}/\/opt\/software\/jdk180/' /opt/software/hadoop260/etc/hadoop/hadoop-env.sh
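# This sed replaces the ${JAVA_HOME} placeholder in hadoop-env.sh with the absolute JDK path,
# so the Hadoop daemons do not depend on JAVA_HOME being exported in their own environment.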
#configure core-site.xml
coresitetxt="
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/software/hadoop260</value>
</property>
<property>
<name>fs.defaultFS</name>
<value>hdfs://192.168.221.205:9000</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
"

for i in $coresitetxt
do
    item=$i
    sed -i "/<\/configuration>/i\\$item" core-site.xml
done
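# Each whitespace-free token in $coresitetxt (one XML tag or element per line) is inserted
# just before the closing </configuration> tag; the same pattern is reused for the other
# *-site.xml files below.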
#configure hdfs-site.xml
hdfssitetxt="
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/opt/software/hadoop260/tmp/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/opt/software/hadoop260/tmp/data</value>
</property>
"

for i in $hdfssitetxt
do
    item=$i
    sed -i "/<\/configuration>/i\\$item" hdfs-site.xml
done
#configure mapred-site.xml
cp mapred-site.xml.template mapred-site.xml
mapredsitetxt="
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
"

for i in $mapredsitetxt
do
    item=$i
    sed -i "/<\/configuration>/i\\$item" mapred-site.xml
done

#configure yarn-site.xml
yarnsitetxt="
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
"

for i in $yarnsitetxt
do
    item=$i
    sed -i "/<\/configuration>/i\\$item" yarn-site.xml
done
cd /opt/software/hadoop260/sbin/

expect << EOF
spawn hdfs namenode -format
expect {"ECDSA key fingerprint is" {send "yes\r";exp_continue}"ECDSA key fingerprint is" {send "yes\r";exp_continue}"ECDSA key fingerprint is" {send "yes\r"}
}
expect eof
EOF
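# NOTE: as mentioned in the preparation notes, this format/startup phase still expects "yes"
# to be typed manually three times (the ECDSA host-key prompts); the expect block above does
# not fully automate it yet.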
source /etc/profile
start-dfs.sh
start-yarn.sh
jps

cd /opt/software
tar -zxf /opt/download/hive-1.1.0-cdh5.14.2.tar.gz
mv hive-1.1.0-cdh5.14.2 hive110
cd /opt/software/hive110/lib/
cp /opt/download/mysql-connector-java-5.1.32.jar ./
touch /opt/software/hive110/conf/hive-site.xml
cat >/opt/software/hive110/conf/hive-site.xml<<EOF
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/opt/software/hive110/warehouse</value>
</property>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://192.168.221.221:3306/hive110?createDatabaseIfNotExist=true</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>root</value>
</property>
<property>
<name>hive.metastore.local</name>
<value>true</value>
</property>
<property>
<name>hive.server2.authentication</name>
<value>NONE</value>
<description>
Expects one of [nosasl, none, ldap, kerberos, pam, custom].
Client authentication types.
NONE: no authentication check
LDAP: LDAP/AD based authentication
KERBEROS: Kerberos/GSSAPI authentication
CUSTOM: Custom authentication provider (use with property hive.server2.custom.authentication.class)
PAM: Pluggable authentication module
NOSASL: Raw transport
</description>
</property>
<property>
<name>hive.server2.thrift.client.user</name>
<value>root</value>
<description>Username to use against thrift client</description>
</property>
<property>
<name>hive.server2.thrift.client.password</name>
<value>root</value>
<description>Password to use against thrift client</description>
</property>
</configuration>
EOF

echo '#hive'>>/etc/profile.d/my.sh
echo "export HIVE_HOME=/opt/software/hive110">>/etc/profile.d/my.sh
echo 'export PATH=$HIVE_HOME/bin:$PATH'>>/etc/profile.d/my.sh
source /etc/profile
echo $HIVE_HOME
cd /opt/software/hive110/bin/
sleep 6s
./schematool -dbType mysql -initSchema
sleep 6s
nohup hive --service metastore>/dev/null 2>&1 &
sleep 6s
nohup hive --service hiveserver2>/dev/null 2>&1 &
sleep 6s
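# The sleep calls above give schematool, the Hive metastore and hiveserver2 time to come up before moving on.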
jps

cd /opt/software
tar -zxf /opt/download/zookeeper-3.4.5-cdh5.14.2.tar.gz
mv zookeeper-3.4.5-cdh5.14.2/ zookeeper345
cd zookeeper345/conf/
cp zoo_sample.cfg zoo.cfg
sed -i '/dataDir=\/tmp\/zookeeper/ s/\/tmp\/zookeeper/\/opt\/software\/zookeeper345\/mydata/' /opt/software/zookeeper345/conf/zoo.cfg
sed -i '/# the port at which the clients will connect/i\server.1=single11:2888:3888' /opt/software/zookeeper345/conf/zoo.cfg
mkdir /opt/software/zookeeper345/mydata
touch /opt/software/zookeeper345/mydata/myid
echo "1">/opt/software/zookeeper345/mydata/myidecho "export ZK_HOME&#61;/opt/software/zookeeper345">>/etc/profile.d/my.sh
echo &#39;export PATH&#61;$ZK_HOME/bin:$PATH&#39;>>/etc/profile.d/my.sh
source /etc/profile
zkServer.sh start
zkServer.sh status
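# ZooKeeper runs as a single standalone node here: server.1 in zoo.cfg points to single11,
# and the myid file written above contains the matching id "1".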
jps

cd /opt/software/
tar -zxf /opt/download/hbase-1.2.0-cdh5.14.2.tar.gz
mv hbase-1.2.0-cdh5.14.2/ hbase120
chown -R root:root /opt/software/hbase120
cd /opt/software/hbase120/conf/
sed -i '28 i\export JAVA_HOME=/opt/software/jdk180' /opt/software/hbase120/conf/hbase-env.sh
sed -i '29 i\export HBASE_MANAGES_ZK=false' /opt/software/hbase120/conf/hbase-env.sh

hbasesitetxt="
<property>
<name>hbase.rootdir</name>
<value>hdfs://192.168.221.221:9000/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>2181</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/opt/software/zookeeper345/mydata</value>
</property>
"

for i in $hbasesitetxt
do
    item=$i
    sed -i "/<\/configuration>/i\\$item" hbase-site.xml
done

echo "export HBASE_HOME=/opt/software/hbase120">>/etc/profile.d/my.sh
echo 'export PATH=$HBASE_HOME/bin:$PATH'>>/etc/profile.d/my.sh
source /etc/profile
start-hbase.sh
jps

#sqoop
cd /opt/software
tar -zxf /opt/download/sqoop-1.4.6.bin__hadoop-2.0.4-alpha.tar.gz
mv sqoop-1.4.6.bin__hadoop-2.0.4-alpha/ sqoop146
cd /opt/software/sqoop146/lib/
cp /opt/download/mysql-connector-java-5.1.32.jar ./
cp /opt/software/hadoop260/share/hadoop/common/hadoop-common-2.6.0-cdh5.14.2.jar ./
cp /opt/software/hadoop260/share/hadoop/hdfs/hadoop-hdfs-2.6.0-cdh5.14.2.jar ./
cp /opt/software/hadoop260/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.6.0-cdh5.14.2.jar ./
cd /opt/software/sqoop146/conf/
cp sqoop-env-template.sh sqoop-env.sh
cat >>sqoop-env.sh <<EOF
#Set path to where bin/hadoop is available
export HADOOP_COMMON_HOME=/opt/software/hadoop260
#Set path to where hadoop-*-core.jar is available
export HADOOP_MAPRED_HOME=/opt/software/hadoop260/share/hadoop/mapreduce
#set the path to where bin/hbase is available
export HBASE_HOME=/opt/software/hbase120
#Set the path to where bin/hive is available
export HIVE_HOME=/opt/software/hive110
#Set the path for where zookeper config dir is
export ZOOCFGDIR=/opt/software/zookeeper345
EOF
cd /opt/software/sqoop146
mkdir mylog
echo "#sqoop">>/etc/profile.d/my.sh
echo "export SQ_HOME&#61;/opt/software/sqoop146">>/etc/profile.d/my.sh
echo &#39;
export PATH&#61;$SQ_HOME/bin:$PATH&#39;>>/etc/profile.d/my.sh
echo &#39;
export LOGDIR&#61;$SQ_HOME/mylog/&#39;>>/etc/profile.d/my.sh
source /etc/profilesqoop list-databases --connect jdbc:mysql://192.168.221.221:3306 --username root --password root

With this, all the services are up, and sqoop can connect to the database normally.
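For reference, a manual re-check of that result looks roughly like the lines below; the process names are what a successful run of this script would normally leave behind (the exact list can vary):

jps
# expected, roughly: NameNode, DataNode, SecondaryNameNode, ResourceManager, NodeManager,
#                    RunJar (Hive metastore), RunJar (hiveserver2), QuorumPeerMain, HMaster, HRegionServer
sqoop list-databases --connect jdbc:mysql://192.168.221.221:3306 --username root --password root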

