hadoop完全分布式一键安装、启动、停止脚本

释放双眼,带上耳机,听听看~!

 hadoop完全分布式一键安装脚本


#!/bin/bash
# Hadoop 完全分布式一键安装脚本 — part 1: ask for the install dir and unpack the tarball.
# 在windows编写的代码可能运行有问题, 执行以下 1>vim redisshell.sh  2>:set ff=unix

echo -e "请输入hadoop的安装目录,不存在脚本自动创建,最后一个/不要写 /bigdata/install"
read -r esinstallpath

# Create the installation directory if it does not exist yet.
if [ ! -d "$esinstallpath" ]; then
   mkdir -p "$esinstallpath"
fi
if [ ! -d "$esinstallpath" ]; then
  # fixed: the original message referenced the misspelled $esinstallpat (always empty)
  echo "创建目录$esinstallpath失败!请检查目录是否有权限"
  exit 1
fi

# Look for the hadoop tarball next to this script. The original listed the
# caller's cwd but built the tar path from the script dir, which broke when
# the script was launched from another directory.
currentdir=$(cd "$(dirname "$0")" && pwd)
tarball=$(ls "$currentdir" | grep 'hadoop-.*[gz]$' | head -n 1)
if [ -z "$tarball" ]; then
   echo "在$currentdir下没有发现hadoop-*.tar.gz,请自行上传!"
   exit 1
else
   tar -zxvf "$currentdir/$tarball" -C "$esinstallpath"
fi

# Resolve the name of the unpacked distribution directory (e.g. hadoop-2.7.3).
esbanben=$(ls "$esinstallpath" | grep 'hadoop-.*')

# Append the HADOOP environment variables to the user's profile
# (literal $PATH/$HADOOP_HOME in the last line, so it is evaluated at login).
{
  echo ""
  echo '#HADOOP'
  echo "export HADOOP_HOME=$esinstallpath/$esbanben"
  echo 'export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin'
} >> ~/.bash_profile
source ~/.bash_profile

hadooppath=$esinstallpath/$esbanben
confpath=$esinstallpath/$esbanben/etc/hadoop

# Pin JAVA_HOME in hadoop-env.sh: replace the ${JAVA_HOME} placeholder with
# the concrete path ('!' is used as the sed delimiter because paths contain '/').
javahome=$JAVA_HOME
bak_dir='export JAVA_HOME=${JAVA_HOME}'
new_dir="export JAVA_HOME=$javahome"
sed -i "s!${bak_dir}!${new_dir}!g" "$confpath/hadoop-env.sh"

#修改core-site.xml
# Helper: insert one line into core-site.xml just above the closing
# </configuration> tag (same 'i\' sed construct as the original, so the
# leading two-space indent of nested tags is preserved).
core_add() {
  sed -i "/<\/configuration>/i\\$1" "$confpath/core-site.xml"
}

echo -e "请输入hadoop集群服务器名称 例如 mycluster"
read -r mycluster
core_add '<!--配置hadoop集群服务器名称 -->'
core_add '<property>'
core_add '  <name>fs.defaultFS</name>'
core_add "  <value>hdfs://$mycluster</value>"
core_add '</property>'

echo -e "请输入hadoop运行时产生文件的存储目录 不需要自己创建目录 集群自己会创建 指定一个空间大的 用来保存hadoop数据 例如 /bigdata/hadoop"
read -r hadooptmpdir
core_add '<!--hadoop运行时产生文件的存储目录 -->'
core_add '<property>'
core_add '  <name>hadoop.tmp.dir</name>'
core_add "  <value>$hadooptmpdir</value>"
core_add '</property>'

echo -e "请输入zk集群 例如 cdh01:2181,cdh02:2181,cdh03:2181"
read -r zkhosts
core_add '<!--配置zookeeper集群,容灾 -->'
core_add '<property>'
core_add '  <name>ha.zookeeper.quorum</name>'
core_add "  <value>$zkhosts</value>"
core_add '</property>'

echo "core-site.xml 配置如下"
cat "$confpath/core-site.xml"
echo "core-site.xml 配置完成"
sleep 1
#开始配置hdfs-site.xml
# Helpers: insert one line just above </configuration> in each target file.
hdfs_add() {
  sed -i "/<\/configuration>/i\\$1" "$confpath/hdfs-site.xml"
}
yarn_add() {
  sed -i "/<\/configuration>/i\\$1" "$confpath/yarn-site.xml"
}

hdfs_add '<!-- 集群服务名称  -->'
hdfs_add '<property>'
hdfs_add '  <name>dfs.nameservices</name>'
hdfs_add "  <value>$mycluster</value>"
hdfs_add '</property>'

hdfs_add '<!-- 名称节点配置  -->'
hdfs_add '<property>'
hdfs_add "  <name>dfs.ha.namenodes.$mycluster</name>"
hdfs_add '  <value>nn1,nn2</value>'
hdfs_add '</property>'

echo "请输入两个nameNode的地址,按照空格分开 例如 cdh01 cdh02"
read -r nameNodes
# Intentional word-splitting of the space-separated answer into an array
# (the original piped through `tr ' ' ' '`, which was a no-op).
array=($nameNodes)

for i in "${!array[@]}"
do
  n=$((i + 1))          # nn1/nn2 and rm1/rm2 suffix for this host
  host=${array[$i]}

  hdfs_add "<!-- 指定nn${n}机器的RPC服务完整监听地址  -->"
  hdfs_add '<property>'
  hdfs_add "  <name>dfs.namenode.rpc-address.$mycluster.nn${n}</name>"
  hdfs_add "  <value>${host}:8020</value>"
  hdfs_add '</property>'

  hdfs_add "<!-- 指定nn${n}机器的http服务地址  -->"
  hdfs_add '<property>'
  hdfs_add "  <name>dfs.namenode.http-address.$mycluster.nn${n}</name>"
  hdfs_add "  <value>${host}:50070</value>"
  hdfs_add '</property>'

  #配置yarn的环境 — the resourcemanagers live on the same hosts as the namenodes
  yarn_add '<property>'
  yarn_add "  <name>yarn.resourcemanager.hostname.rm${n}</name>"
  yarn_add "  <value>${host}</value>"
  yarn_add '</property>'

  yarn_add '<property>'
  yarn_add "  <name>yarn.resourcemanager.webapp.address.rm${n}</name>"
  yarn_add "  <value>${host}:8088</value>"
  yarn_add '</property>'
done

echo "指定journalnode的配置,所有机器 例如 cdh01:8485;cdh02:8485;cdh03:8485;cdh04:8485;cdh05:8485"
read -r journalnode
# Helper: insert one line into hdfs-site.xml just above </configuration>.
hdfs_add() {
  sed -i "/<\/configuration>/i\\$1" "$confpath/hdfs-site.xml"
}

hdfs_add '<!-- 指定journalnode  -->'
hdfs_add '<property>'
hdfs_add '  <name>dfs.namenode.shared.edits.dir</name>'
hdfs_add "  <value>qjournal://$journalnode/$mycluster</value>"
hdfs_add '</property>'

hdfs_add '<!--   -->'
hdfs_add '<property>'
hdfs_add "  <name>dfs.client.failover.proxy.provider.$mycluster</name>"
hdfs_add '  <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>'
hdfs_add '</property>'

# Multi-line fencing value: try sshfence first, fall back to shell(/bin/true).
hdfs_add '<!--  在故障切换期间将用于隔离Active NameNode的脚本 -->'
hdfs_add '<property>'
hdfs_add '  <name>dfs.ha.fencing.methods</name>'
hdfs_add '  <value>'
hdfs_add '  sshfence'
hdfs_add '  shell(/bin/true)'
hdfs_add '  </value>'
hdfs_add '</property>'

hdfs_add '<!-- 免密  -->'
hdfs_add '<property>'
hdfs_add '  <name>dfs.ha.fencing.ssh.private-key-files</name>'
hdfs_add '  <value>/home/.ssh/id_rsa</value>'
hdfs_add '</property>'

hdfs_add '<!-- Journal Node文件存储地址 -->'
hdfs_add '<property>'
hdfs_add '  <name>dfs.journalnode.edits.dir</name>'
hdfs_add "  <value>$hadooptmpdir/journal</value>"
hdfs_add '</property>'

hdfs_add '<!--是否开启自动failover机制-->'
hdfs_add '<property>'
hdfs_add '  <name>dfs.ha.automatic-failover.enabled</name>'
hdfs_add '  <value>true</value>'
hdfs_add '</property>'

hdfs_add '<!--HDFS文件系统每个block冗余备份数-->'
hdfs_add '<property>'
hdfs_add '  <name>dfs.replication</name>'
hdfs_add '  <value>3</value>'
hdfs_add '</property>'

echo "hdfs-site.xml 配置如下"
cat "$confpath/hdfs-site.xml"
echo "hdfs-site.xml 配置完成"
sleep 1

#开始配置mapred-site.xml
cp "$confpath/mapred-site.xml.template" "$confpath/mapred-site.xml"
# Helpers: insert one line just above </configuration> in each target file.
mapred_add() {
  sed -i "/<\/configuration>/i\\$1" "$confpath/mapred-site.xml"
}
yarn_add() {
  sed -i "/<\/configuration>/i\\$1" "$confpath/yarn-site.xml"
}

# fixed: the original XML comment here was copy-pasted from hdfs-site
# ("HDFS文件系统每个block冗余备份数") and did not describe this property
mapred_add '<!--指定mapreduce运行框架为yarn -->'
mapred_add '<property>'
mapred_add '  <name>mapreduce.framework.name</name>'
mapred_add '  <value>yarn</value>'
mapred_add '</property>'

echo "mapred-site.xml 配置如下"
cat "$confpath/mapred-site.xml"
echo "mapred-site.xml 配置完成"
sleep 1

#开始配置yarn-site.xml
yarn_add '<property>'
yarn_add '  <name>yarn.nodemanager.aux-services</name>'
yarn_add '  <value>mapreduce_shuffle</value>'
yarn_add '</property>'

yarn_add '<property>'
yarn_add '  <name>yarn.resourcemanager.ha.enabled</name>'
yarn_add '  <value>true</value>'
yarn_add '</property>'

yarn_add '<property>'
yarn_add '  <name>yarn.resourcemanager.ha.rm-ids</name>'
yarn_add '  <value>rm1,rm2</value>'
yarn_add '</property>'

yarn_add '<property>'
yarn_add '  <name>yarn.resourcemanager.zk-address</name>'
yarn_add "  <value>$zkhosts</value>"
yarn_add '</property>'

yarn_add '<property>'
yarn_add '  <name>yarn.resourcemanager.cluster-id</name>'
yarn_add '  <value>mycluster1</value>'
yarn_add '</property>'

echo "yarn-site.xml 配置如下"
cat "$confpath/yarn-site.xml"
echo "yarn-site.xml 配置完成"
sleep 1

#开始配置slaves
echo "请输入所有的datanode,按照空格分开 例如 cdh01 cdh02 cdh03"
read -r datanodes
# Intentional word-splitting of the space-separated answer into an array.
array=($datanodes)
# Truncate slaves before writing — the original only `touch`ed it, so
# re-running the script kept appending duplicate datanode entries.
> "$confpath/slaves"
for datanode in "${array[@]}"
do
  echo "$datanode" >> "$confpath/slaves"
done

echo "slaves 配置如下"
cat "$confpath/slaves"
echo "slaves 配置完成"
sleep 1

#分发hadoop安装文件
echo "以下输入的节点必须做免密登录"
echo "请输入所有的hadoop节点,按照空格分开,当前机器不用输入(当前机器是cdh01) 例如 cdh02 cdh03 cdh04 cdh05"
read -r allnodes
user=$(whoami)
# Intentional word-splitting of the space-separated answer into an array.
array=($allnodes)
for allnode in "${array[@]}"
do
  echo "======= $allnode =======" 
  # Append the HADOOP environment variables to the remote profile.
  # (The original also ran `ssh … source ~/.bash_profile`, which is a no-op:
  # each ssh invocation is a fresh session, so it was dropped.)
  ssh "$allnode" "echo ''>>~/.bash_profile"
  ssh "$allnode" "echo '#HADOOP'>>~/.bash_profile"
  ssh "$allnode" "echo 'export HADOOP_HOME=$hadooppath'>>~/.bash_profile"
  ssh "$allnode" 'echo "export PATH=\$PATH:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin">>~/.bash_profile'
  # Recreate the remote install dir and copy the whole distribution over.
  # ${hadooppath:?} makes the remote rm -rf abort instead of expanding empty.
  ssh "$allnode" "rm -rf ${hadooppath:?}"
  ssh "$allnode" "mkdir -p $hadooppath"
  scp -r "$hadooppath"/* "${user}@${allnode}:$hadooppath/"
  echo "======= $allnode 复制完成 ======="
done

for allnode in "${array[@]}"
do
  # fixed typo: 在通过 → 再通过
  echo "======= 在 $allnode 手动执行 source ~/.bash_profile 再通过 hadoop version 查看是否安装成功 ======="
done


 

hadoop完全分布式一键启动脚本


#!/bin/bash
#启动所有的hadoop — run on the primary master; $slaveNode hosts the standby resourcemanager
slaveNode='cdh02'
source ~/.bash_profile
start-all.sh
sleep 2
#启动另一台机器的resourcemanager (-T: no pty needed for the heredoc commands)
ssh -T "$slaveNode" <<EOF
    source ~/.bash_profile
    yarn-daemon.sh start resourcemanager
EOF
echo "$slaveNode 启动resourcemanager...[ done ]"
13

 

hadoop完全分布式一键停止脚本


#!/bin/bash
#停止hadoop — run on the primary master; $standbyNode hosts the standby resourcemanager
standbyNode='cdh02'
source ~/.bash_profile
stop-all.sh
#停止另一台机器的resourcemanager (-T: no pty needed for the heredoc commands)
ssh -T "$standbyNode" <<EOF
    source ~/.bash_profile
    yarn-daemon.sh stop resourcemanager
EOF
echo "$standbyNode 停止resourcemanager...[ done ]"
11

 

 

给TA打赏
共{{data.count}}人
人已打赏
安全技术安全运维

Windows服务器如何发现被黑

2018-5-20 12:24:31

安全技术

用node.js做cluster,监听异常的邮件提醒服务

2021-12-21 16:36:11

个人中心
购物车
优惠劵
今日签到
有新私信 私信列表
搜索