Setting up an HDFS cluster on Ubuntu involves several steps, from base installation through configuring each service to starting the cluster. The following is a detailed step-by-step guide. First, on every node, update the package index, install the basic tools, and sync the system clock:
sudo apt update
sudo apt install vim ntpdate
sudo ntpdate cn.pool.ntp.org
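A one-shot ntpdate sync drifts over time, and clock skew between nodes leads to confusing HDFS log warnings. A cron entry is one simple way to keep the clocks aligned (a sketch, assuming cron is available and the same NTP pool is acceptable):
# add via "sudo crontab -e": resync the clock once an hour
0 * * * * /usr/sbin/ntpdate cn.pool.ntp.org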
sudo adduser hadoop
sudo vi /etc/sudoers
# Duplicate the root line and change root to hadoop; the hadoop user then has root (sudo) privileges
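Inside the editor, a line like the following (modeled on the root entry) is what grants the privilege; on Ubuntu, sudo usermod -aG sudo hadoop achieves the same result without editing /etc/sudoers:
hadoop ALL=(ALL:ALL) ALL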
ssh-keygen
ssh-copy-id (your-slave-ip1)
ssh-copy-id (your-slave-ip2)
ssh-copy-id (your-slave-ip3)
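These SSH steps should be run as the hadoop user, since the Hadoop start scripts log in as whoever launches them. The scripts also SSH to the master itself, so the key should be copied locally as well (assuming sshd is running on the master), and each login verified before continuing; each check should print the remote hostname without asking for a password:
ssh-copy-id localhost
ssh (your-slave-ip1) hostname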
sudo apt-get install -y openjdk-8-jdk
sudo update-java-alternatives --set java-1.8.0-openjdk-amd64
java -version
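The output should report an OpenJDK 1.8 runtime, along the lines of (the exact build number varies):
openjdk version "1.8.0_..."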
vi ~/.bashrc
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH
source ~/.bashrc
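A quick check that the variables took effect (the printed path should match the export above):
echo $JAVA_HOME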
Download and unpack Hadoop 2.7.7 in /home/hadoop, so the paths below match:
wget https://archive.apache.org/dist/hadoop/common/hadoop-2.7.7/hadoop-2.7.7.tar.gz
tar xvf hadoop-2.7.7.tar.gz
Edit the ~/.bashrc file again and add the Hadoop environment variables:
export HADOOP_HOME=/home/hadoop/hadoop-2.7.7
export PATH=$PATH:$HADOOP_HOME/bin
source ~/.bashrc
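All of the configuration files edited below live in $HADOOP_HOME/etc/hadoop. One easy-to-miss step: daemons launched over SSH by the start scripts do not read ~/.bashrc, so JAVA_HOME should also be set explicitly in hadoop-env.sh (a minimal sketch, using the OpenJDK path from above):
# $HADOOP_HOME/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64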
Edit core-site.xml. Note that fs.defaultFS must point at the master, not localhost, or the DataNodes on the slaves cannot reach the NameNode:
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://(your-master-ip):9000</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/hadoop/hdata</value>
  </property>
</configuration>
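Creating the hadoop.tmp.dir directory up front on every node avoids permission surprises later (the path matches the value above):
mkdir -p /home/hadoop/hdata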
Edit hdfs-site.xml:
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>(your-master-ip):50090</value>
  </property>
  <property>
    <name>dfs.datanode.fsdataset.volume.choosing.policy</name>
    <value>org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy</value>
  </property>
</configuration>
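Hadoop 2.7.7 ships only a template for the MapReduce configuration, so copy it before editing:
cp $HADOOP_HOME/etc/hadoop/mapred-site.xml.template $HADOOP_HOME/etc/hadoop/mapred-site.xml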
Edit mapred-site.xml:
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
Edit yarn-site.xml:
<configuration>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>(your-master-ip)</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
</configuration>
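Two more pieces are needed before the first start. start-dfs.sh decides where to launch DataNodes from the slaves file, and every node must carry the same configuration. A sketch of both steps, using the host placeholders from above:
# $HADOOP_HOME/etc/hadoop/slaves — one DataNode host per line
(your-slave-ip1)
(your-slave-ip2)
(your-slave-ip3)
Then copy the configured Hadoop directory to each slave, for example:
scp -r /home/hadoop/hadoop-2.7.7 hadoop@(your-slave-ip1):/home/hadoop/
With that in place, format the NameNode on the master (once only) and start HDFS: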
hdfs namenode -format
cd $HADOOP_HOME/sbin
./start-dfs.sh
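Since YARN was configured above, its daemons can be started from the same directory; jps (shipped with the JDK) then shows what is running on each node. With this layout the master should show NameNode, SecondaryNameNode, and ResourceManager, and each slave DataNode and NodeManager:
./start-yarn.sh
jps
Then confirm that the DataNodes registered with the NameNode: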
hdfs dfsadmin -report
Finally, open the NameNode web UI in a browser: http://(your-master-ip):50070
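If YARN is running, its ResourceManager web UI is on port 8088 by default: http://(your-master-ip):8088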