Writing an HDFS Program in Java

Published 2020-07-24 by zjy1002261870

1. By default, Hadoop stores its temporary data under the Unix /tmp directory (cd /tmp shows hadoop-root and similar files). Because /tmp is cleared on reboot, Hadoop may misbehave after the Linux system restarts, so the temporary file directory needs to be changed.
2. Edit core-site.xml (vim core-site.xml) with the configuration below, then restart the Hadoop cluster. Do not reformat the NameNode.
<property>
<name>hadoop.tmp.dir</name>
<value>/var/hadoop</value>
</property>
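In Hadoop 2.x, dfs.namenode.name.dir and dfs.datanode.data.dir default to ${hadoop.tmp.dir}/dfs/name and ${hadoop.tmp.dir}/dfs/data respectively, which is why the DataNode data discussed below ends up under /var/hadoop/dfs/data.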
Formatting the NameNode regenerates its clusterID, while the DataNodes' clusterID values stay unchanged; the mismatch causes the DataNodes to fail on startup. To recover, edit the VERSION file under /var/hadoop/dfs/data/current on each DataNode and change its clusterID to match the NameNode's, after which the cluster starts normally.
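For reference, a DataNode VERSION file looks roughly like the following (the ID values here are placeholders and will differ per cluster); only the clusterID line needs to be edited to match the NameNode's:

#Fri Jul 24 23:00:00 CST 2020
storageID=DS-xxxxxxxx
clusterID=CID-xxxxxxxx
cTime=0
datanodeUuid=xxxxxxxx
storageType=DATA_NODE
layoutVersion=-56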
3. For testing, permission checking can be disabled (otherwise the client may be denied access). Add the following configuration on the NameNode (vim hdfs-site.xml):
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
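The project's Maven pom.xml declares JUnit and the Hadoop client libraries as dependencies: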

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.skcc</groupId>
<artifactId>wordcount</artifactId>
<version>0.0.1-SNAPSHOT</version>
<name>wordcount</name>
<description>count the word</description>

<properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <hadoop.version>2.7.3</hadoop.version>
</properties>
<dependencies>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.12</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>${hadoop.version}</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>${hadoop.version}</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>${hadoop.version}</version>
    </dependency>
</dependencies>

</project>
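The client code below accesses HDFS both through the java.net.URL stream handler and through the Hadoop FileSystem API: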

package com.skcc.hadoop;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.text.NumberFormat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class HelloHDFS {

public static FileSystem getFileSystemInstance() {
    // Point the client at the NameNode's RPC address
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://172.26.19.40:9000");
    FileSystem fileSystem = null;
    try {
        fileSystem = FileSystem.get(conf);
    } catch (IOException e) {
        e.printStackTrace();
    }
    return fileSystem;
}

public static void getFileFromHDFS() throws Exception {
    // java.net.URL only understands http by default; FsUrlStreamHandlerFactory
    // teaches it the hdfs:// protocol. Note: this factory can be set only once per JVM.
    URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
    URL url = new URL("hdfs://172.26.19.40:9000/10803060234.txt");
    InputStream inputStream = url.openStream();
    // copyBytes closes both streams when the last argument is true
    IOUtils.copyBytes(inputStream, System.out, 4096, true);
}
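
// A minimal alternative sketch (not in the original post): reading the same
// file through the FileSystem API instead of the URL handler, which avoids
// the once-per-JVM stream handler factory restriction.
public static void downloadFileFromHDFS() throws Exception {
    FileSystem fileSystem = getFileSystemInstance();
    // FileSystem.open returns an input stream for the given HDFS path
    InputStream inputStream = fileSystem.open(new Path("/10803060234.txt"));
    IOUtils.copyBytes(inputStream, System.out, 4096, true);
}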

public static void getFileFromBaiDu() throws IOException {
    // Despite its name, this method fetches an ordinary HTTP URL, showing
    // that the same stream-copy code works for any supported protocol
    URL url = new URL("http://skynet.skhynix-cq.com.cn/plusWare/Main.aspx");
    InputStream inputStream = url.openStream();
    IOUtils.copyBytes(inputStream, System.out, 4096, true);
}

public static void testHadoop() throws Exception {
    FileSystem fileSystem = getFileSystemInstance();

    boolean success = fileSystem.mkdirs(new Path("/skcc"));
    System.out.println("mkdirs is " + success);

    success = fileSystem.exists(new Path("/10803060234.txt"));
    System.out.println("file exists is " + success);

    // The second argument enables recursive deletion
    success = fileSystem.delete(new Path("/test2.data"), true);
    System.out.println("delete dirs is " + success);

    success = fileSystem.exists(new Path("/skcc"));
    System.out.println("dirs exists is " + success);
}

public static void uploadFileToHDFS() throws Exception {
    FileSystem fileSystem = getFileSystemInstance();
    String filename = "/test2.data";
    // The second argument (true) overwrites the file if it already exists
    FSDataOutputStream outputStream = fileSystem.create(new Path(filename), true);
    FileInputStream fis = new FileInputStream("D:\\2018\\u001.zip");

    // IOUtils.copyBytes(fis, outputStream, 4096, true) would do the copy in one
    // call; the manual loop below is used so upload progress can be reported.

    long totalLen = fis.getChannel().size();
    long tmpSize = 0;
    NumberFormat numberFormat = NumberFormat.getInstance();
    numberFormat.setMaximumFractionDigits(0);
    System.out.println("totalLen : " + totalLen + " available : " + fis.available());
    byte[] buf = new byte[4096];
    int len = fis.read(buf);
    while (len != -1) {
        tmpSize = tmpSize + len;
        String result = numberFormat.format((float) tmpSize / (float) totalLen * 100);

        outputStream.write(buf, 0, len);
        System.out.println("Upload Percent : " + result + "%");
        len = fis.read(buf);
    }
    // Close both streams so the HDFS write is flushed and finalized
    fis.close();
    outputStream.close();
}

}
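
For completeness, here is a minimal driver sketch (not part of the original post) that exercises the methods above; it assumes the same NameNode address and file paths used throughout:

package com.skcc.hadoop;

// Hypothetical driver class, added for illustration only
public class HelloHDFSMain {
    public static void main(String[] args) throws Exception {
        HelloHDFS.getFileFromHDFS();   // print an HDFS file via the URL handler
        HelloHDFS.testHadoop();        // mkdirs / exists / delete round trip
        HelloHDFS.uploadFileToHDFS();  // upload a local file, printing progress
    }
}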
