1. Reading a file

// Imports shared by the examples in this post
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;

/**
 * Test reading a file from HDFS
 * @throws IOException
 */
@Test
public void testRead() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Open the file and copy its contents into an in-memory buffer
    FSDataInputStream fis = fs.open(new Path("hdfs://s150:8020/usr/xiaoqiu/hadoop/mydir1/hello1.txt"));
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    IOUtils.copyBytes(fis, baos, 1024);
    fis.close();
    System.out.println(new String(baos.toByteArray()));
}
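If you only want to dump the contents rather than hold them in memory, IOUtils can copy the stream straight to System.out and close it for you. A minimal variation on the test above, reusing the same cluster path; the method name testReadToStdout is just illustrative:

@Test
public void testReadToStdout() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    FSDataInputStream fis = fs.open(new Path("hdfs://s150:8020/usr/xiaoqiu/hadoop/mydir1/hello1.txt"));
    // With close=true, copyBytes closes the stream when the copy finishes
    IOUtils.copyBytes(fis, System.out, 1024, true);
}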
2. Writing a file

/**
 * Test writing a file to HDFS
 * @throws IOException
 */
@Test
public void testWrite() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // create() overwrites the file if it already exists
    FSDataOutputStream fos = fs.create(new Path("hdfs://s150:8020/usr/xiaoqiu/hadoop/mydir1/hello1.txt"));
    fos.write("hello world".getBytes());
    fos.close();
}
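To confirm that the write actually landed, you can check the file's existence and length through the FileSystem API. A small sketch along the same lines as the test above; testWriteAndVerify is a hypothetical name, not part of the original code:

@Test
public void testWriteAndVerify() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("hdfs://s150:8020/usr/xiaoqiu/hadoop/mydir1/hello1.txt");
    FSDataOutputStream fos = fs.create(path);
    fos.write("hello world".getBytes());
    fos.close();
    // exists() and getLen() confirm the file was written and report its size in bytes
    System.out.println(fs.exists(path) + " " + fs.getFileStatus(path).getLen());
}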
3. Customizing the replication factor and block size

/**
 * Create a file with a custom replication factor and block size
 * @throws IOException
 */
@Test
public void testWrite2() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    FSDataOutputStream fos = fs.create(new Path("hdfs://s150:8020/usr/xiaoqiu/hadoop/mydir1/hello1.txt"),
            true,        // overwrite if the file already exists
            1024,        // I/O buffer size in bytes
            (short) 2,   // replication factor
            1024);       // block size in bytes
    fos.write("how are you ".getBytes());
    fos.close();
}
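To check that the custom settings were applied, FileStatus exposes the per-file replication factor and block size. A sketch assuming the file above was created successfully (so it should print replication=2 and blockSize=1024); the method name is illustrative and it additionally needs import org.apache.hadoop.fs.FileStatus:

@Test
public void testCheckReplicationAndBlockSize() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("hdfs://s150:8020/usr/xiaoqiu/hadoop/mydir1/hello1.txt");
    // FileStatus carries the per-file replication factor and block size
    FileStatus status = fs.getFileStatus(path);
    System.out.println("replication=" + status.getReplication() + ", blockSize=" + status.getBlockSize());
}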
Changing the minimum block size: a block size of 1024 bytes is smaller than the default minimum allowed by the NameNode (dfs.namenode.fs-limits.min-block-size defaults to 1048576, i.e. 1 MB), so the create() call above will be rejected unless the minimum is lowered in hdfs-site.xml:
[xiaoqiu@s150 /soft/hadoop/etc/hadoop]$ cat hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.namenode.fs-limits.min-block-size</name>
        <value>1024</value>
    </property>
</configuration>
After changing the configuration file, remember to restart Hadoop so the new settings take effect.