1. Client-to-NameNode interface: org.apache.hadoop.hdfs.protocol.ClientProtocol
public interface ClientProtocol extends VersionedProtocol {
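// indices into the long[] returned by getStats() below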
public int GET_STATS_CAPACITY_IDX = 0;
public int GET_STATS_USED_IDX = 1;
public int GET_STATS_REMAINING_IDX = 2;
public int GET_STATS_UNDER_REPLICATED_IDX = 3;
public int GET_STATS_CORRUPT_BLOCKS_IDX = 4;
public int GET_STATS_MISSING_BLOCKS_IDX = 5;
public LocatedBlocks getBlockLocations(String src,
long offset,
long length) throws IOException;
public void create(String src,
FsPermission masked,
String clientName,
boolean overwrite,
boolean createParent,
short replication,
long blockSize
) throws IOException;
public void create(String src,
FsPermission masked,
String clientName,
boolean overwrite,
short replication,
long blockSize
) throws IOException;
public LocatedBlock append(String src, String clientName) throws IOException;
public boolean recoverLease(String src, String clientName) throws IOException;
public boolean setReplication(String src,
short replication
) throws IOException;
public void setPermission(String src, FsPermission permission
) throws IOException;
public void setOwner(String src, String username, String groupname
) throws IOException;
public void abandonBlock(Block b, String src, String holder
) throws IOException;
public LocatedBlock addBlock(String src, String clientName) throws IOException;
public LocatedBlock addBlock(String src, String clientName,
DatanodeInfo[] excludedNodes) throws IOException;
public boolean complete(String src, String clientName) throws IOException;
public void reportBadBlocks(LocatedBlock[] blocks) throws IOException;
public boolean rename(String src, String dst) throws IOException;
public boolean delete(String src) throws IOException;
public boolean delete(String src, boolean recursive) throws IOException;
public boolean mkdirs(String src, FsPermission masked) throws IOException;
public DirectoryListing getListing(String src, byte[] startAfter)
throws IOException;
public void renewLease(String clientName) throws IOException;
public long[] getStats() throws IOException;
public DatanodeInfo[] getDatanodeReport(FSConstants.DatanodeReportType type)
throws IOException;
public long getPreferredBlockSize(String filename) throws IOException;
public boolean setSafeMode(FSConstants.SafeModeAction action) throws IOException;
public void saveNamespace() throws IOException;
public void refreshNodes() throws IOException;
public void finalizeUpgrade() throws IOException;
public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
throws IOException;
public void metaSave(String filename) throws IOException;
public void setBalancerBandwidth(long bandwidth) throws IOException;
public HdfsFileStatus getFileInfo(String src) throws IOException;
public ContentSummary getContentSummary(String path) throws IOException;
public void setQuota(String path, long namespaceQuota, long diskspaceQuota)
throws IOException;
public void fsync(String src, String client) throws IOException;
public void setTimes(String src, long mtime, long atime) throws IOException;
public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer) throws IOException;
public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
throws IOException;
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
throws IOException;
}
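All of these calls travel over Hadoop RPC. Below is a minimal sketch of the write path (create → addBlock → complete), assuming a Hadoop 1.x classpath; the NameNode address, path, and parameter values are illustrative, and ClientProtocol.versionID is the version constant omitted from the listing above.

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.ipc.RPC;

public class ClientProtocolSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: the NameNode RPC endpoint (fs.default.name host:port).
    InetSocketAddress nnAddr = new InetSocketAddress("namenode-host", 8020);
    ClientProtocol namenode = (ClientProtocol) RPC.getProxy(
        ClientProtocol.class, ClientProtocol.versionID, nnAddr, conf);
    try {
      String src = "/demo/file.txt";
      String clientName = "DFSClient_sketch"; // identifies the lease holder
      // 1. create() adds the file to the namespace and grants clientName a lease.
      namenode.create(src, FsPermission.getDefault(), clientName,
          true /* overwrite */, true /* createParent */,
          (short) 3 /* replication */, 64L * 1024 * 1024 /* blockSize */);
      // 2. addBlock() allocates a block and returns its DataNode pipeline;
      //    the bytes themselves flow over DataTransferProtocol (section 8).
      LocatedBlock blk = namenode.addBlock(src, clientName);
      // 3. A long-running writer must call renewLease(clientName) periodically.
      // 4. complete() closes the file; the real client retries until it
      //    returns true (all blocks minimally replicated).
      namenode.complete(src, clientName);
    } finally {
      RPC.stopProxy(namenode);
    }
  }
}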
2. Client-to-DataNode interface: org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol
public interface ClientDatanodeProtocol extends VersionedProtocol {
public static final long versionID = 4L;
LocatedBlock recoverBlock(Block block, boolean keepLength,
DatanodeInfo[] targets) throws IOException;
Block getBlockInfo(Block block) throws IOException;
BlockLocalPathInfo getBlockLocalPathInfo(Block block,
Token<BlockTokenIdentifier> token) throws IOException;
}
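getBlockLocalPathInfo() exists for short-circuit local reads: a client co-located with a replica asks the DataNode for the replica's file paths and then reads those files directly. A minimal sketch, assuming a 1.x classpath; the accessor names (getBlock(), getBlockToken(), getBlockPath()) follow the 1.x source but are not part of the listing above.

import java.io.FileInputStream;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

class ShortCircuitSketch {
  // `datanode` is assumed to be an RPC proxy to the *local* DataNode, and
  // `blk` a LocatedBlock whose replica list includes this machine.
  static FileInputStream openLocal(ClientDatanodeProtocol datanode,
                                   LocatedBlock blk) throws Exception {
    BlockLocalPathInfo info =
        datanode.getBlockLocalPathInfo(blk.getBlock(), blk.getBlockToken());
    // Bypass the DataNode's data path entirely and read the block file;
    // info.getMetaPath() would give the matching checksum file.
    return new FileInputStream(info.getBlockPath());
  }
}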
3. Classes used by ClientProtocol and ClientDatanodeProtocol
Data block: org.apache.hadoop.hdfs.protocol.Block
public class Block implements Writable, Comparable<Block> {
private long blockId;
private long numBytes;
private long generationStamp;
}
A located block (with its DataNode locations): org.apache.hadoop.hdfs.protocol.LocatedBlock
public class LocatedBlock implements Writable {
private Block b;
private long offset; // offset of the first byte of the block in the file
private DatanodeInfo[] locs;
private Token<BlockTokenIdentifier> blockToken = new Token<BlockTokenIdentifier>();
}
A collection of located blocks: org.apache.hadoop.hdfs.protocol.LocatedBlocks
public class LocatedBlocks implements Writable {
private long fileLength;
private List<LocatedBlock> blocks;
private boolean underConstruction;
}
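LocatedBlocks is what getBlockLocations() returns: entries sorted by their start offset within the file. A small sketch of mapping a file position onto one of them, using only the fields listed above (getter names assumed from the 1.x source); the real DFSClient binary-searches the same sorted list, a linear scan just keeps the sketch short.

import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

class BlockLookupSketch {
  // Returns the block covering file offset `pos`, or null if `pos` falls
  // outside the (possibly partial) list the NameNode returned.
  static LocatedBlock blockAt(LocatedBlocks located, long pos) {
    for (LocatedBlock lb : located.getLocatedBlocks()) {
      long start = lb.getStartOffset();        // the `offset` field above
      if (pos >= start && pos < start + lb.getBlockSize()) {
        return lb;
      }
    }
    return null;
  }
}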
A block whose replica can be read locally (short-circuit read): org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo
public class BlockLocalPathInfo implements Writable {
private Block block;
private String localBlockPath = ""; // local file storing the data
private String localMetaPath = ""; // local file storing the checksum
}
DataNode information: org.apache.hadoop.hdfs.protocol.DatanodeInfo
public class DatanodeInfo extends DatanodeID implements Node {
protected long capacity;
protected long dfsUsed;
protected long remaining;
protected long lastUpdate;
protected int xceiverCount;
protected String location = NetworkTopology.DEFAULT_RACK;
protected String hostName = null;
// administrative states of a datanode
public enum AdminStates {NORMAL, DECOMMISSION_INPROGRESS, DECOMMISSIONED; }
protected AdminStates adminState;
}
DataNode ID: org.apache.hadoop.hdfs.protocol.DatanodeID
public class DatanodeID implements WritableComparable<DatanodeID> {
public static final DatanodeID[] EMPTY_ARRAY = {};
public String name; /// hostname:portNumber
public String storageID; /// unique per cluster storageID
protected int infoPort; /// the port where the infoserver is running
public int ipcPort;
}
Status of an HDFS file or directory: org.apache.hadoop.hdfs.protocol.HdfsFileStatus
public class HdfsFileStatus implements Writable {
private byte[] path; // local name of the inode that's encoded in java UTF8
private long length;
private boolean isdir;
private short block_replication;
private long blocksize;
private long modification_time;
private long access_time;
private FsPermission permission;
private String owner;
private String group;
}
A batch of directory entries: org.apache.hadoop.hdfs.protocol.DirectoryListing
public class DirectoryListing implements Writable {
private HdfsFileStatus[] partialListing;
private int remainingEntries;
}
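remainingEntries makes getListing() a paged call: each reply carries one batch of entries plus the count still to come, and the last returned name seeds the next request. A sketch of the full loop, assuming `namenode` is a ClientProtocol proxy and that the 1.x accessors (getPartialListing(), hasMore(), getLastName(), HdfsFileStatus.EMPTY_NAME) are available:

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

class ListingSketch {
  static void listAll(ClientProtocol namenode, String dir) throws Exception {
    byte[] startAfter = HdfsFileStatus.EMPTY_NAME; // empty name = from the start
    while (true) {
      DirectoryListing page = namenode.getListing(dir, startAfter);
      for (HdfsFileStatus stat : page.getPartialListing()) {
        System.out.println(stat.getLocalName());
      }
      if (!page.hasMore()) {             // i.e. remainingEntries == 0
        break;
      }
      startAfter = page.getLastName();   // resume after the last entry returned
    }
  }
}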
4. DataNode-to-NameNode interface: org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol
public interface DatanodeProtocol extends VersionedProtocol {
public static final long versionID = 25L;
// error code
final static int NOTIFY = 0;
final static int DISK_ERROR = 1; // there are still valid volumes on DN
final static int INVALID_BLOCK = 2;
final static int FATAL_DISK_ERROR = 3; // no valid volumes left on DN
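// DatanodeCommand action codes (see section 7)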
final static int DNA_UNKNOWN = 0; // unknown action
final static int DNA_TRANSFER = 1; // transfer blocks to another datanode
final static int DNA_INVALIDATE = 2; // invalidate blocks
final static int DNA_SHUTDOWN = 3; // shutdown node
final static int DNA_REGISTER = 4; // re-register
final static int DNA_FINALIZE = 5; // finalize previous upgrade
final static int DNA_RECOVERBLOCK = 6; // request a block recovery
final static int DNA_ACCESSKEYUPDATE = 7; // update access key
final static int DNA_BALANCERBANDWIDTHUPDATE = 8; // update balancer bandwidth
public DatanodeRegistration register(DatanodeRegistration registration
) throws IOException;
public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
long capacity,
long dfsUsed, long remaining,
int xmitsInProgress,
int xceiverCount) throws IOException;
public DatanodeCommand blockReport(DatanodeRegistration registration,
long[] blocks) throws IOException;
public void blocksBeingWrittenReport(DatanodeRegistration registration,
long[] blocks) throws IOException;
public void blockReceived(DatanodeRegistration registration,
Block blocks[],
String[] delHints) throws IOException;
public void errorReport(DatanodeRegistration registration,
int errorCode,
String msg) throws IOException;
public NamespaceInfo versionRequest() throws IOException;
UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException;
public void reportBadBlocks(LocatedBlock[] blocks) throws IOException;
public long nextGenerationStamp(Block block, boolean fromNN) throws IOException;
public void commitBlockSynchronization(Block block,
long newgenerationstamp, long newlength,
boolean closeFile, boolean deleteblock, DatanodeID[] newtargets
) throws IOException;
}
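The DataNode drives this whole interface from one service loop: a heartbeat every few seconds, a block report far less often, and NameNode instructions returned piggy-backed on the heartbeat reply as DatanodeCommand objects (section 7). A condensed sketch modeled on DataNode.offerService(); the zeroed statistics and the fixed 3-second sleep are illustrative assumptions.

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

class OfferServiceSketch {
  void offerService(DatanodeProtocol namenode, DatanodeRegistration reg)
      throws Exception {
    while (true) {
      // Report capacity/usage; commands ride back on the heartbeat reply.
      DatanodeCommand[] cmds = namenode.sendHeartbeat(reg,
          0L /* capacity */, 0L /* dfsUsed */, 0L /* remaining */,
          0 /* xmitsInProgress */, 0 /* xceiverCount */);
      if (cmds != null) {
        for (DatanodeCommand cmd : cmds) {
          // dispatch on cmd.getAction(): DNA_TRANSFER, DNA_INVALIDATE, ...
          // (see the processCommand() sketch in section 7)
        }
      }
      // Every dfs.blockreport.intervalMsec, also call blockReport() with the
      // full list of locally stored blocks (packed into a long[]).
      Thread.sleep(3000L); // dfs.heartbeat.interval
    }
  }
}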
5. Inter-DataNode interface: org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol
public interface InterDatanodeProtocol extends VersionedProtocol {
public static final Log LOG = LogFactory.getLog(InterDatanodeProtocol.class);
public static final long versionID = 3L;
BlockMetaDataInfo getBlockMetaDataInfo(Block block) throws IOException;
BlockRecoveryInfo startBlockRecovery(Block block) throws IOException;
void updateBlock(Block oldblock, Block newblock, boolean finalize) throws IOException;
}
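These three methods implement block recovery: the DataNode chosen as primary (via DNA_RECOVERBLOCK) queries each replica holder, truncates all replicas to a consistent length under a fresh generation stamp, and reports the result. A minimal sketch with error handling omitted; proxies are assumed already obtained, and the getter/constructor names follow the 1.x source.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;

class BlockRecoverySketch {
  static void syncBlock(DatanodeProtocol namenode,
                        InterDatanodeProtocol[] replicas,
                        Block block,
                        DatanodeID[] newTargets) throws IOException {
    // 1. Ask every replica holder for its view of the block and take the
    //    shortest length as the consistent truncation point.
    long minLength = Long.MAX_VALUE;
    for (InterDatanodeProtocol dn : replicas) {
      BlockRecoveryInfo info = dn.startBlockRecovery(block);
      minLength = Math.min(minLength, info.getBlock().getNumBytes());
    }
    // 2. A fresh generation stamp from the NameNode marks stragglers stale.
    long newGS = namenode.nextGenerationStamp(block, false /* fromNN */);
    Block newBlock = new Block(block.getBlockId(), minLength, newGS);
    // 3. Bring every replica to the agreed length and generation stamp.
    for (InterDatanodeProtocol dn : replicas) {
      dn.updateBlock(block, newBlock, true /* finalize */);
    }
    // 4. Report the synchronized replica set back to the NameNode (section 4).
    namenode.commitBlockSynchronization(block, newGS, minLength,
        true /* closeFile */, false /* deleteblock */, newTargets);
  }
}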
6. SecondaryNameNode-to-NameNode interface: org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol
public interface NamenodeProtocol extends VersionedProtocol {
public static final long versionID = 3L;
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException;
public ExportedBlockKeys getBlockKeys() throws IOException;
public long getEditLogSize() throws IOException;
public CheckpointSignature rollEditLog() throws IOException;
public void rollFsImage() throws IOException;
}
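One checkpoint round brackets the HTTP transfer of section 9 with these RPC calls. A condensed sketch modeled on SecondaryNameNode.doCheckpoint(), with a plain size threshold standing in for the real fs.checkpoint.period / fs.checkpoint.size triggers; the CheckpointSignature import path follows the 1.x tree.

import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

class CheckpointSketch {
  static void doCheckpoint(NamenodeProtocol namenode, long sizeThreshold)
      throws Exception {
    if (namenode.getEditLogSize() < sizeThreshold) {
      return; // not enough unmerged edits yet
    }
    // 1. Roll: the NameNode closes `edits` and starts writing `edits.new`.
    CheckpointSignature sig = namenode.rollEditLog();
    // 2. Over HTTP (section 9): download fsimage and edits, replay the edits
    //    into the image locally, then upload the merged fsimage back;
    //    `sig` guards against the NameNode state changing underneath.
    // 3. Adopt: the NameNode installs the uploaded image and renames
    //    edits.new back to edits.
    namenode.rollFsImage();
  }
}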
7. Classes used by DatanodeProtocol, InterDatanodeProtocol, and NamenodeProtocol
DataNode registration: org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration
public class DatanodeRegistration extends DatanodeID implements Writable {
public StorageInfo storageInfo;
public ExportedBlockKeys exportedKeys; // security-related (block access keys)
}
The embedded org.apache.hadoop.hdfs.server.common.StorageInfo:
public class StorageInfo {
public int layoutVersion; // Version read from the stored file.
public int namespaceID; // namespace id of the storage
public long cTime;
}
Information about the whole HDFS cluster: org.apache.hadoop.hdfs.server.protocol.NamespaceInfo
public class NamespaceInfo extends StorageInfo implements Writable {
String buildVersion;
int distributedUpgradeVersion;
}
NameNode commands, old style (bare int action codes on DatanodeProtocol):
public interface DatanodeProtocol extends VersionedProtocol {
final static int DNA_UNKNOWN = 0; // unknown action
final static int DNA_TRANSFER = 1; // transfer blocks to another datanode
final static int DNA_INVALIDATE = 2; // invalidate blocks
final static int DNA_SHUTDOWN = 3; // shutdown node
final static int DNA_REGISTER = 4; // re-register
final static int DNA_FINALIZE = 5; // finalize previous upgrade
final static int DNA_RECOVERBLOCK = 6; // request a block recovery
final static int DNA_ACCESSKEYUPDATE = 7; // update access key
final static int DNA_BALANCERBANDWIDTHUPDATE = 8; // update balancer bandwidth
}
NameNode commands, new style: org.apache.hadoop.hdfs.server.protocol.DatanodeCommand
public abstract class DatanodeCommand implements Writable {
static class Register extends DatanodeCommand {
private Register() {super(DatanodeProtocol.DNA_REGISTER);}
public void readFields(DataInput in) {}
public void write(DataOutput out) {}
}
static class Finalize extends DatanodeCommand {
private Finalize() {super(DatanodeProtocol.DNA_FINALIZE);}
public void readFields(DataInput in) {}
public void write(DataOutput out) {}
}
public static final DatanodeCommand REGISTER = new Register();
public static final DatanodeCommand FINALIZE = new Finalize();
private int action;
}
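On the DataNode, both styles meet in a dispatch on the action code. A minimal sketch modeled on DataNode.processCommand(), with only a few branches spelled out:

import java.io.IOException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

class ProcessCommandSketch {
  void processCommand(DatanodeCommand cmd) throws IOException {
    switch (cmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER:
      // replicate the blocks named in the command to its target DataNodes
      break;
    case DatanodeProtocol.DNA_INVALIDATE:
      // delete the named blocks from local storage
      break;
    case DatanodeProtocol.DNA_REGISTER:
      // re-register, e.g. after the NameNode restarted
      break;
    default:
      // DNA_SHUTDOWN, DNA_FINALIZE, DNA_RECOVERBLOCK, ... handled likewise
      break;
    }
  }
}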
8. Non-IPC streaming interface (client-to-DataNode and DataNode-to-DataNode): org.apache.hadoop.hdfs.protocol.DataTransferProtocol
It runs directly over TCP sockets and carries the actual block reads and writes.
public interface DataTransferProtocol {
public static final int DATA_TRANSFER_VERSION = 17;
// Processed at datanode stream-handler
public static final byte OP_WRITE_BLOCK = (byte) 80;
public static final byte OP_READ_BLOCK = (byte) 81;
@Deprecated public static final byte OP_READ_METADATA = (byte) 82;
public static final byte OP_REPLACE_BLOCK = (byte) 83;
public static final byte OP_COPY_BLOCK = (byte) 84;
public static final byte OP_BLOCK_CHECKSUM = (byte) 85;
public static final int OP_STATUS_SUCCESS = 0;
public static final int OP_STATUS_ERROR = 1;
public static final int OP_STATUS_ERROR_CHECKSUM = 2;
public static final int OP_STATUS_ERROR_INVALID = 3;
public static final int OP_STATUS_ERROR_EXISTS = 4;
public static final int OP_STATUS_ERROR_ACCESS_TOKEN = 5;
public static final int OP_STATUS_CHECKSUM_OK = 6;
public static class PipelineAck implements Writable {
private long seqno;
private short replies[];
final public static long UNKOWN_SEQNO = -2;
}
}
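As an illustration of the wire format, here is a sketch of a client issuing OP_READ_BLOCK. The field order mirrors the 1.x DFSClient, but treat the exact framing, the default port, and the token handling as assumptions rather than a normative spec.

import java.io.DataOutputStream;
import java.net.Socket;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;

class ReadBlockSketch {
  static void requestBlock(String host, long blockId, long genStamp,
      long startOffset, long length, String clientName,
      Token<BlockTokenIdentifier> accessToken) throws Exception {
    Socket s = new Socket(host, 50010); // default dfs.datanode.address port
    DataOutputStream out = new DataOutputStream(s.getOutputStream());
    out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); // 17
    out.writeByte(DataTransferProtocol.OP_READ_BLOCK);          // 81
    out.writeLong(blockId);
    out.writeLong(genStamp);
    out.writeLong(startOffset);
    out.writeLong(length);
    Text.writeString(out, clientName);
    accessToken.write(out); // block access token (may be an empty token)
    out.flush();
    // The DataNode answers with an OP_STATUS_* code, checksum metadata,
    // and then the data itself in packets.
  }
}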
9. Non-IPC interface between the SecondaryNameNode and the NameNode
They communicate over HTTP to transfer the FSImage and edits log for checkpoint merging.
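In 1.x both directions of that transfer go through the NameNode's "getimage" servlet (see TransferFsImage). A hedged sketch of the URLs involved; hosts and ports are illustrative defaults and the parameter names are assumptions from the 1.x source:

// The SecondaryNameNode pulls the current image and the rolled edits:
//   http://namenode:50070/getimage?getimage=1
//   http://namenode:50070/getimage?getedit=1
// After merging locally, it asks the NameNode to fetch the result back:
//   http://namenode:50070/getimage?putimage=1&port=50090
//       &machine=<secondary-host>&token=<CheckpointSignature>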
10. For the remaining interfaces, see the reference book.