`
zy19982004
  • 浏览: 654081 次
  • 性别: Icon_minigender_1
  • 来自: 深圳
博客专栏
F6f66edc-1c1a-3859-b76b-a22e740b7aa7
Hadoop学习
浏览量:249774
社区版块
存档分类
最新评论

Hadoop学习二十:Hadoop-Hdfs 通信机制VersionedProtocol接口方法

 
阅读更多

一. VersionedProtocol

/**
 * Superclass of every protocol that uses the Hadoop RPC mechanism.
 * Each subinterface is expected to declare its own protocol version
 * as a {@code static final long versionID} field.
 */
public interface VersionedProtocol {
  /**
   * Returns the protocol version associated with the named protocol.
   *
   * @param protocol      class name of the protocol interface
   * @param clientVersion version of the protocol the client wants to speak
   * @return the version the server will use when talking to this client
   */
  long getProtocolVersion(String protocol, long clientVersion) throws IOException;
}

 

二.ClientDatanodeProtocol

/** Protocol spoken by a client directly to a DataNode. */
public interface ClientDatanodeProtocol extends VersionedProtocol {
  /** ClientDatanodeProtocol version number: 4. */
  long versionID = 4L;

  /** Starts recovery of the given block on the listed target datanodes. */
  LocatedBlock recoverBlock(Block block, boolean keepLength,
      DatanodeInfo[] targets) throws IOException;

  /** Returns this DataNode's stored information for the given block. */
  Block getBlockInfo(Block block) throws IOException;

  /** Returns the local path information ({@link BlockLocalPathInfo}) for the given block. */
  BlockLocalPathInfo getBlockLocalPathInfo(Block block,
      Token<BlockTokenIdentifier> token) throws IOException;
}

 

 

三. InterDatanodeProtocol

 

/** Protocol used by DataNodes to talk to each other. */
public interface InterDatanodeProtocol extends VersionedProtocol {
  /** InterDatanodeProtocol version number: 3. */
  long versionID = 3L;

  /** Returns the metadata for the given block. */
  BlockMetaDataInfo getBlockMetaDataInfo(Block block) throws IOException;

  /** Begins recovery of the given block. */
  BlockRecoveryInfo startBlockRecovery(Block block) throws IOException;

  /** Replaces {@code oldblock} with {@code newblock}, optionally finalizing it. */
  void updateBlock(Block oldblock, Block newblock, boolean finalize) throws IOException;
}

 

四. ClientProtocol

 

/**
 * ClientProtocol用于用户通过DistributedFileSystem访问NameNode
 * Client可以操作namespace目录,打开/关闭文件流等等
 * 以下所有方法发起方都是Client,接收方都是NameNode
 */
public interface ClientProtocol extends VersionedProtocol {

    //ClientProtocol 协议号61L
  public static final long versionID = 61L;
  
  ///////////////////////////////////////
  // File contents
  ///////////////////////////////////////
  
//获得一个文件对应的list<块,块的位置<list<位置>>信息
  public LocatedBlocks  getBlockLocations(String src,
                                          long offset,
                                          long length) throws IOException;

  //在命名空间中创建一个文件入口。该方法将创建一个由src路径指定的空文件
  //该路径src应该反映了从root目录开始的一个完整路径名称
  //从客户端的角度,Namenode并没有“当前”目录的概念。一旦文件创建成功,该文件就是可见的,并可以被其它客户端来执行读操作。
  //但是,其它客户端不能够对该文件进行删除、重命名、重写,而这些操作只有在该文件被完全或明确指定为租约到期,才可以执行。
  //每个块都具有最大长度限制,如果客户端想要创建多个块,可以调用addBlock(String, String)方法来实现。
  public void create(String src, 
                     FsPermission masked,
                             String clientName, 
                             boolean overwrite, 
                             boolean createParent,
                             short replication,
                             long blockSize
                             ) throws IOException;

  public void create(String src, 
                     FsPermission masked,
                             String clientName, 
                             boolean overwrite, 
                             short replication,
                             long blockSize
                             ) throws IOException;
 
  //向文件src中追加写入内容,返回一个org.apache.hadoop.hdfs.protocol.LocatedBlock对象
  //该对象封装了 块,块的位置<list<位置>的对应关系,通过追加写操作后的返回信息,可以定位到追加写入最后部分块的信息。
  public LocatedBlock append(String src, String clientName) throws IOException;
  

  //开始恢复租约
  public boolean recoverLease(String src, String clientName) throws IOException;


  //设置已经存在的文件的复制因子
  //Namenode会为指定文件设置副本因子,但是,不期望在调用该方法的过程修改实际块的副本因子,而是由后台块维护进程来执行:
  //如果当前副本因子小于设置的新副本因子,需要增加一些块副本,如果当前副本因子大于设置的新副本因子,就会删除一些块副本。
  public boolean setReplication(String src, 
                                short replication
                                ) throws IOException;


  //为已经存在的目录或者文件,设置给定的操作权限  
  public void setPermission(String src, FsPermission permission
      ) throws IOException;


  //设置文件或目录属主  
  public void setOwner(String src, String username, String groupname
      ) throws IOException;

 
  //客户端放弃对指定块的操作
  public void abandonBlock(Block b, String src, String holder
      ) throws IOException;


  public LocatedBlock addBlock(String src, String clientName) throws IOException;

  
  //客户端向一个当前为写操作打开的文件写入数据块  
  public LocatedBlock addBlock(String src, String clientName,
                               DatanodeInfo[] excludedNodes) throws IOException;

  //客户端完成对指定文件的写操作,并期望能够写完
  //在写完以后关闭文件,返回是否成功关闭;如果返回false,调用者应该再次发送请求
  //该方法只有在所有文件的文件块全部达到最小复制因子时才返回true
  public boolean complete(String src, String clientName) throws IOException;

  //客户端报告corrupted块的信息
  public void reportBadBlocks(LocatedBlock[] blocks) throws IOException;

  ///////////////////////////////////////
  // Namespace management
  ///////////////////////////////////////

  //重命名文件系统里的一个文件或目录   
  public boolean rename(String src, String dst) throws IOException;

  public boolean delete(String src) throws IOException;


  //删除文件系统里的一个文件或目录 ,recursive 是否递归删除子目录
  public boolean delete(String src, boolean recursive) throws IOException;
  

  //创建指定权限的目录
  public boolean mkdirs(String src, FsPermission masked) throws IOException;


  //获取指定目录的目录/文件列表
  public DirectoryListing getListing(String src, byte[] startAfter)
  throws IOException;

  ///////////////////////////////////////
  // System issues and management
  ///////////////////////////////////////

  
  //一个client操作某个文件时,为了防止其它clinet同时操作此文件,该client获取了一些列锁
  //如果此时该client突然意外死亡,Namenode会销毁这个锁并认为此client已经死亡
  //如果此client发送renewLease请求,告诉Namenode我没死;如果一段时间不发送,NameNode认为此client彻底死亡
  public void renewLease(String clientName) throws IOException;

  
  //获取文件系统现有的状态信息
  /**  
   * 返回一组标识文件系统不同信息的索引数组:  
   * <ul>  
   * <li> [0] 包含文件系统总存储容量(按字节计算)</li>  
   * <li> [1] 包含文件系统已使用空间(按字节计算)</li>  
   * <li> [2] 包含文件系统可使用空间(按字节计算)</li>  
   * <li> [3] 包含文件系统中不满足副本因子数量的块的数量</li>  
   * <li> [4] 包含corrupt副本的块的数量 </li>  
   * <li> [5] 包含没有任何可以用的块副本的块的数量</li>  
   * </ul>  
   */   
  public long[] getStats() throws IOException;


  //获得当前DataNodes的状态信息
  public DatanodeInfo[] getDatanodeReport(FSConstants.DatanodeReportType type)
  throws IOException;

  //获得指定文件的block size
  public long getPreferredBlockSize(String filename) throws IOException;

  
  //进入 离开 获取安全模式
  public boolean setSafeMode(FSConstants.SafeModeAction action) throws IOException;

   //保存命名空间映像
  //保存当前namespace到storage directories,然后进行checkpoint操作
  public void saveNamespace() throws IOException;

    //告诉NameNode重读hosts文件
  public void refreshNodes() throws IOException;

   //完成升级
  public void finalizeUpgrade() throws IOException;

    //获得升级过程中的当前状态
  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action) 
  throws IOException;

   //将Namenode结点上的数据结构写入到指定的文件中,如果指定文件已经存在,则追加到该文件中,就是checkpoint过程  
  public void metaSave(String filename) throws IOException;


  //重新为所有DataNode设置带宽
  public void setBalancerBandwidth(long bandwidth) throws IOException;


  //获得指定文件或目录的状态信息
  public HdfsFileStatus getFileInfo(String src) throws IOException;


  //获得指定目录的容量
  public ContentSummary getContentSummary(String path) throws IOException;

  
  //设置指定目录的配额
  public void setQuota(String path, long namespaceQuota, long diskspaceQuota)
                      throws IOException;
  
  
  //设置给定文件的修改时间
  public void setTimes(String src, long mtime, long atime) throws IOException;


  //获得令牌
  public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer) throws IOException;


  //重新创建令牌
  public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
      throws IOException;

  //取消已经存在的令牌
  public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
      throws IOException;

 

五. DatanodeProtocol

/**
 * DatanodeProtocol is used by DataNodes to talk to the NameNode
 * (heartbeats, block reports, and so on).
 * The NameNode never opens a connection to a DataNode; the only way it
 * communicates back is by piggybacking commands on the return values of
 * these DataNode-initiated calls.
 */
public interface DatanodeProtocol extends VersionedProtocol {

  /** DatanodeProtocol version number: 26. */
  long versionID = 26L;

  // Action codes carried inside DatanodeCommands returned by the NameNode;
  // they tell the DataNode which operation to perform.
  int DNA_UNKNOWN = 0;                 // unknown / no action
  int DNA_TRANSFER = 1;                // transfer blocks to another datanode
  int DNA_INVALIDATE = 2;              // invalidate (delete) the given blocks
  int DNA_SHUTDOWN = 3;                // shut down this datanode
  int DNA_REGISTER = 4;                // re-register with the namenode
  int DNA_FINALIZE = 5;                // finalize a previously started upgrade
  int DNA_RECOVERBLOCK = 6;            // request block recovery
  int DNA_ACCESSKEYUPDATE = 7;         // update access keys
  int DNA_BALANCERBANDWIDTHUPDATE = 8; // update balancer bandwidth

  /** Registers this DataNode with the NameNode. */
  DatanodeRegistration register(DatanodeRegistration registration
                                ) throws IOException;

  /**
   * Sends a heartbeat carrying this DataNode's liveness and resource
   * statistics. The returned commands tell the DataNode which blocks to
   * remove or copy.
   */
  DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
                                  long capacity,
                                  long dfsUsed, long remaining,
                                  int xmitsInProgress,
                                  int xceiverCount) throws IOException;

  /**
   * Sends a block report describing every block stored on this DataNode.
   * The returned command identifies orphaned blocks and blocks to delete.
   */
  DatanodeCommand blockReport(DatanodeRegistration registration,
                              long[] blocks) throws IOException;

  /** Reports the blocks currently being written on this DataNode. */
  void blocksBeingWrittenReport(DatanodeRegistration registration,
      long[] blocks) throws IOException;

  /** Reports recently received blocks and deleted surplus replicas. */
  void blockReceived(DatanodeRegistration registration,
                     Block blocks[],
                     String[] delHints) throws IOException;

  /** Reports an error condition to the NameNode. */
  void errorReport(DatanodeRegistration registration,
                   int errorCode,
                   String msg) throws IOException;

  /** Asks the NameNode for its namespace/version information. */
  NamespaceInfo versionRequest() throws IOException;

  /** Sends an upgrade command to the NameNode. */
  UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException;

  /** Reports bad blocks to the NameNode. */
  void reportBadBlocks(LocatedBlock[] blocks) throws IOException;

  /** Obtains a fresh generation stamp for the given block. */
  long nextGenerationStamp(Block block, boolean fromNN) throws IOException;

  /** Commits the block-synchronization transaction during block recovery. */
  void commitBlockSynchronization(Block block,
      long newgenerationstamp, long newlength,
      boolean closeFile, boolean deleteblock, DatanodeID[] newtargets
      ) throws IOException;
}

 

 

六. NamenodeProtocol

/** Protocol used by the Secondary NameNode to talk to the NameNode. */
public interface NamenodeProtocol extends VersionedProtocol {

  /** NamenodeProtocol version number: 3. */
  long versionID = 3L;

  /** Returns up to {@code size} bytes worth of blocks stored on the given DataNode. */
  BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
      throws IOException;

  /** Returns the current block access keys. */
  ExportedBlockKeys getBlockKeys() throws IOException;

  /** Returns the size of the current edit log. */
  long getEditLogSize() throws IOException;

  /** Rolls the edit log (an early step of the checkpoint process). */
  CheckpointSignature rollEditLog() throws IOException;

  /** Rolls the fsimage (the later steps of the checkpoint process). */
  void rollFsImage() throws IOException;
}

 

 

七. RefreshAuthorizationPolicyProtocol

/** Protocol used to refresh the service-level authorization policy currently in use. */
public interface RefreshAuthorizationPolicyProtocol extends VersionedProtocol {

  /** Protocol version number: 1. */
  long versionID = 1L;

  /**
   * Refreshes the service-level authorization policy in effect.
   *
   * @throws IOException if the refresh fails
   */
  void refreshServiceAcl() throws IOException;
}

 

八. RefreshUserMappingsProtocol

/** Protocol used to refresh user-to-groups mappings on the server. */
public interface RefreshUserMappingsProtocol extends VersionedProtocol {

  /** Protocol version number: 1. */
  long versionID = 1L;

  /** Refreshes the user-to-groups mappings. */
  void refreshUserToGroupsMappings() throws IOException;

  /** Refreshes the superuser proxy-groups configuration. */
  void refreshSuperUserGroupsConfiguration() throws IOException;
}

 

 

 

 

分享到:
评论

相关推荐

    hadoop-hdfs-client-2.9.1-API文档-中文版.zip

    赠送jar包:hadoop-hdfs-client-2.9.1.jar 赠送原API文档:hadoop-hdfs-client-2.9.1-javadoc.jar 赠送源代码:hadoop-hdfs-client-2.9.1-sources.jar 包含翻译后的API文档:hadoop-hdfs-client-2.9.1-javadoc-...

    hadoop-hdfs-client-2.9.1-API文档-中英对照版.zip

    赠送jar包:hadoop-hdfs-client-2.9.1.jar; 赠送原API文档:hadoop-hdfs-client-2.9.1-javadoc.jar; 赠送源代码:hadoop-hdfs-client-2.9.1-sources.jar; 赠送Maven依赖信息文件:hadoop-hdfs-client-2.9.1.pom;...

    hadoop-hdfs-2.6.5-API文档-中文版.zip

    赠送jar包:hadoop-hdfs-2.6.5.jar; 赠送原API文档:hadoop-hdfs-2.6.5-javadoc.jar; 赠送源代码:hadoop-hdfs-2.6.5-sources.jar; 赠送Maven依赖信息文件:hadoop-hdfs-2.6.5.pom; 包含翻译后的API文档:hadoop...

    hadoop-hdfs-2.7.3-API文档-中英对照版.zip

    赠送jar包:hadoop-hdfs-2.7.3.jar; 赠送原API文档:hadoop-hdfs-2.7.3-javadoc.jar; 赠送源代码:hadoop-hdfs-2.7.3-sources.jar; 赠送Maven依赖信息文件:hadoop-hdfs-2.7.3.pom; 包含翻译后的API文档:hadoop...

    hadoop-hdfs-2.7.3-API文档-中文版.zip

    赠送jar包:hadoop-hdfs-2.7.3.jar; 赠送原API文档:hadoop-hdfs-2.7.3-javadoc.jar; 赠送源代码:hadoop-hdfs-2.7.3-sources.jar; 赠送Maven依赖信息文件:hadoop-hdfs-2.7.3.pom; 包含翻译后的API文档:hadoop...

    hadoop-hdfs-2.9.1-API文档-中文版.zip

    赠送jar包:hadoop-hdfs-2.9.1.jar 赠送原API文档:hadoop-hdfs-2.9.1-javadoc.jar 赠送源代码:hadoop-hdfs-2.9.1-sources.jar 包含翻译后的API文档:hadoop-hdfs-2.9.1-javadoc-API文档-中文(简体)版.zip 对应...

    hadoop-hdfs-2.6.5-API文档-中英对照版.zip

    赠送jar包:hadoop-hdfs-2.6.5.jar; 赠送原API文档:hadoop-hdfs-2.6.5-javadoc.jar; 赠送源代码:hadoop-hdfs-2.6.5-sources.jar; 赠送Maven依赖信息文件:hadoop-hdfs-2.6.5.pom; 包含翻译后的API文档:hadoop...

    hadoop-hdfs-2.5.1-API文档-中英对照版.zip

    赠送jar包:hadoop-hdfs-2.5.1.jar; 赠送原API文档:hadoop-hdfs-2.5.1-javadoc.jar; 赠送源代码:hadoop-hdfs-2.5.1-sources.jar; 赠送Maven依赖信息文件:hadoop-hdfs-2.5.1.pom; 包含翻译后的API文档:hadoop...

    hadoop-hdfs-2.5.1-API文档-中文版.zip

    赠送jar包:hadoop-hdfs-2.5.1.jar; 赠送原API文档:hadoop-hdfs-2.5.1-javadoc.jar; 赠送源代码:hadoop-hdfs-2.5.1-sources.jar; 赠送Maven依赖信息文件:hadoop-hdfs-2.5.1.pom; 包含翻译后的API文档:hadoop...

    hadoop-hdfs-2.9.1-API文档-中英对照版.zip

    赠送jar包:hadoop-hdfs-2.9.1.jar; 赠送原API文档:hadoop-hdfs-2.9.1-javadoc.jar; 赠送源代码:hadoop-hdfs-2.9.1-sources.jar; 赠送Maven依赖信息文件:hadoop-hdfs-2.9.1.pom; 包含翻译后的API文档:hadoop...

    hadoop-mapreduce-client-common-2.6.5-API文档-中英对照版.zip

    赠送jar包:hadoop-mapreduce-client-common-2.6.5.jar; 赠送原API文档:hadoop-mapreduce-client-common-2.6.5-javadoc.jar; 赠送源代码:hadoop-mapreduce-client-common-2.6.5-sources.jar; 赠送Maven依赖信息...

    hadoop-mapreduce-client-core-2.5.1-API文档-中文版.zip

    赠送jar包:hadoop-mapreduce-client-core-2.5.1.jar; 赠送原API文档:hadoop-mapreduce-client-core-2.5.1-javadoc.jar; 赠送源代码:hadoop-mapreduce-client-core-2.5.1-sources.jar; 赠送Maven依赖信息文件:...

    hadoop最新版本3.1.1全量jar包

    hadoop-auth-3.1.1.jar hadoop-hdfs-3.1.1.jar hadoop-mapreduce-client-hs-3.1.1.jar hadoop-yarn-client-3.1.1.jar hadoop-client-api-3.1.1.jar hadoop-hdfs-client-3.1.1.jar hadoop-mapreduce-client-jobclient...

    Hadoop 3.x(HDFS)----【HDFS 的 API 操作】---- 代码

    Hadoop 3.x(HDFS)----【HDFS 的 API 操作】---- 代码 Hadoop 3.x(HDFS)----【HDFS 的 API 操作】---- 代码 Hadoop 3.x(HDFS)----【HDFS 的 API 操作】---- 代码 Hadoop 3.x(HDFS)----【HDFS 的 API 操作】--...

    hadoop-mapreduce-client-jobclient-2.6.5-API文档-中文版.zip

    赠送jar包:hadoop-mapreduce-client-jobclient-2.6.5.jar; 赠送原API文档:hadoop-mapreduce-client-jobclient-2.6.5-javadoc.jar; 赠送源代码:hadoop-mapreduce-client-jobclient-2.6.5-sources.jar; 赠送...

    hadoop-yarn-common-2.6.5-API文档-中文版.zip

    赠送jar包:hadoop-yarn-common-2.6.5.jar 赠送原API文档:hadoop-yarn-common-2.6.5-javadoc.jar 赠送源代码:hadoop-yarn-common-2.6.5-sources.jar 包含翻译后的API文档:hadoop-yarn-common-2.6.5-javadoc-...

    hadoop-yarn-client-2.6.5-API文档-中文版.zip

    赠送jar包:hadoop-yarn-client-2.6.5.jar; 赠送原API文档:hadoop-yarn-client-2.6.5-javadoc.jar; 赠送源代码:hadoop-yarn-client-2.6.5-sources.jar; 赠送Maven依赖信息文件:hadoop-yarn-client-2.6.5.pom;...

    hadoop-yarn-api-2.5.1-API文档-中文版.zip

    赠送jar包:hadoop-yarn-api-2.5.1.jar; 赠送原API文档:hadoop-yarn-api-2.5.1-javadoc.jar; 赠送源代码:hadoop-yarn-api-2.5.1-sources.jar; 赠送Maven依赖信息文件:hadoop-yarn-api-2.5.1.pom; 包含翻译后...

    hadoop-mapreduce-client-app-2.7.3-API文档-中英对照版.zip

    赠送jar包:hadoop-mapreduce-client-app-2.7.3.jar; 赠送原API文档:hadoop-mapreduce-client-app-2.7.3-javadoc.jar; 赠送源代码:hadoop-mapreduce-client-app-2.7.3-sources.jar; 赠送Maven依赖信息文件:...

    hadoop-yarn-server-resourcemanager-2.6.0-API文档-中文版.zip

    赠送jar包:hadoop-yarn-server-resourcemanager-2.6.0.jar; 赠送原API文档:hadoop-yarn-server-resourcemanager-2.6.0-javadoc.jar; 赠送源代码:hadoop-yarn-server-resourcemanager-2.6.0-sources.jar; 赠送...

Global site tag (gtag.js) - Google Analytics