
I recently started using Hadoop. Now I want to access HDFS from a remote host that does not have hadoop-client installed and only depends on hadoop-client-2.0.4-alpha.jar.

But when I try to access HDFS, I get the following exception:

java.io.IOException: Failed on local exception: com.google.protobuf.InvalidProtocolBufferException: Message missing required fields: callId, status; Host Details : local host is: "webserver/127.0.0.1"; destination host is: "222.333.111.77":8020;
        at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:761)
        at org.apache.hadoop.ipc.Client.call(Client.java:1239)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:202)
        at $Proxy25.getFileInfo(Unknown Source)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
        at java.lang.reflect.Method.invoke(Method.java:597)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:164)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:83)
        at $Proxy25.getFileInfo(Unknown Source)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getFileInfo(ClientNamenodeProtocolTranslatorPB.java:630)
        at org.apache.hadoop.hdfs.DFSClient.getFileInfo(DFSClient.java:1559)
        at org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(DistributedFileSystem.java:811)
        at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1345)
        at com.kongming.kmdata.service.ExportService.copyToLocalFileFromHdfs(ExportService.java:60)
        at com.kongming.kmdata.service.KMReportManager.run(KMReportManager.java:105)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441)
        at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
        at java.util.concurrent.FutureTask.run(FutureTask.java:138)
        at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
        at java.lang.Thread.run(Thread.java:662)
Caused by: com.google.protobuf.InvalidProtocolBufferException: Message missing required fields: callId, status
        at com.google.protobuf.UninitializedMessageException.asInvalidProtocolBufferException(UninitializedMessageException.java:81)
        at org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos$RpcResponseHeaderProto$Builder.buildParsed(RpcPayloadHeaderProtos.java:1094)
        at org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos$RpcResponseHeaderProto$Builder.access$1300(RpcPayloadHeaderProtos.java:1028)
        at org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos$RpcResponseHeaderProto.parseDelimitedFrom(RpcPayloadHeaderProtos.java:986)
        at org.apache.hadoop.ipc.Client$Connection.receiveResponse(Client.java:946)
        at org.apache.hadoop.ipc.Client$Connection.run(Client.java:844)

It looks like an RPC exception. How can I fix it? Here is my code:

package com.xxx.xxx.service;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;

import com.xxx.xxx.fileSystem.IFilePath;
import com.xxx.xxx.inject.GuiceDependency;

public class ExportService {
    private static Logger log = Logger.getLogger(ExportService.class);

    private static Configuration configuration = new Configuration();

    private static String dir = "./";

    private static String hadoopConf = "hadoop-conf/";

    static {
        // Load the cluster configuration files shipped alongside the client.
        configuration.addResource(new Path(hadoopConf + "core-site.xml"));
        configuration.addResource(new Path(hadoopConf + "hdfs-site.xml"));
        configuration.addResource(new Path(hadoopConf + "mapred-site.xml"));
        configuration.addResource(new Path(hadoopConf + "yarn-site.xml"));
    }

    public static boolean copyToLocalFileFromHdfs(String reportID) {

        IFilePath filePath = GuiceDependency.getInstance(IFilePath.class);

        String resultPath = filePath.getFinalResult(reportID) + "/part-r-00000";
        Path src = new Path(resultPath);

        String exportPath = dir + reportID + ".csv";
        Path dst = new Path(exportPath);

        System.out.println(configuration.get("fs.defaultFS"));
        System.out.println("zxz copyToLocalFileFromHdfs  src: "
                + src.toString() + "  ,  dst: " + dst.toString());
        try {
            System.out.println("zxz   get fileSystem  start   ");
            FileSystem fs = FileSystem.get(configuration);
            System.out.println("zxz   get fileSystem  end   "
                    + fs.getHomeDirectory().toString());
            System.out.println("zxz   ~~~~~~~~~~~~~~~~~~~~~~~~~"
                    + fs.exists(src));

            // Copy the result file from HDFS to the local export path.
            fs.copyToLocalFile(false, src, dst, true);
        } catch (Exception e) {
            e.printStackTrace();
            log.error("copyFromHDFSFile error : ", e);
            return false;
        }
        System.out.println("zxz end copyToLocalFileFromHdfs for report: "
                + reportID);
        return true;
    }
}

And here is core-site.xml:

<?xml version="1.0" encoding="UTF-8"?>

<!--Autogenerated by Cloudera CM on 2013-07-19T00:57:49.581Z-->
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://222.333.111.77:8020</value>
  </property>
  <property>
    <name>fs.trash.interval</name>
    <value>1</value>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>65536</value>
  </property>
  <property>
    <name>hadoop.security.authentication</name>
    <value>simple</value>
  </property>
  <property>
    <name>hadoop.rpc.protection</name>
    <value>authentication</value>
  </property>
  <property>
    <name>hadoop.security.auth_to_local</name>
    <value>DEFAULT</value>
  </property>
  <property>
    <name>hadoop.native.lib</name>
    <value>false</value>
    <description>Should native hadoop libraries, if present, be used.</description>
  </property>

</configuration>

Does anyone know what is causing this? Any help is much appreciated.


1 Answer


I believe HDFS uses the Google protobuf library for its RPC layer, and it looks like your client code is pulling in the wrong (incompatible) version of protobuf.
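One minimal way to check this (a sketch only; the ProtobufCheck class name is made up for illustration) is to print which jars the client JVM actually loads the protobuf and Hadoop IPC classes from, then compare that protobuf-java version with the one your Hadoop distribution was built against:

// Hypothetical helper: report which jars supply the protobuf and Hadoop IPC
// classes on the client classpath, to spot an incompatible protobuf-java.
public class ProtobufCheck {

    public static void main(String[] args) {
        printSource("com.google.protobuf.Message");
        printSource("org.apache.hadoop.ipc.Client");
    }

    private static void printSource(String className) {
        try {
            Class<?> clazz = Class.forName(className);
            java.security.CodeSource source =
                    clazz.getProtectionDomain().getCodeSource();
            System.out.println(className + " loaded from "
                    + (source != null ? source.getLocation() : "<bootstrap>"));
        } catch (ClassNotFoundException e) {
            System.out.println(className + " is not on the classpath");
        }
    }
}

If the protobuf-java jar the client resolves differs from the one bundled with the cluster's Hadoop build, align the client's hadoop-client dependency (and its transitive protobuf-java) with the exact Hadoop version running on the NameNode; with Maven, `mvn dependency:tree` shows which protobuf-java the client actually pulls in.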

answered 2013-08-19T06:02:13.230