3

我试图使用 HbaseTestingUtility 测试我的 Hbase 代码。每次我使用下面的代码片段启动我的迷你集群时,都会遇到异常。

    /**
     * Starts an in-process HBase mini-cluster for testing.
     *
     * <p>Builds a Hadoop/HBase configuration rooted at the current working
     * directory, aligns the DataNode data-dir permissions with the process
     * umask (see HBASE-5711), then boots a {@code MiniZooKeeperCluster} and a
     * mini HBase cluster unless a server is already running.
     *
     * <p>NOTE(review): relies on instance fields declared elsewhere in this
     * class ({@code masterDir}, {@code utility}, {@code hTablePool},
     * {@code zkCluster}, {@code zkDir}, {@code logger}) — confirm their
     * declarations when integrating.
     *
     * @throws RuntimeException if the mini-cluster fails to start
     */
    public void startCluster()
    {
        File workingDirectory = new File("./");
        Configuration conf = new Configuration();
        System.setProperty("test.build.data", workingDirectory.getAbsolutePath());
        conf.set("test.build.data", new File(workingDirectory, "zookeeper").getAbsolutePath());
        conf.set("fs.default.name", "file:///");
        conf.set("zookeeper.session.timeout", "180000");
        conf.set("hbase.zookeeper.peerport", "2888");
        conf.set("hbase.zookeeper.property.clientPort", "2181");
        conf.addResource(new Path("conf/hbase-site1.xml"));
        try
        {
            masterDir = new File(workingDirectory, "hbase");
            conf.set(HConstants.HBASE_DIR, masterDir.toURI().toURL().toString());
        }
        catch (MalformedURLException e1)
        {
            // Practically unreachable for File.toURI().toURL(); still, log the
            // full exception (not just the message) so a misconfigured
            // HBASE_DIR is diagnosable instead of failing silently later.
            logger.error("Could not set " + HConstants.HBASE_DIR, e1);
        }

        Configuration hbaseConf = HBaseConfiguration.create(conf);
        utility = new HBaseTestingUtility(hbaseConf);

        // Change permission for dfs.data.dir, please refer
        // https://issues.apache.org/jira/browse/HBASE-5711 for more details.
        applyUmaskToDataDirPerms();

        if (!checkIfServerRunning())
        {
            hTablePool = new HTablePool(conf, 1);
            try
            {
                zkCluster = new MiniZooKeeperCluster(conf);
                zkCluster.setDefaultClientPort(2181);
                zkCluster.setTickTime(18000);
                zkDir = new File(utility.getClusterTestDir().toString());
                zkCluster.startup(zkDir);
                utility.setZkCluster(zkCluster);
                utility.startMiniCluster();
                utility.getHBaseCluster().startMaster();
            }
            catch (Exception e)
            {
                // Fail fast: a half-started mini-cluster is useless to tests.
                // Log with the full stack trace instead of printStackTrace().
                logger.error("Failed to start mini-cluster", e);
                throw new RuntimeException(e);
            }
        }
    }

    /**
     * Reads the process umask via {@code /bin/sh -c umask} and applies the
     * resulting permission bits to {@code dfs.datanode.data.dir.perm} on the
     * testing utility's configuration.
     *
     * <p>Best-effort: on non-POSIX systems, or when {@code sh} is not on the
     * path, a warning is logged and the default permissions are kept.
     */
    private void applyUmaskToDataDirPerms()
    {
        try
        {
            // Use the String[] overload: the single-String exec form tokenizes
            // on whitespace, which happens to work here but is fragile.
            Process process = Runtime.getRuntime().exec(new String[] { "/bin/sh", "-c", "umask" });
            // try-with-resources: the original leaked this reader.
            try (BufferedReader br =
                    new BufferedReader(new InputStreamReader(process.getInputStream())))
            {
                int rc = process.waitFor();
                if (rc == 0)
                {
                    String umask = br.readLine();

                    // umask prints an octal value; desired perms = 0777 & ~umask.
                    int umaskBits = Integer.parseInt(umask, 8);
                    int permBits = 0777 & ~umaskBits;
                    String perms = Integer.toString(permBits, 8);

                    logger.info("Setting dfs.datanode.data.dir.perm to " + perms);
                    utility.getConfiguration().set("dfs.datanode.data.dir.perm", perms);
                }
                else
                {
                    logger.warn("Failed running umask command in a shell, nonzero return value");
                }
            }
        }
        catch (InterruptedException ie)
        {
            // Restore the interrupt flag instead of swallowing it in the
            // broad catch below.
            Thread.currentThread().interrupt();
            logger.warn("Interrupted while reading umask", ie);
        }
        catch (Exception e)
        {
            // ignore errors, we might not be running on POSIX, or "sh" might
            // not be on the path
            logger.warn("Couldn't get umask", e);
        }
    }

我得到了如下异常。

2013-09-10 15:26:26 INFO  ClientCnxn:849 - Socket connection established to localhost/127.0.0.1:2181, initiating session
2013-09-10 15:26:26 INFO  ZooKeeperServer:839 - Client attempting to establish new session at /127.0.0.1:45934
2013-09-10 15:26:26 INFO  ZooKeeperServer:595 - Established session 0x141074cd6150002 with negotiated timeout 180000 for client /127.0.0.1:45934
2013-09-10 15:26:26 INFO  ClientCnxn:1207 - Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x141074cd6150002, negotiated timeout = 180000
2013-09-10 15:26:26 INFO  HBaseRPC:289 - Server at localhost/127.0.0.1:42926 could not be reached after 1 tries, giving up.
2013-09-10 15:26:26 WARN  AssignmentManager:1714 - Failed assignment of -ROOT-,,0.70236052 to localhost,42926,1378806982623, trying to assign elsewhere instead; retry=0
org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed setting up proxy interface org.apache.hadoop.hbase.ipc.HRegionInterface to localhost/127.0.0.1:42926 after attempts=1
    at org.apache.hadoop.hbase.ipc.HBaseRPC.handleConnectionException(HBaseRPC.java:291)
    at org.apache.hadoop.hbase.ipc.HBaseRPC.waitForProxy(HBaseRPC.java:259)
    at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation.getHRegionConnection(HConnectionManager.java:1305)
    at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation.getHRegionConnection(HConnectionManager.java:1261)
    at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation.getHRegionConnection(HConnectionManager.java:1248)
    at org.apache.hadoop.hbase.master.ServerManager.getServerConnection(ServerManager.java:550)
    at org.apache.hadoop.hbase.master.ServerManager.sendRegionOpen(ServerManager.java:483)
    at org.apache.hadoop.hbase.master.AssignmentManager.assign(AssignmentManager.java:1664)
    at org.apache.hadoop.hbase.master.AssignmentManager.assign(AssignmentManager.java:1387)
    at org.apache.hadoop.hbase.master.AssignmentManager.assign(AssignmentManager.java:1362)
    at org.apache.hadoop.hbase.master.AssignmentManager.assign(AssignmentManager.java:1357)
    at org.apache.hadoop.hbase.master.AssignmentManager.assignRoot(AssignmentManager.java:2236)
    at org.apache.hadoop.hbase.master.HMaster.assignRootAndMeta(HMaster.java:654)
    at org.apache.hadoop.hbase.master.HMaster.finishInitialization(HMaster.java:551)
    at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:362)
    at java.lang.Thread.run(Thread.java:722)
Caused by: java.net.ConnectException: Connection refused
    at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:692)
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:207)
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:525)
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:489)
    at org.apache.hadoop.hbase.ipc.HBaseClient$Connection.setupConnection(HBaseClient.java:416)
    at org.apache.hadoop.hbase.ipc.HBaseClient$Connection.setupIOstreams(HBaseClient.java:462)
    at org.apache.hadoop.hbase.ipc.HBaseClient.getConnection(HBaseClient.java:1150)
    at org.apache.hadoop.hbase.ipc.HBaseClient.call(HBaseClient.java:1000)
    at org.apache.hadoop.hbase.ipc.WritableRpcEngine$Invoker.invoke(WritableRpcEngine.java:150)
    at com.sun.proxy.$Proxy20.getProtocolVersion(Unknown Source)
    at org.apache.hadoop.hbase.ipc.WritableRpcEngine.getProxy(WritableRpcEngine.java:183)
    at org.apache.hadoop.hbase.ipc.HBaseRPC.getProxy(HBaseRPC.java:335)
    at org.apache.hadoop.hbase.ipc.HBaseRPC.getProxy(HBaseRPC.java:312)
    at org.apache.hadoop.hbase.ipc.HBaseRPC.getProxy(HBaseRPC.java:364)
    at org.apache.hadoop.hbase.ipc.HBaseRPC.waitForProxy(HBaseRPC.java:236)
    ... 14 more
2013-09-10 15:26:26 WARN  AssignmentManager:1736 - Unable to find a viable location to assign region -ROOT-,,0.70236052
2013-09-10 15:27:24 INFO  audit:5677 - allowed=true ugi=aniket (auth:SIMPLE)    ip=/127.0.0.1   cmd=listStatus  src=/user/aniket/hbase/.oldlogs dst=null    perm=null
2013-09-10 15:27:24 INFO  audit:5677 - allowed=true ugi=aniket (auth:SIMPLE)    ip=/127.0.0.1   cmd=listStatus  src=/user/aniket/hbase/.archive dst=null    perm=null
2013-09-10 15:28:24 INFO  audit:5677 - allowed=true ugi=aniket (auth:SIMPLE)    ip=/127.0.0.1   cmd=listStatus  src=/user/aniket/hbase/.archive dst=null    perm=null
2013-09-10 15:28:24 INFO  audit:5677 - allowed=true ugi=aniket (auth:SIMPLE)    ip=/127.0.0.1   cmd=listStatus  src=/user/aniket/hbase/.oldlogs dst=null    perm=null
2013-09-10 15:29:24 INFO  audit:5677 - allowed=true ugi=aniket (auth:SIMPLE)    ip=/127.0.0.1   cmd=listStatus  src=/user/aniket/hbase/.oldlogs dst=null    perm=null
2013-09-10 15:29:24 INFO  audit:5677 - allowed=true ugi=aniket (auth:SIMPLE)    ip=/127.0.0.1   cmd=listStatus  src=/user/aniket/hbase/.archive dst=null    perm=null
2013-09-10 15:29:42 ERROR MiniHBaseCluster:201 - Error starting cluster
java.lang.RuntimeException: Master not initialized after 200 seconds
    at org.apache.hadoop.hbase.util.JVMClusterUtil.startup(JVMClusterUtil.java:206)
    at org.apache.hadoop.hbase.LocalHBaseCluster.startup(LocalHBaseCluster.java:420)
    at org.apache.hadoop.hbase.MiniHBaseCluster.init(MiniHBaseCluster.java:196)
    at org.apache.hadoop.hbase.MiniHBaseCluster.<init>(MiniHBaseCluster.java:76)
    at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniHBaseCluster(HBaseTestingUtility.java:635)
    at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:609)
    at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:557)
    at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:526)
    at HBaseTesting.startCluster(HBaseTesting.java:131)
    at HBaseTesting.main(HBaseTesting.java:62)
2013-09-10 15:29:42 INFO  HMaster:1635 - Cluster shutdown requested
2013-09-10 15:29:42 INFO  HRegionServer:1666 - STOPPED: Shutdown requested
2013-09-10 15:29:42 INFO  HBaseServer:1651 - Stopping server on 42926

谁能帮我解决这个问题。

4

2 回答 2

3

这是我的解决方案。

我不得不更新我的/etc/hosts文件。

其中两个相关的条目是:

127.0.0.1       localhost
127.0.1.1       myhostname

我必须修改 myhostname 所指向的 IP,使其也指向 127.0.0.1。

一旦我的/etc/hosts更新为如下所示:

127.0.0.1       localhost
127.0.0.1       myhostname

代码开始工作。

(这是假设一个 Linux 服务器。将 /etc/hosts 替换为您的操作系统的等效文件)

http://en.wikipedia.org/wiki/Hosts_(file)

于 2013-11-12T19:39:06.203 回答
0

将 Guava 依赖项添加到 Gradle 文件对我有用。

compile group: 'com.google.guava', name: 'guava', version: '14.0'
于 2020-08-14T11:43:35.120 回答