I am trying to use Spring Data Hadoop to execute MR code on a remote cluster from my local machine's IDE.

Hadoop 1.1.2, Spring 3.2.4, Spring-Data-Hadoop 1.0.0

My bean configuration file, viz. applicationContext.xml, is as follows:

        <?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:hdp="http://www.springframework.org/schema/hadoop"
    xmlns:context="http://www.springframework.org/schema/context"
    xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd 
    http://www.springframework.org/schema/hadoop http://www.springframework.org/schema/hadoop/spring-hadoop.xsd
    http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-3.2.xsd">

    <context:property-placeholder location="resources/hadoop.properties" />

    <hdp:configuration file-system-uri="${hd.fs}"
        job-tracker-uri="${hd.jobtracker.uri}" />

    <hdp:job id="wc-job" mapper="com.hadoop.basics.WordCounter.WCMapper"
        reducer="com.hadoop.basics.WordCounter.WCReducer" input-path="${wordcount.input.path}"
        output-path="${wordcount.output.path}" user="bigdata">
    </hdp:job>

    <hdp:job-runner id="myjobs-runner" job-ref="wc-job"
        run-at-startup="true" />

    <hdp:resource-loader id="resourceLoader" uri="${hd.fs}"
        user="bigdata" />   
</beans>

hadoop.properties

hd.fs=hdfs://cloudx-843-770:9000
hd.jobtracker.uri=cloudx-843-770:9001

wordcount.input.path=/scratchpad/input/Childhood_days.txt
wordcount.output.path=/scratchpad/output

The Java class that I'm executing with 'Run As...':

package com.hadoop.basics;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.springframework.context.support.AbstractApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;

public class WordCounter {

    private static final IntWritable one = new IntWritable(1);

    // The nested classes must be static so that Hadoop can instantiate
    // them reflectively; with the default TextInputFormat the map keys
    // are LongWritable byte offsets.
    public static class WCMapper extends
            Mapper<LongWritable, Text, Text, IntWritable> {

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Emit (token, 1) for every whitespace-separated token in the line.
            StringTokenizer strTokenizer = new StringTokenizer(value.toString());
            Text token = new Text();

            while (strTokenizer.hasMoreTokens()) {
                token.set(strTokenizer.nextToken());
                context.write(token, one);
            }
        }
    }

    public static class WCReducer extends
            Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            // Sum the counts emitted for each token.
            int sum = 0;

            for (IntWritable value : values) {
                sum += value.get();
            }

            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) {
        AbstractApplicationContext context = new ClassPathXmlApplicationContext(
                "applicationContext.xml", WordCounter.class);
        System.out.println("Word Count Application Running");
        context.registerShutdownHook();
    }
}

The output is:

Aug 22, 2013 9:59:02 AM org.springframework.context.support.AbstractApplicationContext prepareRefresh
INFO: Refreshing org.springframework.context.support.ClassPathXmlApplicationContext@1815338: startup date [Thu Aug 22 09:59:02 IST 2013]; root of context hierarchy
Aug 22, 2013 9:59:03 AM org.springframework.beans.factory.xml.XmlBeanDefinitionReader loadBeanDefinitions
INFO: Loading XML bean definitions from class path resource [com/hadoop/basics/applicationContext.xml]
Aug 22, 2013 9:59:03 AM org.springframework.core.io.support.PropertiesLoaderSupport loadProperties
INFO: Loading properties file from class path resource [resources/hadoop.properties]
Aug 22, 2013 9:59:03 AM org.springframework.beans.factory.support.DefaultListableBeanFactory preInstantiateSingletons
INFO: Pre-instantiating singletons in org.springframework.beans.factory.support.DefaultListableBeanFactory@7c197e: defining beans [org.springframework.context.support.PropertySourcesPlaceholderConfigurer#0,hadoopConfiguration,wc-job,myjobs-runner,resourceLoader]; root of factory hierarchy
Aug 22, 2013 9:59:03 AM org.springframework.data.hadoop.mapreduce.JobExecutor$2 run
INFO: Starting job [wc-job]
Aug 22, 2013 9:59:03 AM org.apache.hadoop.security.UserGroupInformation doAs
SEVERE: PriviledgedActionException as:bigdata via 298790 cause:org.apache.hadoop.ipc.RemoteException: User: 298790 is not allowed to impersonate bigdata
Aug 22, 2013 9:59:03 AM org.springframework.data.hadoop.mapreduce.JobExecutor$2 run
WARNING: Cannot start job [wc-job]
org.apache.hadoop.ipc.RemoteException: User: 298790 is not allowed to impersonate bigdata
    at org.apache.hadoop.ipc.Client.call(Client.java:1107)
    at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
    at org.apache.hadoop.mapred.$Proxy2.getProtocolVersion(Unknown Source)
    at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:411)
    at org.apache.hadoop.mapred.JobClient.createRPCProxy(JobClient.java:499)
    at org.apache.hadoop.mapred.JobClient.init(JobClient.java:490)
    at org.apache.hadoop.mapred.JobClient.<init>(JobClient.java:473)
    at org.apache.hadoop.mapreduce.Job$1.run(Job.java:513)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Unknown Source)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1149)
    at org.apache.hadoop.mapreduce.Job.connect(Job.java:511)
    at org.apache.hadoop.mapreduce.Job.submit(Job.java:499)
    at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:530)
    at org.springframework.data.hadoop.mapreduce.JobExecutor$2.run(JobExecutor.java:197)
    at org.springframework.core.task.SyncTaskExecutor.execute(SyncTaskExecutor.java:49)
    at org.springframework.data.hadoop.mapreduce.JobExecutor.startJobs(JobExecutor.java:168)
    at org.springframework.data.hadoop.mapreduce.JobExecutor.startJobs(JobExecutor.java:160)
    at org.springframework.data.hadoop.mapreduce.JobRunner.call(JobRunner.java:52)
    at org.springframework.data.hadoop.mapreduce.JobRunner.afterPropertiesSet(JobRunner.java:44)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.invokeInitMethods(AbstractAutowireCapableBeanFactory.java:1541)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1479)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:521)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:458)
    at org.springframework.beans.factory.support.AbstractBeanFactory$1.getObject(AbstractBeanFactory.java:295)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:223)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:292)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:194)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:628)
    at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:932)
    at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:479)
    at org.springframework.context.support.ClassPathXmlApplicationContext.<init>(ClassPathXmlApplicationContext.java:197)
    at org.springframework.context.support.ClassPathXmlApplicationContext.<init>(ClassPathXmlApplicationContext.java:172)
    at org.springframework.context.support.ClassPathXmlApplicationContext.<init>(ClassPathXmlApplicationContext.java:158)
    at com.hadoop.basics.WordCounter.main(WordCounter.java:58)

Aug 22, 2013 9:59:03 AM org.springframework.beans.factory.support.DefaultSingletonBeanRegistry destroySingletons
INFO: Destroying singletons in org.springframework.beans.factory.support.DefaultListableBeanFactory@7c197e: defining beans [org.springframework.context.support.PropertySourcesPlaceholderConfigurer#0,hadoopConfiguration,wc-job,myjobs-runner,resourceLoader]; root of factory hierarchy
Exception in thread "main" org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'myjobs-runner': Invocation of init method failed; nested exception is java.lang.IllegalStateException: org.apache.hadoop.ipc.RemoteException: User: 298790 is not allowed to impersonate bigdata
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1482)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:521)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:458)
    at org.springframework.beans.factory.support.AbstractBeanFactory$1.getObject(AbstractBeanFactory.java:295)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:223)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:292)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:194)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:628)
    at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:932)
    at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:479)
    at org.springframework.context.support.ClassPathXmlApplicationContext.<init>(ClassPathXmlApplicationContext.java:197)
    at org.springframework.context.support.ClassPathXmlApplicationContext.<init>(ClassPathXmlApplicationContext.java:172)
    at org.springframework.context.support.ClassPathXmlApplicationContext.<init>(ClassPathXmlApplicationContext.java:158)
    at com.hadoop.basics.WordCounter.main(WordCounter.java:58)
Caused by: java.lang.IllegalStateException: org.apache.hadoop.ipc.RemoteException: User: 298790 is not allowed to impersonate bigdata
    at org.springframework.data.hadoop.mapreduce.JobExecutor$2.run(JobExecutor.java:209)
    at org.springframework.core.task.SyncTaskExecutor.execute(SyncTaskExecutor.java:49)
    at org.springframework.data.hadoop.mapreduce.JobExecutor.startJobs(JobExecutor.java:168)
    at org.springframework.data.hadoop.mapreduce.JobExecutor.startJobs(JobExecutor.java:160)
    at org.springframework.data.hadoop.mapreduce.JobRunner.call(JobRunner.java:52)
    at org.springframework.data.hadoop.mapreduce.JobRunner.afterPropertiesSet(JobRunner.java:44)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.invokeInitMethods(AbstractAutowireCapableBeanFactory.java:1541)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1479)
    ... 13 more
Caused by: org.apache.hadoop.ipc.RemoteException: User: 298790 is not allowed to impersonate bigdata
    at org.apache.hadoop.ipc.Client.call(Client.java:1107)
    at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
    at org.apache.hadoop.mapred.$Proxy2.getProtocolVersion(Unknown Source)
    at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:411)
    at org.apache.hadoop.mapred.JobClient.createRPCProxy(JobClient.java:499)
    at org.apache.hadoop.mapred.JobClient.init(JobClient.java:490)
    at org.apache.hadoop.mapred.JobClient.<init>(JobClient.java:473)
    at org.apache.hadoop.mapreduce.Job$1.run(Job.java:513)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Unknown Source)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1149)
    at org.apache.hadoop.mapreduce.Job.connect(Job.java:511)
    at org.apache.hadoop.mapreduce.Job.submit(Job.java:499)
    at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:530)
    at org.springframework.data.hadoop.mapreduce.JobExecutor$2.run(JobExecutor.java:197)
    ... 20 more

Quite obviously, user 298790 (my local Windows machine's user) is not recognized on the cluster - which is why, in the configuration file:

  1. I specified user="bigdata" in the job's configuration, as mentioned in the docs.
  2. The docs also mention:

SHDP obeys the HDFS permissions, using the identity of the current user (by default) for interacting with the file system. In particular, the HdfsResourceLoader considers, when doing pattern matching, only the files that it is supposed to see and does not perform any privileged action. It is possible however to specify a different user, meaning the ResourceLoader interacts with HDFS using that user's rights - however this obeys the user impersonation rules. Going by the API, I decided to use HdfsResourceLoader, but I couldn't find any examples of it in the documentation - can anyone provide any pointers? (A sketch of what I mean follows this list.)

  3. As per Hadoop Secure Impersonation, I believe I would need to add my Windows user 298790 (and my Windows hostname) to the user groups on the remote (Ubuntu) cluster machine, which I deem infeasible with a large number of users and ever-changing Windows client machines. If my assumption is correct, what can be done to avoid adding and configuring all these users?
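
To make point 2 concrete, here is a minimal sketch of the programmatic HdfsResourceLoader usage I am after (my own illustration - the standalone class and the *.txt pattern are hypothetical; the URI and user come from my hadoop.properties):

    package com.hadoop.basics;

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.springframework.core.io.Resource;
    import org.springframework.data.hadoop.fs.HdfsResourceLoader;

    public class ResourceLoaderSketch {

        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();

            // Interact with HDFS using the rights of user "bigdata" -
            // subject to the same impersonation rules that fail below.
            HdfsResourceLoader loader = new HdfsResourceLoader(
                    conf, URI.create("hdfs://cloudx-843-770:9000"), "bigdata");

            // Pattern matching only "sees" files this user is allowed to see.
            Resource[] inputs = loader.getResources("/scratchpad/input/*.txt");
            for (Resource input : inputs) {
                System.out.println(input.getURI());
            }
        }
    }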

/Changes added to core-site.xml/

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>

  <property>
    <name>fs.default.name</name>
    <value>hdfs://cloudx-843-770:9000</value>
  </property>

  <property>
    <name>hadoop.proxyuser.298790.groups</name>
    <value>bigdata</value>
    <description>Allow the superuser 298790 to impersonate any member of the group bigdata</description>
  </property>

  <property>
    <name>hadoop.proxyuser.298790.hosts</name>
    <value>*</value>
    <description>The superuser 298790 may connect from any host to impersonate a user</description>
  </property>

</configuration>

I restarted all the Hadoop processes, but the error persists.

Then, I decided to create a new user, viz. 298790, on the remote Ubuntu machine and add it to the group bigdata for impersonation:

    root@cloudx-843-770:/home/bigdata# useradd -G bigdata 298790
    root@cloudx-843-770:/home/bigdata#
    root@cloudx-843-770:/home/bigdata#
    root@cloudx-843-770:/home/bigdata# usermod -G bigdata 298790
    root@cloudx-843-770:/home/bigdata#
    root@cloudx-843-770:/home/bigdata# su 298790
    $ groups
    298790 bigdata
    root@cloudx-843-770:/home/bigdata#
    root@cloudx-843-770:/home/bigdata# cat /etc/passwd
    root:x:0:0:root:/root:/bin/bash
    daemon:x:1:1:daemon:/usr/sbin:/bin/sh
    bin:x:2:2:bin:/bin:/bin/sh
    sys:x:3:3:sys:/dev:/bin/sh
    sync:x:4:65534:sync:/bin:/bin/sync
    games:x:5:60:games:/usr/games:/bin/sh
    man:x:6:12:man:/var/cache/man:/bin/sh
    lp:x:7:7:lp:/var/spool/lpd:/bin/sh
    mail:x:8:8:mail:/var/mail:/bin/sh
    news:x:9:9:news:/var/spool/news:/bin/sh
    uucp:x:10:10:uucp:/var/spool/uucp:/bin/sh
    proxy:x:13:13:proxy:/bin:/bin/sh
    www-data:x:33:33:www-data:/var/www:/bin/sh
    backup:x:34:34:backup:/var/backups:/bin/sh
    list:x:38:38:Mailing List Manager:/var/list:/bin/sh
    irc:x:39:39:ircd:/var/run/ircd:/bin/sh
    gnats:x:41:41:Gnats Bug-Reporting System (admin):/var/lib/gnats:/bin/sh
    nobody:x:65534:65534:nobody:/nonexistent:/bin/sh
    libuuid:x:100:101::/var/lib/libuuid:/bin/sh
    syslog:x:101:103::/home/syslog:/bin/false
    mysql:x:102:105:MySQL Server,,,:/nonexistent:/bin/false
    messagebus:x:103:106::/var/run/dbus:/bin/false
    whoopsie:x:104:107::/nonexistent:/bin/false
    landscape:x:105:110::/var/lib/landscape:/bin/false
    sshd:x:106:65534::/var/run/sshd:/usr/sbin/nologin
    tomcat6:x:107:113::/usr/share/tomcat6:/bin/false
    coesystem:x:1000:1000:coesystem,,,:/home/coesystem:/bin/bash
    hpcc:x:999:1001:hpcc Runtime User:/home/hpcc:/bin/sh
    hduser:x:1001:1002:hduser,1,1,1,1:/home/hduser:/bin/bash
    bigdata:x:1002:1003:Big Data,1,1,1,1:/home/bigdata:/bin/bash
    298790:x:1003:1004::/home/298790:/bin/sh

But now, when I try to stop (and then start) the cluster, it asks for the password for all the processes:

bigdata@cloudx-843-770:~/hadoop_ecosystem/apache_hadoop/hadoop-1.1.2/bin$ stop-all.sh
Warning: $HADOOP_HOME is deprecated.

stopping jobtracker
bigdata@localhost's password:
localhost: stopping tasktracker
stopping namenode
bigdata@localhost's password:
localhost: stopping datanode
bigdata@localhost's password:
localhost: stopping secondarynamenode

Now the error is slightly modified - it first fails to connect, and then to impersonate:

Aug 22, 2013 5:14:17 PM org.springframework.context.support.AbstractApplicationContext prepareRefresh
INFO: Refreshing org.springframework.context.support.ClassPathXmlApplicationContext@922804: startup date [Thu Aug 22 17:14:17 IST 2013]; root of context hierarchy
Aug 22, 2013 5:14:17 PM org.springframework.beans.factory.xml.XmlBeanDefinitionReader loadBeanDefinitions
INFO: Loading XML bean definitions from class path resource [com/hadoop/basics/applicationContext.xml]
Aug 22, 2013 5:14:17 PM org.springframework.core.io.support.PropertiesLoaderSupport loadProperties
INFO: Loading properties file from class path resource [resources/hadoop.properties]
Aug 22, 2013 5:14:17 PM org.springframework.beans.factory.support.DefaultListableBeanFactory preInstantiateSingletons
INFO: Pre-instantiating singletons in org.springframework.beans.factory.support.DefaultListableBeanFactory@7c197e: defining beans [org.springframework.context.support.PropertySourcesPlaceholderConfigurer#0,hadoopConfiguration,wc-job,myjobs-runner,resourceLoader]; root of factory hierarchy
Aug 22, 2013 5:14:18 PM org.springframework.data.hadoop.mapreduce.JobExecutor$2 run
INFO: Starting job [wc-job]
Aug 22, 2013 5:14:20 PM org.apache.hadoop.ipc.Client$Connection handleConnectionFailure
INFO: Retrying connect to server: cloudx-843-770/172.25.37.135:9001. Already tried 0 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
Aug 22, 2013 5:14:22 PM org.apache.hadoop.ipc.Client$Connection handleConnectionFailure
INFO: Retrying connect to server: cloudx-843-770/172.25.37.135:9001. Already tried 1 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
Aug 22, 2013 5:14:24 PM org.apache.hadoop.ipc.Client$Connection handleConnectionFailure
INFO: Retrying connect to server: cloudx-843-770/172.25.37.135:9001. Already tried 2 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
Aug 22, 2013 5:14:26 PM org.apache.hadoop.ipc.Client$Connection handleConnectionFailure
INFO: Retrying connect to server: cloudx-843-770/172.25.37.135:9001. Already tried 3 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
Aug 22, 2013 5:14:36 PM org.apache.hadoop.security.UserGroupInformation doAs
SEVERE: PriviledgedActionException as:bigdata via 298790 cause:org.apache.hadoop.ipc.RemoteException: User: 298790 is not allowed to impersonate bigdata
Aug 22, 2013 5:14:36 PM org.springframework.data.hadoop.mapreduce.JobExecutor$2 run
WARNING: Cannot start job [wc-job]
org.apache.hadoop.ipc.RemoteException: User: 298790 is not allowed to impersonate bigdata
    at org.apache.hadoop.ipc.Client.call(Client.java:1107)
    at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
    at org.apache.hadoop.mapred.$Proxy2.getProtocolVersion(Unknown Source)
    at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:411)
    at org.apache.hadoop.mapred.JobClient.createRPCProxy(JobClient.java:499)
    at org.apache.hadoop.mapred.JobClient.init(JobClient.java:490)
    at org.apache.hadoop.mapred.JobClient.<init>(JobClient.java:473)
    at org.apache.hadoop.mapreduce.Job$1.run(Job.java:513)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Unknown Source)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1149)
    at org.apache.hadoop.mapreduce.Job.connect(Job.java:511)
    at org.apache.hadoop.mapreduce.Job.submit(Job.java:499)
    at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:530)
    at org.springframework.data.hadoop.mapreduce.JobExecutor$2.run(JobExecutor.java:197)
    at org.springframework.core.task.SyncTaskExecutor.execute(SyncTaskExecutor.java:49)
    at org.springframework.data.hadoop.mapreduce.JobExecutor.startJobs(JobExecutor.java:168)
    at org.springframework.data.hadoop.mapreduce.JobExecutor.startJobs(JobExecutor.java:160)
    at org.springframework.data.hadoop.mapreduce.JobRunner.call(JobRunner.java:52)
    at org.springframework.data.hadoop.mapreduce.JobRunner.afterPropertiesSet(JobRunner.java:44)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.invokeInitMethods(AbstractAutowireCapableBeanFactory.java:1541)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1479)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:521)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:458)
    at org.springframework.beans.factory.support.AbstractBeanFactory$1.getObject(AbstractBeanFactory.java:295)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:223)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:292)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:194)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:628)
    at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:932)
    at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:479)
    at org.springframework.context.support.ClassPathXmlApplicationContext.<init>(ClassPathXmlApplicationContext.java:197)
    at org.springframework.context.support.ClassPathXmlApplicationContext.<init>(ClassPathXmlApplicationContext.java:172)
    at org.springframework.context.support.ClassPathXmlApplicationContext.<init>(ClassPathXmlApplicationContext.java:158)
    at com.hadoop.basics.WordCounter.main(WordCounter.java:58)

Aug 22, 2013 5:14:36 PM org.springframework.beans.factory.support.DefaultSingletonBeanRegistry destroySingletons
INFO: Destroying singletons in org.springframework.beans.factory.support.DefaultListableBeanFactory@7c197e: defining beans [org.springframework.context.support.PropertySourcesPlaceholderConfigurer#0,hadoopConfiguration,wc-job,myjobs-runner,resourceLoader]; root of factory hierarchy
Exception in thread "main" org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'myjobs-runner': Invocation of init method failed; nested exception is java.lang.IllegalStateException: org.apache.hadoop.ipc.RemoteException: User: 298790 is not allowed to impersonate bigdata
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1482)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:521)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:458)
    at org.springframework.beans.factory.support.AbstractBeanFactory$1.getObject(AbstractBeanFactory.java:295)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:223)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:292)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:194)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:628)
    at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:932)
    at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:479)
    at org.springframework.context.support.ClassPathXmlApplicationContext.<init>(ClassPathXmlApplicationContext.java:197)
    at org.springframework.context.support.ClassPathXmlApplicationContext.<init>(ClassPathXmlApplicationContext.java:172)
    at org.springframework.context.support.ClassPathXmlApplicationContext.<init>(ClassPathXmlApplicationContext.java:158)
    at com.hadoop.basics.WordCounter.main(WordCounter.java:58)
Caused by: java.lang.IllegalStateException: org.apache.hadoop.ipc.RemoteException: User: 298790 is not allowed to impersonate bigdata
    at org.springframework.data.hadoop.mapreduce.JobExecutor$2.run(JobExecutor.java:209)
    at org.springframework.core.task.SyncTaskExecutor.execute(SyncTaskExecutor.java:49)
    at org.springframework.data.hadoop.mapreduce.JobExecutor.startJobs(JobExecutor.java:168)
    at org.springframework.data.hadoop.mapreduce.JobExecutor.startJobs(JobExecutor.java:160)
    at org.springframework.data.hadoop.mapreduce.JobRunner.call(JobRunner.java:52)
    at org.springframework.data.hadoop.mapreduce.JobRunner.afterPropertiesSet(JobRunner.java:44)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.invokeInitMethods(AbstractAutowireCapableBeanFactory.java:1541)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1479)
    ... 13 more
Caused by: org.apache.hadoop.ipc.RemoteException: User: 298790 is not allowed to impersonate bigdata
    at org.apache.hadoop.ipc.Client.call(Client.java:1107)
    at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
    at org.apache.hadoop.mapred.$Proxy2.getProtocolVersion(Unknown Source)
    at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:411)
    at org.apache.hadoop.mapred.JobClient.createRPCProxy(JobClient.java:499)
    at org.apache.hadoop.mapred.JobClient.init(JobClient.java:490)
    at org.apache.hadoop.mapred.JobClient.<init>(JobClient.java:473)
    at org.apache.hadoop.mapreduce.Job$1.run(Job.java:513)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Unknown Source)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1149)
    at org.apache.hadoop.mapreduce.Job.connect(Job.java:511)
    at org.apache.hadoop.mapreduce.Job.submit(Job.java:499)
    at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:530)
    at org.springframework.data.hadoop.mapreduce.JobExecutor$2.run(JobExecutor.java:197)
    ... 20 more

1 Answer

Apache Hadoop requires that your service-side configurations permit such a thing, if you wish for your user to impersonate another user.

This means that if you are running as "foo" and you wish to actually submit the job as "bar", then your NameNode/JobTracker requires the core-site.xml configuration it loads to allow "foo" to proxy other users; typically, something like the below must be present in the NameNode's/JobTracker's core-site.xml:

<property>
  <name>hadoop.proxyuser.foo.groups</name>
  <value>*</value>
</property>

<property>
  <name>hadoop.proxyuser.foo.hosts</name>
  <value>*</value>
</property>

This would allow the user foo to impersonate any other user (the * for groups), and to do so when submitting from any host (the * for hosts).
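
To illustrate what happens under the hood when a job is submitted with an explicit user (a rough sketch of my own, not SHDP's exact internals - the UserGroupInformation calls are Hadoop's real API, but the job setup is abbreviated):

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.security.UserGroupInformation;

    public class ImpersonationSketch {

        public static void main(String[] args) throws Exception {
            // The "real" caller, e.g. the local Windows user 298790.
            UserGroupInformation realUser = UserGroupInformation.getLoginUser();

            // Wrap it in a proxy UGI for the target user. When this identity
            // connects, the NameNode/JobTracker consults its
            // hadoop.proxyuser.<realUser>.* settings - that check is what
            // produces "User: 298790 is not allowed to impersonate bigdata".
            UserGroupInformation proxyUgi =
                    UserGroupInformation.createProxyUser("bigdata", realUser);

            proxyUgi.doAs(new PrivilegedExceptionAction<Void>() {
                public Void run() throws Exception {
                    Job job = new Job(new Configuration(), "wc-job");
                    // ... mapper/reducer/paths would be configured here ...
                    job.submit(); // throws the RemoteException above if denied
                    return null;
                }
            });
        }
    }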

While not necessary, it is definitely recommended that the impersonated users and groups exist on the NameNode, so that permissions, group resolution, etc. all work in a proper manner. More on this here: http://www.cloudera.com/blog/2012/03/authorization-and-authentication-in-hadoop/

The documentation at http://static.springsource.org/spring-hadoop/docs/1.0.x/reference/html/security.html#security:kerberos should make this clearer as well.

Answered 2013-08-22T10:35:04.207