Hi, this is my first program using Hadoop: I modified the WordCount example and I cannot get it to run. I changed the map output and the reducer input to <Text, Text>. The input file contains records of the form "email gender age 21". Execution hangs, showing Map 100% Reduce 100%.
//MAPPER
public class WordMapper extends MapReduceBase implements
        Mapper<LongWritable, Text, Text, Text> {

    @Override
    public void map(LongWritable key, Text value,
            OutputCollector<Text, Text> output, Reporter reporter)
            throws IOException {
        String s = value.toString();
        String s1;
        Matcher m2;
        FileSplit filesplit = (FileSplit) reporter.getInputSplit();
        String fileName = filesplit.getPath().getName();
        Pattern p1 = Pattern.compile("\\s+email\\s+gender\\s+age\\s+(\\S+)$");
        m2 = p1.matcher(s);
        if (m2.find()) {
            s1 = m2.replaceFirst("omitted");
            output.collect(new Text(s1), new Text(fileName));
        }
    }
}
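Just to see what that pattern does to a single record outside Hadoop, it can be exercised with a throwaway class like the one below (RegexCheck and the hard-coded sample string are only for illustration):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Throwaway check of the mapper's pattern against one sample record.
public class RegexCheck {
    public static void main(String[] args) {
        Pattern p1 = Pattern.compile("\\s+email\\s+gender\\s+age\\s+(\\S+)$");
        String sample = "email gender age 21";  // record as described above
        Matcher m2 = p1.matcher(sample);
        // Prints false for this exact string: the leading \s+ requires whitespace
        // before "email", and this sample starts with "email", so the mapper's
        // if (m2.find()) branch would not collect anything for such a line.
        System.out.println("find() = " + m2.find());
    }
}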
//REDUCER
public class SumReducer extends MapReduceBase implements
        Reducer<Text, Text, Text, IntWritable> {

    @Override
    public void reduce(Text key, Iterator<Text> values,
            OutputCollector<Text, IntWritable> output, Reporter reporter)
            throws IOException {
        int cliCount = 0;
        while (values.hasNext()) {
            cliCount += 1;
        }
        output.collect(key, new IntWritable(cliCount));
    }
}
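For reference, a counting loop over the old-API Iterator normally advances it with values.next() on every pass; as written above, the while loop never calls next(), so hasNext() stays true once reduce() receives at least one value. A minimal sketch of the advancing variant (CountReducer is just an illustrative name, otherwise the same shape as SumReducer):

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

// Same shape as SumReducer above, but the loop consumes the iterator.
public class CountReducer extends MapReduceBase implements
        Reducer<Text, Text, Text, IntWritable> {

    @Override
    public void reduce(Text key, Iterator<Text> values,
            OutputCollector<Text, IntWritable> output, Reporter reporter)
            throws IOException {
        int cliCount = 0;
        while (values.hasNext()) {
            values.next();  // advance the iterator; without this the loop cannot terminate
            cliCount += 1;
        }
        output.collect(key, new IntWritable(cliCount));
    }
}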
//MAIN
public class WordCount extends Configured implements Tool {

    @Override
    public int run(String[] args) throws Exception {
        if (args.length != 2) {
            System.out.printf(
                    "Usage: %s [generic options] <input dir> <output dir>\n",
                    getClass().getSimpleName());
            ToolRunner.printGenericCommandUsage(System.out);
            return -1;
        }
        JobConf conf = new JobConf(getConf(), WordCount.class);
        conf.setJobName(this.getClass().getName());
        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));
        conf.setMapperClass(WordMapper.class);
        conf.setReducerClass(SumReducer.class);
        conf.setMapOutputKeyClass(Text.class);
        conf.setMapOutputValueClass(Text.class);
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);
        JobClient.runJob(conf);
        return 0;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new WordCount(), args);
        System.exit(exitCode);
    }
}
UPDATE: only the _log folder and the .xml file exist in the output directory. I left the program running and Hadoop killed it.
13/06/19 15:02:47 INFO mapred.JobClient: Total committed heap usage (bytes)=258875392
13/06/19 15:02:47 INFO mapred.JobClient: org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter
13/06/19 15:02:47 INFO mapred.JobClient: BYTES_READ=26
13/06/19 15:02:47 INFO mapred.JobClient: Job Failed: NA
Exception in thread "main" java.io.IOException: Job failed!
at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1322)
at WordCount.run(WordCount.java:41)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:84)
at WordCount.main(WordCount.java:46)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:601)
at org.apache.hadoop.util.RunJar.main(RunJar.java:208)