我是 Map-Reduce 编程范式的新手。所以,我的问题对很多人来说可能听起来很愚蠢。但是,我请求大家多多包涵。
我正在尝试计算文件中特定单词的出现次数。现在,我为此编写了以下 Java 类。
输入文件有以下条目:
The tiger entered village in the night the the \
Then ... the story continues...
I have put the word 'the' many times because of my own program purpose.
WordCountMapper.java
package com.demo.map_reduce.word_count.mapper;
import java.io.IOException;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable>
{
    /** The single word this job counts occurrences of. */
    private static final String TARGET_WORD = "the";

    /**
     * Emits one ("the", countInLine) pair for every input line that contains
     * the word "the" (compared case-insensitively, whole words only).
     *
     * Note: the original declared the third parameter with the raw type
     * {@code org.apache.hadoop.mapreduce.Mapper.Context}; using the inherited
     * generic {@code Context} removes the need for unchecked-warning
     * suppression. The original also mixed a case-insensitive containment
     * check with a case-sensitive substring count, so "Then" was counted as
     * "the" while a leading "The" was missed.
     *
     * @param key     byte offset of the line within the input split (unused)
     * @param value   one line of input text
     * @param context Hadoop context used to emit (word, count) pairs
     */
    @Override
    protected void map(final LongWritable key, final Text value, final Context context)
            throws IOException, InterruptedException {
        if (null == value) {
            return;
        }
        int count = 0;
        // Split on runs of non-letters so punctuation is ignored and "Then"
        // is not mistaken for "the"; compare ignoring case so "The" counts.
        for (final String token : value.toString().split("[^A-Za-z]+")) {
            if (TARGET_WORD.equalsIgnoreCase(token)) {
                count++;
            }
        }
        if (count > 0) {
            context.write(new Text(TARGET_WORD), new IntWritable(count));
        }
    }
}
WordCountReducer.java
package com.demo.map_reduce.word_count.reducer;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable>
{
@SuppressWarnings({ "rawtypes", "unchecked" })
public void reduce(Text key, Iterable<IntWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)
throws IOException, InterruptedException {
int count = 0;
for (final IntWritable nextValue : values) {
count += nextValue.get();
}
context.write(key, new IntWritable(count));
}
}
WordCounter.java
package com.demo.map_reduce.word_count;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import com.demo.map_reduce.word_count.mapper.WordCountMapper;
import com.demo.map_reduce.word_count.reducer.WordCountReducer;
public class WordCounter
{
public static void main(String[] args) {
final String inputDataPath = "/input/my_wordcount_1/input_data_file.txt";
final String outputDataDir = "/output/my_wordcount_1";
try {
final Job job = Job.getInstance();
job.setJobName("Simple word count");
job.setJarByClass(WordCounter.class);
job.setMapperClass(WordCountMapper.class);
job.setReducerClass(WordCountReducer.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
FileInputFormat.addInputPath(job, new Path(inputDataPath));
FileOutputFormat.setOutputPath(job, new Path(outputDataDir));
job.waitForCompletion(true);
}
} catch (Exception e) {
e.printStackTrace();
}
}
当我在 Hadoop 中运行这个程序时,我得到了以下输出。
the 2
the 1
the 3
我希望 Reducer 最终只产生一条汇总结果:
the 4
我确定我做错了什么;或者我可能没有正确理解。有人可以在这里帮助我吗?
提前致谢。
-尼兰詹