I created the following Mapper and Reducer using Mahout:
package mypackage.ItemSimilarity;

import java.io.IOException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.mahout.math.VarLongWritable;

public class ItemPrefMapper extends
        Mapper<LongWritable, Text, VarLongWritable, VarLongWritable> {

    private static final Pattern NUMBERS = Pattern.compile("(\\d+)");

    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        Matcher m = NUMBERS.matcher(line);
        // First number on the line is the user ID; skip lines without any numbers.
        if (!m.find()) {
            return;
        }
        VarLongWritable userID = new VarLongWritable(Long.parseLong(m.group()));
        VarLongWritable itemID = new VarLongWritable();
        // Every remaining number on the line is an item ID for that user.
        while (m.find()) {
            itemID.set(Long.parseLong(m.group()));
            context.write(userID, itemID);
        }
    }
}
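Just to show what the mapper expects (I am assuming data.txt has one user per line, with the user ID first followed by the item IDs, e.g. "3,101,102,103"), here is a standalone sketch of only the parsing logic:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ParseCheck {
    private static final Pattern NUMBERS = Pattern.compile("(\\d+)");

    public static void main(String[] args) {
        String line = "3,101,102,103";           // hypothetical line from data.txt
        Matcher m = NUMBERS.matcher(line);
        m.find();
        long userID = Long.parseLong(m.group()); // 3
        while (m.find()) {
            // The real mapper would emit the pairs (3,101), (3,102), (3,103) here.
            System.out.println(userID + "\t" + m.group());
        }
    }
}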
Reducer class:
package mypackage.ItemSimilarity;

import java.io.IOException;

import org.apache.hadoop.mapreduce.Reducer;
import org.apache.mahout.math.RandomAccessSparseVector;
import org.apache.mahout.math.VarLongWritable;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.VectorWritable;

public class UserVectorReducer extends
        Reducer<VarLongWritable, VarLongWritable, VarLongWritable, VectorWritable> {

    @Override
    public void reduce(VarLongWritable userID,
            Iterable<VarLongWritable> itemPrefs, Context context)
            throws IOException, InterruptedException {
        // Collect all of the user's item IDs into one sparse preference vector.
        Vector userVector = new RandomAccessSparseVector(Integer.MAX_VALUE, 100);
        for (VarLongWritable itemPref : itemPrefs) {
            userVector.set((int) itemPref.get(), 1.0f);
        }
        context.write(userID, new VectorWritable(userVector));
    }
}
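For comparison, a plain Hadoop driver wiring up the same two classes (job name is made up; paths are the ones from my Spring config below) would look roughly like this. It is only a sketch to show the intended key/value types, not the setup I actually run:

package mypackage.ItemSimilarity;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.mahout.math.VarLongWritable;
import org.apache.mahout.math.VectorWritable;

public class UserVectorDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "user vectors");   // Job.getInstance(conf, ...) on newer Hadoop
        job.setJarByClass(ItemPrefMapper.class);
        job.setMapperClass(ItemPrefMapper.class);
        job.setReducerClass(UserVectorReducer.class);
        job.setMapOutputKeyClass(VarLongWritable.class);
        job.setMapOutputValueClass(VarLongWritable.class);
        job.setOutputKeyClass(VarLongWritable.class);
        job.setOutputValueClass(VectorWritable.class);
        job.setOutputFormatClass(SequenceFileOutputFormat.class);
        FileInputFormat.addInputPath(job, new Path("/home/ubuntu/input/data.txt"));
        FileOutputFormat.setOutputPath(job, new Path("/home/ubuntu/output"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}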
Spring configuration used to run this:
<job id="mahoutJob" input-path="/home/ubuntu/input/data.txt" output-path="/home/ubuntu/output"
mapper="mypackage.ItemSimilarity.ItemPrefMapper"
reducer="mypackage.ItemSimilarity.UserVectorReducer"
jar-by-class="mypackage.ItemSimilarity.ItemPrefMapper"/>
<job-runner id="myjob-runner" pre-action="setupScript" job-ref="mahoutJob"
run-at-startup="true"/>
When I run it I get the error below. I have extended the Hadoop Mapper class, but Spring says it is not a Mapper class.
java.lang.RuntimeException: class mypackage.ItemSimilarity.ItemPrefMapper not org.apache.hadoop.mapreduce.Mapper
    at org.apache.hadoop.conf.Configuration.setClass(Configuration.java:931)
    at org.apache.hadoop.mapreduce.Job.setMapperClass(Job.java:175)
    at org.springframework.data.hadoop.mapreduce.JobFactoryBean.afterPropertiesSet(JobFactoryBean.java:153)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.invokeInitMethods(AbstractAutowireCapableBeanFactory.java:1571)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1509)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:521)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:458)
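For context, as far as I understand it the message comes from Hadoop's Configuration.setClass, which rejects the class when it is not assignable to the expected Mapper type. A paraphrased sketch of that check (not the actual Hadoop source):

// Simplified sketch of what Configuration.setClass(name, theClass, xface) checks
// before storing the class name (paraphrased, not the real implementation):
public void setClass(String name, Class<?> theClass, Class<?> xface) {
    if (!xface.isAssignableFrom(theClass)) {
        // This produces the message I am seeing, even though ItemPrefMapper
        // extends org.apache.hadoop.mapreduce.Mapper in my code.
        throw new RuntimeException(theClass + " not " + xface.getName());
    }
    set(name, theClass.getName());
}

So the assignability check is failing at runtime even though the class compiles against org.apache.hadoop.mapreduce.Mapper. What could cause this, and how do I fix it?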