I am trying to read Word .docx files in my MapReduce program. For this I wrote a user-defined FileInputFormat with two classes, WordDocxInputFormat and WordDocxInputFormatRecordReader. In the WordDocxInputFormatRecordReader class I use Apache POI to read the .docx file. However, at runtime I get a java.lang.ClassNotFoundException. I am using Eclipse and Hadoop-0.20.2 on Windows 7.
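For context, I am using the old mapred API, and my driver wires in the custom input format roughly like this (a simplified sketch; the driver class, mapper class, and input/output paths below are placeholders, not my real ones):

JobConf conf = new JobConf(WordDocxDriver.class);   // placeholder driver class
conf.setJobName("docx reader");
conf.setInputFormat(WordDocxInputFormat.class);      // the custom format shown below
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(Text.class);
conf.setMapperClass(DocxMapper.class);               // placeholder mapper class
FileInputFormat.setInputPaths(conf, new Path("docx-input"));
FileOutputFormat.setOutputPath(conf, new Path("docx-output"));
JobClient.runJob(conf);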
I have defined my CLASSPATH as JAVA_HOME\lib;C:\cygwin\home\bmohanty6\poijars\; and I keep the jar files that POI needs in C:\cygwin\home\bmohanty6\poijars\ (see the attached image). I also added them in Eclipse via Project -> Properties -> Libraries -> Add External JARs.
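I run the job from cygwin in the usual way, roughly like this (the jar name and driver class here are placeholders):

bin/hadoop jar worddocx.jar WordDocxDriver docx-input docx-output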
I am getting the following error:
13/09/17 12:35:26 INFO mapred.JobClient: Task Id : attempt_201309101108_0040_m_000000_2, Status : FAILED
Error: java.lang.ClassNotFoundException: org.apache.poi.xwpf.usermodel.XWPFDocument
    at java.net.URLClassLoader$1.run(Unknown Source)
    at java.net.URLClassLoader$1.run(Unknown Source)
    at java.security.AccessController.doPrivileged(Native Method)
    at java.net.URLClassLoader.findClass(Unknown Source)
    at java.lang.ClassLoader.loadClass(Unknown Source)
    at sun.misc.Launcher$AppClassLoader.loadClass(Unknown Source)
    at java.lang.ClassLoader.loadClass(Unknown Source)
    at WordDocxInputFormat$WordDocxInputFormatRecordReader.next(WordDocxInputFormat.java:112)
    at WordDocxInputFormat$WordDocxInputFormatRecordReader.next(WordDocxInputFormat.java:1)
    at org.apache.hadoop.mapred.MapTask$TrackedRecordReader.moveToNext(MapTask.java:192)
    at org.apache.hadoop.mapred.MapTask$TrackedRecordReader.next(MapTask.java:176)
    at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:48)
    at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:358)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:307)
    at org.apache.hadoop.mapred.Child.main(Child.java:170)
Here is my WordDocxInputFormat class:
import java.io.IOException;
import java.util.Arrays;
import java.io.FileInputStream;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.poi.xwpf.usermodel.*;
import org.apache.poi.xwpf.extractor.*;
/**
 * Reads complete documents in Binary format.
 */
public class WordDocxInputFormat extends FileInputFormat<Text, Text> {

    public WordDocxInputFormat() {
        super();
    }

    protected boolean isSplitable(FileSystem fs, Path filename) {
        return false;
    }

    @Override
    public RecordReader<Text, Text> getRecordReader(
            InputSplit split, JobConf job, Reporter reporter) throws IOException {
        return new WordDocxInputFormatRecordReader((FileSplit) split, job);
    }

    /**
     * WordDocxInputFormatRecordReader class to read through a given binary document
     * Outputs the filename along with the complete document
     */
    public class WordDocxInputFormatRecordReader implements RecordReader<Text, Text> {

        private final FileSplit fileSplit;
        private final Configuration conf;
        private boolean processed = false;

        public WordDocxInputFormatRecordReader(FileSplit fileSplit, Configuration conf)
                throws IOException {
            this.fileSplit = fileSplit;
            this.conf = conf;
        }

        @Override
        public Text createKey() {
            return new Text();
        }

        @Override
        public Text createValue() {
            return new Text();
        }

        @Override
        public long getPos() throws IOException {
            return this.processed ? this.fileSplit.getLength() : 0;
        }

        @Override
        public float getProgress() throws IOException {
            return this.processed ? 1.0f : 0.0f;
        }

        @Override
        public boolean next(Text key, Text value) throws IOException {
            if (!this.processed) {
                Path file = this.fileSplit.getPath();
                try {
                    XWPFDocument docx = new XWPFDocument(new FileInputStream(file.toString()));
                    XWPFWordExtractor we = new XWPFWordExtractor(docx);
                    key.set(file.getName());
                    value.set(we.getText());
                }
                catch (IOException ex) {
                    Logger.getLogger(WordDocxInputFormatRecordReader.class.getName()).log(Level.SEVERE, null, ex);
                }
                this.processed = true;
                return true;
            }
            else {
                return false;
            }
        }

        @Override
        public void close() throws IOException {
        }
    }
}
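Since the failing attempt is a map task, it looks like the task JVM on the cluster cannot see the POI classes even though Eclipse can. Do I also have to ship the POI jars with the job itself? For example, would something along these lines in the driver be the right direction (just a sketch of what I am considering; conf would be the JobConf from the driver above, the HDFS path and jar names are placeholders, and the jars would first have to be copied to HDFS)?

// using org.apache.hadoop.filecache.DistributedCache
DistributedCache.addFileToClassPath(new Path("/poijars/poi.jar"), conf);
DistributedCache.addFileToClassPath(new Path("/poijars/poi-ooxml.jar"), conf);

Or is there a different problem with my CLASSPATH setup on Windows?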