I am trying to run the hello world example from Chapter 7. I created the following in Eclipse and then packaged it into a jar:

package com.mycode.mahout;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.mahout.clustering.WeightedVectorWritable;
import org.apache.mahout.clustering.kmeans.Cluster;
import org.apache.mahout.clustering.kmeans.KMeansDriver;
import org.apache.mahout.common.distance.EuclideanDistanceMeasure;
import org.apache.mahout.math.RandomAccessSparseVector;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.VectorWritable;

public class SimpleKMeansClustering {
  public static final double[][] points = { {1, 1}, {2, 1}, {1, 2},
                                           {2, 2}, {3, 3}, {8, 8},
                                           {9, 8}, {8, 9}, {9, 9}};

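  // Write each point as a (LongWritable record id, VectorWritable) pair into a SequenceFile.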
  public static void writePointsToFile(List<Vector> points,
                                       String fileName,
                                       FileSystem fs,
                                       Configuration conf) throws IOException {
    Path path = new Path(fileName);
    SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf,
        path, LongWritable.class, VectorWritable.class);
    long recNum = 0;
    VectorWritable vec = new VectorWritable();
    for (Vector point : points) {
      vec.set(point);
      writer.append(new LongWritable(recNum++), vec);
    }
    writer.close();
  }

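  // Wrap each raw double[] in a Mahout RandomAccessSparseVector.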
  public static List<Vector> getPoints(double[][] raw) {
    List<Vector> points = new ArrayList<Vector>();
    for (int i = 0; i < raw.length; i++) {
      double[] fr = raw[i];
      Vector vec = new RandomAccessSparseVector(fr.length);
      vec.assign(fr);
      points.add(vec);
    }
    return points;
  }

  public static void main(String args[]) throws Exception {

    int k = 2;

    List<Vector> vectors = getPoints(points);

    File testData = new File("testdata");
    if (!testData.exists()) {
      testData.mkdir();
    }
    testData = new File("testdata/points");
    if (!testData.exists()) {
      testData.mkdir();
    }

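    // Write the input points as a SequenceFile under testdata/points.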
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    writePointsToFile(vectors, "testdata/points/file1", fs, conf);

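    // Seed k-means with the first k points as the initial cluster centers.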
    Path path = new Path("testdata/clusters/part-00000");
    SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf,
        path, Text.class, Cluster.class);

    for (int i = 0; i < k; i++) {
      Vector vec = vectors.get(i);
      Cluster cluster = new Cluster(vec, i, new EuclideanDistanceMeasure());
      writer.append(new Text(cluster.getIdentifier()), cluster);
    }
    writer.close();

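    // Run the k-means driver: convergence delta 0.001, at most 10 iterations,
    // runClustering=true (assign points to clusters), runSequential=false (run as MapReduce).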
    KMeansDriver.run(conf, new Path("testdata/points"), new Path("testdata/clusters"),
      new Path("output"), new EuclideanDistanceMeasure(), 0.001, 10,
      true, false);

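    // Read the clustered points back and print each point's cluster assignment.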
    SequenceFile.Reader reader = new SequenceFile.Reader(fs,
        new Path("output/" + Cluster.CLUSTERED_POINTS_DIR
                 + "/part-m-00000"), conf);

    IntWritable key = new IntWritable();
    WeightedVectorWritable value = new WeightedVectorWritable();
    while (reader.next(key, value)) {
      System.out.println(value.toString() + " belongs to cluster "
                         + key.toString());
    }
    reader.close();
  }

}

I packaged it as myjob.jar.

Now how should I execute this on my cluster?

I tried the following:

hadoop jar myjob.jar com.mycode.mahout.SimpleKMeansClustering 
java -jar myjob.jar
java -cp myjob.jar

I get the following error:

 [root@node1 tmp]# hadoop jar mahoutfirst.jar com.mahout.emc.SimpleKMeansClustering 
    Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/mahout/math/Vector
        at java.lang.Class.forName0(Native Method)
        at java.lang.Class.forName(Class.java:270)
        at org.apache.hadoop.util.RunJar.main(RunJar.java:201)
    Caused by: java.lang.ClassNotFoundException: org.apache.mahout.math.Vector
        at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
        at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
        at java.security.AccessController.doPrivileged(Native Method)
        at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
        ... 3 more

Please suggest the correct way to run code written using Mahout.

2 Answers

Even though this is late, I faced a similar problem and the following worked for me, since I did not want to use Maven:

1) Go to your Mahout installation directory and look for the *job.jar files, for example:

ls /usr/lib/mahout/
conf  lib       mahout-core-0.5-cdh3u3-job.jar  mahout-examples-0.5-cdh3u3-job.jar  mahout-taste-webapp-0.5-cdh3u3.war

2) Copy mahout-core-0.5-cdh3u3-job.jar to the directory where your code resides.

3) Use the "job" JAR file provided by Mahout; it packages up all the dependencies. You also need to add your own class to it. Your .class file is ready once you have compiled your class against the Hadoop and Mahout libraries (a compile sketch follows these steps).

4) Add your class file to the job jar mahout-core-0.5-cdh3u3-job.jar in your directory:

jar uf mahout-core-0.5-cdh3u3-job.jar SimpleKMeansClustering.class

5) Run hadoop jar with your code:

hadoop jar mahout-core-0.5-cdh3u3-job.jar  SimpleKMeansClustering

6) At the end of your map-reduce job you will see:

1.0: [1.000, 1.000] belongs to cluster 0
1.0: [2.000, 1.000] belongs to cluster 0
1.0: [1.000, 2.000] belongs to cluster 0
1.0: [2.000, 2.000] belongs to cluster 0
1.0: [3.000, 3.000] belongs to cluster 0
1.0: [8.000, 8.000] belongs to cluster 1
1.0: [9.000, 8.000] belongs to cluster 1
1.0: [8.000, 9.000] belongs to cluster 1
1.0: [9.000, 9.000] belongs to cluster 1
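
For step 3, a minimal compile sketch (the hadoop-core.jar and job-jar paths are assumptions for a CDH3-style install; adjust them to your setup):

javac -classpath /usr/lib/hadoop/hadoop-core.jar:/usr/lib/mahout/mahout-core-0.5-cdh3u3-job.jar \
    SimpleKMeansClustering.java

Note that the commands above assume the class is in the default package; if you keep the package com.mycode.mahout declaration, add the compiled file to the jar under its package path (jar uf ... com/mycode/mahout/SimpleKMeansClustering.class) and run it by its fully qualified name.
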
answered on 2014-03-18T13:35:23.890

Looking at the NoClassDefFoundError above, you may need to include the Mahout-related jars (mahout-core.jar, I guess) in your Hadoop job.

To pass jars to the mappers across the whole cluster, you may need to use the DistributedCache or the -libjars Hadoop option. The idea behind the latter is explained here.
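
A hedged sketch of the -libjars route (the jar path is an assumption; -libjars is only honored when the main class parses generic options, e.g. via ToolRunner, which the code above does not do yet):

export HADOOP_CLASSPATH=/usr/lib/mahout/mahout-core-0.5-cdh3u3-job.jar
hadoop jar myjob.jar com.mycode.mahout.SimpleKMeansClustering \
    -libjars /usr/lib/mahout/mahout-core-0.5-cdh3u3-job.jar

Here HADOOP_CLASSPATH fixes the client-side NoClassDefFoundError, while -libjars ships the jar to the map/reduce tasks.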

answered on 2013-10-16T10:57:48.177