
I am trying to JAR a simple Scala application that uses SparkCSV and Spark SQL to create a DataFrame from a CSV file stored in HDFS, and then just run a simple query that returns the max and min of a specific column in the CSV file.

I am getting an error when I create the JAR with the sbt command; later I will curl it into the jobserver /jars folder and execute it from a remote machine.

Code:

import com.typesafe.config.{Config, ConfigFactory}
import org.apache.spark.SparkContext._
import org.apache.spark._
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext

object sparkSqlCSV extends SparkJob {
  def main(args: Array[String]) {
    val conf = new SparkConf().setMaster("local[4]").setAppName("sparkSqlCSV")
    val sc = new SparkContext(conf)
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)
    val config = ConfigFactory.parseString("")
    val results = runJob(sc, config)
    println("Result is " + results)
  }

  override def validate(sc: sqlContext, config: Config): SparkJobValidation = {
    SparkJobValid
  }

  override def runJob(sc: sqlContext, config: Config): Any = {
    val value = "com.databricks.spark.csv"
    val ControlDF = sqlContext.load(value, Map("path" -> "hdfs://mycluster/user/Test.csv", "header" -> "true"))
    ControlDF.registerTempTable("Control")
    val aggDF = sqlContext.sql("select max(DieX) from Control")
    aggDF.collectAsList()
  }
}

Error:

[hduser@ptfhadoop01v spark-jobserver]$ sbt ashesh-jobs/package
[info] Loading project definition from /usr/local/hadoop/spark-jobserver/project
Missing bintray credentials /home/hduser/.bintray/.credentials. Some bintray  features depend on this.
Missing bintray credentials /home/hduser/.bintray/.credentials. Some bintray  features depend on this.
Missing bintray credentials /home/hduser/.bintray/.credentials. Some bintray features depend on this.
Missing bintray credentials /home/hduser/.bintray/.credentials. Some bintray features depend on this.
[info] Set current project to root (in build file:/usr/local/hadoop/spark-jobserver/)
[info] scalastyle using config /usr/local/hadoop/spark-jobserver/scalastyle-config.xml
[info] Processed 2 file(s)
[info] Found 0 errors
[info] Found 0 warnings
[info] Found 0 infos
[info] Finished in 9 ms
[success] created output: /usr/local/hadoop/spark-jobserver/ashesh-jobs/target
[warn] Credentials file /home/hduser/.bintray/.credentials does not exist
[info] Updating {file:/usr/local/hadoop/spark-jobserver/}ashesh-jobs...
[info] Resolving org.fusesource.jansi#jansi;1.4 ...
[info] Done updating.
[info] scalastyle using config /usr/local/hadoop/spark-jobserver/scalastyle-config.xml
[info] Processed 5 file(s)
[info] Found 0 errors
[info] Found 0 warnings
[info] Found 0 infos
[info] Finished in 1 ms
[success] created output: /usr/local/hadoop/spark-jobserver/job-server-api/target
[info] Compiling 2 Scala sources and 1 Java source to /usr/local/hadoop/spark-jobserver/ashesh-jobs/target/scala-2.10/classes...
[error] /usr/local/hadoop/spark-jobserver/ashesh-jobs/src/spark.jobserver/sparkSqlCSV.scala:8: object sql is not a member of   package org.apache.spark
[error] import org.apache.spark.sql.SQLContext
[error]                         ^
[error] /usr/local/hadoop/spark-jobserver/ashesh-jobs/src/spark.jobserver/sparkSqlCSV.scala:14: object sql is not a member of package org.apache.spark
[error]     val sqlContext = new org.apache.spark.sql.SQLContext(sc)
[error]                                           ^
[error] /usr/local/hadoop/spark-jobserver/ashesh-jobs/src/spark.jobserver/sparkSqlCSV.scala:25: not found: type sqlContext
[error]    override def runJob(sc: sqlContext, config: Config): Any = {
[error]                            ^
[error] /usr/local/hadoop/spark-jobserver/ashesh-jobs/src/spark.jobserver/sparkSqlCSV.scala:21: not found: type sqlContext
[error]     override def validate(sc: sqlContext, config: Config): SparkJobValidation = {
[error]                               ^
[error] /usr/local/hadoop/spark-jobserver/ashesh-jobs/src/spark.jobserver/sparkSqlCSV.scala:27: not found: value sqlContext
[error]    val ControlDF = sqlContext.load(value,Map("path"->"hdfs://mycluster/user/Test.csv","header"->"true"))
[error]                    ^
[error] /usr/local/hadoop/spark-jobserver/ashesh-jobs/src/spark.jobserver/sparkSqlCSV.scala:29: not found: value sqlContext
[error]    val aggDF = sqlContext.sql("select max(DieX) from Control")
[error]                ^
[error] 6 errors found
[error] (ashesh-jobs/compile:compileIncremental) Compilation failed
[error] Total time: 10 s, completed May 26, 2016 4:42:52 PM
[hduser@ptfhadoop01v spark-jobserver]$

I guess the main problem is that the dependencies for spark-csv and Spark SQL are missing, but I have no idea where to put the dependencies before compiling the code with sbt.

I issue the following command to package the application; the source code is placed under the "ashesh_jobs" directory.

[hduser@ptfhadoop01v spark-jobserver]$ sbt ashesh-jobs/package

I hope someone can help me solve this. Could you tell me in which file I can specify the dependencies and the input format?


2 Answers


The following link contains more information about creating other contexts: https://github.com/spark-jobserver/spark-jobserver/blob/master/doc/contexts.md

You also need job-server-extras.
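
In other words, the job should receive a SQLContext from the job server instead of declaring parameters of the non-existent type sqlContext, and the trait that provides this lives in the job-server-extras module. Below is a minimal sketch of what the job could look like, assuming the 0.6.x-era spark.jobserver.SparkSqlJob trait (whose runJob and validate take a SQLContext) and the com.databricks.spark.csv data source; the HDFS path and the DieX column are carried over from the question.

import com.typesafe.config.Config
import org.apache.spark.sql.SQLContext
import spark.jobserver.{SparkJobValid, SparkJobValidation, SparkSqlJob}

object sparkSqlCSV extends SparkSqlJob {
  // The job server hands the job a SQLContext, so no type named sqlContext is needed.
  override def validate(sqlContext: SQLContext, config: Config): SparkJobValidation = SparkJobValid

  override def runJob(sqlContext: SQLContext, config: Config): Any = {
    // Spark 1.x DataFrameReader API for the spark-csv data source
    val controlDF = sqlContext.read
      .format("com.databricks.spark.csv")
      .option("header", "true")
      .load("hdfs://mycluster/user/Test.csv")
    controlDF.registerTempTable("Control")
    sqlContext.sql("select max(DieX), min(DieX) from Control").collect().toList
  }
}

The contexts.md link above describes how to start a SQL context on the job server for such a job to run in.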

Answered on 2016-05-26T12:29:13.407

Add the library dependency in build.sbt:

libraryDependencies += "org.apache.spark" %% "spark-sql" % "1.6.2"
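
Since the job also loads the CSV through the com.databricks.spark.csv data source, that package has to be declared as well. A sketch of the relevant build.sbt lines, with illustrative versions (Spark 1.6.2 and spark-csv 1.4.0 are assumptions; spark-sql can be marked provided because the job server already ships Spark at runtime):

// Illustrative versions; adjust to the Spark/Scala versions used by the job server
libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-sql" % "1.6.2" % "provided",
  "com.databricks"   %% "spark-csv" % "1.4.0"
)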

Answered on 2016-08-10T18:06:20.233