1

我正在尝试从 hdfs 中的 hive orc 压缩表数据中将数据摄取到 druid 中。对此的任何指示都会非常有帮助。

4

1 回答 1

0

假设您已经设置了 Druid 和 Yarn/MapReduce,您可以启动一个 index_hadoop 任务来执行您的要求。

有一个 druid-orc-extensions 允许读取 ORC 文件,我认为它不随标准版本提供,所以你必须以某种方式获得它(我们从源代码构建它)

（扩展列表：http://druid.io/docs/latest/development/extensions.html）


这是一个示例，它将摄取一堆 orc 文件并将该时间区间追加到数据源。将其 POST 到 Overlord 节点：http://overlord:8090/druid/indexer/v1/task

（文档：http://druid.io/docs/latest/ingestion/batch-ingestion.html）

您可能需要根据您的发行版进行调整,我记得我们在 hortonworks 上遇到了一些找不到类的问题(classpathPrefix 将有助于调整 MapReduce 类路径)

{
  "type": "index_hadoop",
  "spec": {
    "ioConfig": {
      "type": "hadoop",
      "inputSpec": {
        "type": "granularity",
        "inputFormat": "org.apache.hadoop.hive.ql.io.orc.OrcNewInputFormat",
        "dataGranularity": "hour",
        "inputPath": "/apps/hive/warehouse/table1",
        "filePattern": ".*",
        "pathFormat": "'partition='yyyy-MM-dd'T'HH"
      }
    },
    "dataSchema": {
      "dataSource": "cube_indexed_from_orc",
      "parser": {
        "type": "orc",
        "parseSpec": {
          "format": "timeAndDims",
          "timestampSpec": {
            "column": "timestamp",
            "format": "nano"
          },
          "dimensionsSpec": {
            "dimensions": ["cola", "colb", "colc"],
            "dimensionExclusions": [],
            "spatialDimensions": []
          }
        },
        "typeString": "struct<timestamp:bigint,cola:bigint,colb:string,colc:string,cold:bigint>"
      },
      "metricsSpec": [{
        "type": "count",
        "name": "count"
      }],
      "granularitySpec": {
        "type": "uniform",
        "segmentGranularity": "DAY",
        "queryGranularity": "HOUR",
        "intervals": ["2017-06-14T00:00:00.000Z/2017-06-15T00:00:00.000Z"]
      }
    },
    "tuningConfig": {
      "type": "hadoop",
      "partitionsSpec": {
        "type": "hashed",
        "targetPartitionSize": 5000000
      },
      "leaveIntermediate": false,
      "forceExtendableShardSpecs": true
    }
  }
}
于 2017-06-15T15:51:21.387 回答