1

我想运行一个 select 语句并将结果放入一个表中,我确定它不是语法错误。

HQL 语句如下:

-- Rewrites the whole MID_DealerVehicleOutputValue table: rows belonging to a
-- dealer's "low" fraction (per-dealer row count * DIM_OutputValue.Rate for
-- code 'Low') get DIMOutputValueID forced to 1; all other rows keep theirs.
INSERT overwrite table datalake_rci.MID_DealerVehicleOutputValue
--MIDDealerVehicleOutputValueID int,
select
b.MIDDealerVehicleOutputValueID
,b.DealerID --string,
,b.CHASSIS --string,
,b.DIMDealerID --bigint,
,b.DIMVehicleID --bigint,

-- a.DIMDealerID is non-null only when the left join below found this dealer
-- among the "low" flagged rows
,case when a.DIMDealerID is not null
      then 1 else b.DIMOutputValueID
end DIMOutputValueID  --int     

,b.OutputValueName --string,
,b.OutputValueName_CN --string,
,b.OutputValueCode --varchar(50),
,b.OutputValueOrder --int
from datalake_rci.MID_DealerVehicleOutputValue b
left outer join
(
    -- subquery a: for each dealer, the rows whose descending-OutputValueOrder
    -- rank falls within the dealer's "low" threshold (count * Rate)
    select w.low,w.DIMDealerID, w.DIMVehicleID,w.OutputValueOrder,w.row_num from (
        select z.low,z.DIMDealerID, z.DIMVehicleID, z.OutputValueOrder,
               row_number() over(partition by z.DIMDealerID order by z.OutputValueOrder desc) row_num
        from
        (
            -- per-dealer threshold (t1) re-joined to every row of the same
            -- dealer (y) — a second full scan of the 336M-row table
            select t1.low,y.DIMDealerID, y.DIMVehicleID, y.OutputValueOrder
           from
            (
                select b.DIMDealerID, b.cnt*l.Rate low
                from
                    (select DIMDealerID, count(*) cnt
                        from datalake_rci.MID_DealerVehicleOutputValue
                        group by DIMDealerID) b
                    cross join
                        (select Rate from datalake_rci.DIM_OutputValue where OutputValueCode = 'Low') l
            ) t1
            inner join
            (select DIMDealerID, DIMVehicleID, OutputValueOrder
                from datalake_rci.MID_DealerVehicleOutputValue) y
            on t1.DIMDealerID = y.DIMDealerID
        ) z
    ) w
    where w.row_num <= w.low
) a on b.DIMDealerID = a.DIMDealerID;
-- NOTE(review): the join key is DIMDealerID alone, so every row of b matches
-- ALL flagged rows of the same dealer — this multiplies b's rows and inflates
-- the shuffle. Presumably the match was meant per vehicle/row; verify intent.

然后我得到以下输出:

--------------------------------------------------------------------------------
        VERTICES      STATUS  TOTAL  COMPLETED  RUNNING  PENDING  FAILED  KILLED
--------------------------------------------------------------------------------
Map 1 .........      RUNNING    207        200        0        7       0       0
Map 6 ..........   SUCCEEDED      1          1        0        0       0       0
Map 7 .........      RUNNING    207        198        0        9       0       0
Map 8 .........      RUNNING    207        201        0        6       0       0
Reducer 2 .....      RUNNING     40         37        0        3       0       0
Reducer 3 .....      RUNNING     98         94        0        4       0       0
Reducer 4 .....      RUNNING     44         41        0        3       0       0
Reducer 5 ...        RUNNING    746        376        0      370      35     233
--------------------------------------------------------------------------------
VERTICES: 01/08  [===================>>-------] 74%   ELAPSED TIME: 5795.98 s  
--------------------------------------------------------------------------------
ERROR : Status: Failed
ERROR : Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00
ERROR : Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03
ERROR : Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01
ERROR : Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05
ERROR : Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06
ERROR : Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04
ERROR : Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00
ERROR : Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01
ERROR : Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03
ERROR : Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04
ERROR : Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05
ERROR : Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06
ERROR : Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00
ERROR : Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03
ERROR : Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05
ERROR : Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01
ERROR : Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06
ERROR : Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04
ERROR : Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00
ERROR : Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03
ERROR : Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01
ERROR : Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04
ERROR : Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05
ERROR : Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06
ERROR : Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00
ERROR : Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03
ERROR : Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01
ERROR : Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04
ERROR : Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05
ERROR : Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06
ERROR : Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00
ERROR : Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01
ERROR : Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04
ERROR : Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05
ERROR : Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06
ERROR : Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00
ERROR : Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05
ERROR : Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03
ERROR : Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01
ERROR : Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06
ERROR : Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04
ERROR : Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00
ERROR : Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03
ERROR : Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00
ERROR : Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04
ERROR : Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05
ERROR : Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01
ERROR : Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00
ERROR : Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03
ERROR : Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01
ERROR : Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04
ERROR : Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05
ERROR : Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00
ERROR : Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04
ERROR : Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05
ERROR : Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06
ERROR : Vertex failed, vertexName=Reducer 5, vertexId=vertex_1549678950511_24672_1_07, diagnostics=[Task failed, taskId=task_1549678950511_24672_1_07_000375, diagnostics=[TaskAttempt 0 failed, info=[Container container_1549678950511_24672_01_000184 finished with diagnostics set to [Container failed, exitCode=-100. Container released on a *lost* node]], TaskAttempt 1 failed, info=[Error: exceptionThrown=org.apache.tez.runtime.library.common.shuffle.orderedgrouped.Shuffle$ShuffleError: error in shuffle in fetcher {Reducer_4} #2
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.Shuffle$RunShuffleCallable.callInternal(Shuffle.java:382)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.Shuffle$RunShuffleCallable.callInternal(Shuffle.java:334)
              at org.apache.tez.common.CallableWithNdc.call(CallableWithNdc.java:36)
              at java.util.concurrent.FutureTask.run(FutureTask.java:266)
              at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
              at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
              at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.NullPointerException
              at sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1601)
              at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1492)
              at org.apache.tez.runtime.library.common.shuffle.HttpConnection.getInputStream(HttpConnection.java:253)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.setupConnection(FetcherOrderedGrouped.java:356)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.copyFromHost(FetcherOrderedGrouped.java:264)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.fetchNext(FetcherOrderedGrouped.java:176)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.run(FetcherOrderedGrouped.java:191)
, errorMessage=Shuffle Runner Failed:org.apache.tez.runtime.library.common.shuffle.orderedgrouped.Shuffle$ShuffleError: error in shuffle in fetcher {Reducer_4} #2
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.Shuffle$RunShuffleCallable.callInternal(Shuffle.java:382)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.Shuffle$RunShuffleCallable.callInternal(Shuffle.java:334)
              at org.apache.tez.common.CallableWithNdc.call(CallableWithNdc.java:36)
              at java.util.concurrent.FutureTask.run(FutureTask.java:266)
              at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
              at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
              at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.NullPointerException
              at sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1601)
              at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1492)
              at org.apache.tez.runtime.library.common.shuffle.HttpConnection.getInputStream(HttpConnection.java:253)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.setupConnection(FetcherOrderedGrouped.java:356)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.copyFromHost(FetcherOrderedGrouped.java:264)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.fetchNext(FetcherOrderedGrouped.java:176)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.run(FetcherOrderedGrouped.java:191)
], TaskAttempt 2 failed, info=[Container container_1549678950511_24672_01_000292 finished with diagnostics set to [Container failed, exitCode=-100. Container released on a *lost* node]], TaskAttempt 3 failed, info=[Container container_1549678950511_24672_01_000312 finished with diagnostics set to [Container failed, exitCode=-100. Container released on a *lost* node]]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:369, Vertex vertex_1549678950511_24672_1_07 [Reducer 5] killed/failed due to:OWN_TASK_FAILURE]
ERROR : Vertex killed, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06, diagnostics=[Vertex received Kill while in RUNNING state., Vertex did not succeed due to OTHER_VERTEX_FAILURE, failedTasks:0 killedTasks:3, Vertex vertex_1549678950511_24672_1_06 [Reducer 4] killed/failed due to:OTHER_VERTEX_FAILURE]
ERROR : Vertex killed, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05, diagnostics=[Vertex received Kill while in RUNNING state., Vertex did not succeed due to OTHER_VERTEX_FAILURE, failedTasks:0 killedTasks:4, Vertex vertex_1549678950511_24672_1_05 [Reducer 3] killed/failed due to:OTHER_VERTEX_FAILURE]
ERROR : Vertex killed, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04, diagnostics=[Vertex received Kill while in RUNNING state., Vertex did not succeed due to OTHER_VERTEX_FAILURE, failedTasks:0 killedTasks:3, Vertex vertex_1549678950511_24672_1_04 [Reducer 2] killed/failed due to:OTHER_VERTEX_FAILURE]
ERROR : Vertex killed, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03, diagnostics=[Vertex received Kill while in RUNNING state., Vertex did not succeed due to OTHER_VERTEX_FAILURE, failedTasks:0 killedTasks:7, Vertex vertex_1549678950511_24672_1_03 [Map 1] killed/failed due to:OTHER_VERTEX_FAILURE]
ERROR : Vertex killed, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01, diagnostics=[Vertex received Kill while in RUNNING state., Vertex did not succeed due to OTHER_VERTEX_FAILURE, failedTasks:0 killedTasks:9, Vertex vertex_1549678950511_24672_1_01 [Map 7] killed/failed due to:OTHER_VERTEX_FAILURE]
ERROR : Vertex killed, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00, diagnostics=[Vertex received Kill while in RUNNING state., Vertex did not succeed due to OTHER_VERTEX_FAILURE, failedTasks:0 killedTasks:6, Vertex vertex_1549678950511_24672_1_00 [Map 8] killed/failed due to:OTHER_VERTEX_FAILURE]
ERROR : DAG did not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:6
Error: Error while processing statement: FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.TezTask. Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05Vertex re-running, vertexName=Reducer 4, 
vertexId=vertex_1549678950511_24672_1_06Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00Vertex re-running, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00Vertex re-running, vertexName=Map 1, 
vertexId=vertex_1549678950511_24672_1_03Vertex re-running, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05Vertex re-running, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00Vertex re-running, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04Vertex re-running, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05Vertex re-running, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06Vertex failed, vertexName=Reducer 5, vertexId=vertex_1549678950511_24672_1_07, diagnostics=[Task failed, taskId=task_1549678950511_24672_1_07_000375, diagnostics=[TaskAttempt 0 failed, info=[Container container_1549678950511_24672_01_000184 finished with diagnostics set to [Container failed, exitCode=-100. Container released on a *lost* node]], TaskAttempt 1 failed, info=[Error: exceptionThrown=org.apache.tez.runtime.library.common.shuffle.orderedgrouped.Shuffle$ShuffleError: error in shuffle in fetcher {Reducer_4} #2
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.Shuffle$RunShuffleCallable.callInternal(Shuffle.java:382)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.Shuffle$RunShuffleCallable.callInternal(Shuffle.java:334)
              at org.apache.tez.common.CallableWithNdc.call(CallableWithNdc.java:36)
              at java.util.concurrent.FutureTask.run(FutureTask.java:266)
              at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
              at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
              at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.NullPointerException
              at sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1601)
              at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1492)
              at org.apache.tez.runtime.library.common.shuffle.HttpConnection.getInputStream(HttpConnection.java:253)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.setupConnection(FetcherOrderedGrouped.java:356)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.copyFromHost(FetcherOrderedGrouped.java:264)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.fetchNext(FetcherOrderedGrouped.java:176)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.run(FetcherOrderedGrouped.java:191)
, errorMessage=Shuffle Runner Failed:org.apache.tez.runtime.library.common.shuffle.orderedgrouped.Shuffle$ShuffleError: error in shuffle in fetcher {Reducer_4} #2
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.Shuffle$RunShuffleCallable.callInternal(Shuffle.java:382)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.Shuffle$RunShuffleCallable.callInternal(Shuffle.java:334)
              at org.apache.tez.common.CallableWithNdc.call(CallableWithNdc.java:36)
              at java.util.concurrent.FutureTask.run(FutureTask.java:266)
              at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
              at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
              at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.NullPointerException
              at sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1601)
              at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1492)
              at org.apache.tez.runtime.library.common.shuffle.HttpConnection.getInputStream(HttpConnection.java:253)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.setupConnection(FetcherOrderedGrouped.java:356)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.copyFromHost(FetcherOrderedGrouped.java:264)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.fetchNext(FetcherOrderedGrouped.java:176)
              at org.apache.tez.runtime.library.common.shuffle.orderedgrouped.FetcherOrderedGrouped.run(FetcherOrderedGrouped.java:191)
], TaskAttempt 2 failed, info=[Container container_1549678950511_24672_01_000292 finished with diagnostics set to [Container failed, exitCode=-100. Container released on a *lost* node]], TaskAttempt 3 failed, info=[Container container_1549678950511_24672_01_000312 finished with diagnostics set to [Container failed, exitCode=-100. Container released on a *lost* node]]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:369, Vertex vertex_1549678950511_24672_1_07 [Reducer 5] killed/failed due to:OWN_TASK_FAILURE]Vertex killed, vertexName=Reducer 4, vertexId=vertex_1549678950511_24672_1_06, diagnostics=[Vertex received Kill while in RUNNING state., Vertex did not succeed due to OTHER_VERTEX_FAILURE, failedTasks:0 killedTasks:3, Vertex vertex_1549678950511_24672_1_06 [Reducer 4] killed/failed due to:OTHER_VERTEX_FAILURE]Vertex killed, vertexName=Reducer 3, vertexId=vertex_1549678950511_24672_1_05, diagnostics=[Vertex received Kill while in RUNNING state., Vertex did not succeed due to OTHER_VERTEX_FAILURE, failedTasks:0 killedTasks:4, Vertex vertex_1549678950511_24672_1_05 [Reducer 3] killed/failed due to:OTHER_VERTEX_FAILURE]Vertex killed, vertexName=Reducer 2, vertexId=vertex_1549678950511_24672_1_04, diagnostics=[Vertex received Kill while in RUNNING state., Vertex did not succeed due to OTHER_VERTEX_FAILURE, failedTasks:0 killedTasks:3, Vertex vertex_1549678950511_24672_1_04 [Reducer 2] killed/failed due to:OTHER_VERTEX_FAILURE]Vertex killed, vertexName=Map 1, vertexId=vertex_1549678950511_24672_1_03, diagnostics=[Vertex received Kill while in RUNNING state., Vertex did not succeed due to OTHER_VERTEX_FAILURE, failedTasks:0 killedTasks:7, Vertex vertex_1549678950511_24672_1_03 [Map 1] killed/failed due to:OTHER_VERTEX_FAILURE]Vertex killed, vertexName=Map 7, vertexId=vertex_1549678950511_24672_1_01, diagnostics=[Vertex received Kill while in RUNNING state., Vertex did not succeed due to OTHER_VERTEX_FAILURE, failedTasks:0 killedTasks:9, 
Vertex vertex_1549678950511_24672_1_01 [Map 7] killed/failed due to:OTHER_VERTEX_FAILURE]Vertex killed, vertexName=Map 8, vertexId=vertex_1549678950511_24672_1_00, diagnostics=[Vertex received Kill while in RUNNING state., Vertex did not succeed due to OTHER_VERTEX_FAILURE, failedTasks:0 killedTasks:6, Vertex vertex_1549678950511_24672_1_00 [Map 8] killed/failed due to:OTHER_VERTEX_FAILURE]DAG did not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:6 (state=08S01,code=2)

我已经尝试了两次,但我得到了相同的结果。顺便说一句,MID_DealerVehicleOutputValue表中总共有 336258079 行。这是导致错误吗?在此之前,其他一些类似的语句已成功运行,但要处理的行数不多。

4

2 回答 2

2

在进行任何内存调整之前,请先尝试以更好的方式重写您的查询:当前查询做了不必要的工作,执行了多余的连接。首先,可以删除那个用 GROUP BY 计算 count(*) 的子查询以及与它的内部连接,从而大大简化查询——改用分析函数 count(*) over(partition by DIMDealerID) 即可。

-- Same rewrite, restyled: a single scan of the big table computes the
-- per-dealer row count with an analytic COUNT(*), replacing the GROUP BY
-- subquery and the inner self-join of the original query.
INSERT OVERWRITE TABLE datalake_rci.MID_DealerVehicleOutputValue
--MIDDealerVehicleOutputValueID int,
SELECT
    src.MIDDealerVehicleOutputValueID,
    src.DealerID,            -- string
    src.CHASSIS,             -- string
    src.DIMDealerID,         -- bigint
    src.DIMVehicleID,        -- bigint
    CASE
        WHEN flagged.DIMDealerID IS NOT NULL THEN 1
        ELSE src.DIMOutputValueID
    END AS DIMOutputValueID, -- int
    src.OutputValueName,     -- string
    src.OutputValueName_CN,  -- string
    src.OutputValueCode,     -- varchar(50)
    src.OutputValueOrder     -- int
FROM datalake_rci.MID_DealerVehicleOutputValue src
LEFT OUTER JOIN
(
    -- dealers' rows whose descending rank falls within the "low" threshold
    SELECT ranked.low, ranked.DIMDealerID, ranked.DIMVehicleID,
           ranked.OutputValueOrder, ranked.row_num
    FROM
    (
        SELECT base.low, base.DIMDealerID, base.DIMVehicleID, base.OutputValueOrder,
               row_number() OVER (PARTITION BY base.DIMDealerID
                                  ORDER BY base.OutputValueOrder DESC) AS row_num
        FROM
        (
            -- analytic count(*) yields the dealer's row count in the same scan
            SELECT DIMDealerID, DIMVehicleID, OutputValueOrder,
                   count(*) OVER (PARTITION BY DIMDealerID) * l.Rate AS low
            FROM datalake_rci.MID_DealerVehicleOutputValue
            CROSS JOIN
                (SELECT Rate FROM datalake_rci.DIM_OutputValue
                  WHERE OutputValueCode = 'Low') l
        ) base
    ) ranked
    WHERE ranked.row_num <= ranked.low
) flagged ON src.DIMDealerID = flagged.DIMDealerID;

作为查询优化的下一步,尝试把不必要的 left join 也删掉,像这样:

-- Final single-pass rewrite: the per-dealer count and the rank are both
-- computed with window functions over ONE scan of the big table, so the
-- self left-join is eliminated entirely.
-- Fixes to the original draft: added the commas missing before row_number()
-- and count(*), removed the duplicated OutputValueOrder column, selected
-- DIMOutputValueID (the outer CASE references it but it was never carried
-- through), and gave each nesting level its own alias instead of reusing `s`.
INSERT overwrite table datalake_rci.MID_DealerVehicleOutputValue
--MIDDealerVehicleOutputValueID int,
select
 s.MIDDealerVehicleOutputValueID
,s.DealerID --string,
,s.CHASSIS --string,
,s.DIMDealerID --bigint,
,s.DIMVehicleID --bigint,

,case when s.row_num <= s.low        --you do not need a join to calculate this
      then 1 else s.DIMOutputValueID
  end DIMOutputValueID  --int

,s.OutputValueName --string,
,s.OutputValueName_CN --string,
,s.OutputValueCode --varchar(50),
,s.OutputValueOrder --int
from
    (
        -- rank each dealer's rows by OutputValueOrder, highest first
        select r.*,
               row_number() over(partition by r.DIMDealerID order by r.OutputValueOrder desc) row_num
        from
        (
            -- carry every output column through and compute the per-dealer
            -- "low" threshold (row count * Rate) in the same scan
            select m.MIDDealerVehicleOutputValueID, m.DealerID, m.CHASSIS,
                   m.DIMDealerID, m.DIMVehicleID, m.DIMOutputValueID,
                   m.OutputValueName, m.OutputValueName_CN,
                   m.OutputValueCode, m.OutputValueOrder,
                   count(*) over(partition by m.DIMDealerID) * l.Rate low
            from datalake_rci.MID_DealerVehicleOutputValue m
            cross join
                (select Rate from datalake_rci.DIM_OutputValue where OutputValueCode = 'Low') l
        ) r
    ) s;

当然,我的查询可能包含一些错误,应该仔细测试,但我希望你已经明白了。几乎总是可以消除自联接。最后,您的查询将只读取每个表一次,并且将消除许多其他繁重的步骤。我希望您将摆脱至少两个 reducer 和两个 mapper 顶点。

另外我建议增加映射器的并行性。调整这些设置,尝试减少数字,直到运行更多映射器:

--tune mapper parallelizm
-- HiveInputFormat makes Tez honor the grouping sizes below when splitting input
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-- max bytes per grouped split (64 MB); lowering it yields more mapper tasks
set tez.grouping.max-size=67108864;
-- min bytes per grouped split (~32 MB); keep below max-size
set tez.grouping.min-size=32000000;
于 2019-02-21T07:40:13.227 回答
0

您应该更改 mapreduce.reduce.shuffle.memory.limit.percent 的值。

此参数用于单个 shuffle(从单个 Map 任务复制的输出)应占用的上述内存缓冲区的最大百分比。超过此大小的 shuffle 大小不会被复制到内存缓冲区,而是直接写入 reducer 的磁盘。

尝试减小此参数的值,然后再次运行查询。

还要确保 mapreduce.reduce.shuffle.merge.percent 的值低于 mapreduce.reduce.shuffle.memory.limit.percent。

于 2019-02-21T06:07:08.267 回答