0

我们在一台 Amazon Web Services(AWS)服务器上安装了一个 Couchbase 实例,同一台服务器上还运行着一个 Elasticsearch 实例。

它们两者之间的连接正常,复制一直运行良好,直到出乎意料地,我们在 Elasticsearch 上收到了以下错误日志:

[2013-08-29 21:27:34,947][WARN ][cluster.metadata         ] [01-Thor] failed to dynamically update the mapping in cluster_state from shard
 java.lang.OutOfMemoryError: Java heap space
    at org.apache.lucene.util.ArrayUtil.grow(ArrayUtil.java:343)
    at org.elasticsearch.common.io.FastByteArrayOutputStream.write(FastByteArrayOutputStream.java:103)
    at org.elasticsearch.common.jackson.core.json.UTF8JsonGenerator._flushBuffer(UTF8JsonGenerator.java:1848)
    at org.elasticsearch.common.jackson.core.json.UTF8JsonGenerator.writeString(UTF8JsonGenerator.java:436)
    at org.elasticsearch.common.xcontent.json.JsonXContentGenerator.writeString(JsonXContentGenerator.java:84)
    at org.elasticsearch.common.xcontent.XContentBuilder.field(XContentBuilder.java:314)
    at org.elasticsearch.index.mapper.core.AbstractFieldMapper.doXContentBody(AbstractFieldMapper.java:601)
    at org.elasticsearch.index.mapper.core.NumberFieldMapper.doXContentBody(NumberFieldMapper.java:286)
    at org.elasticsearch.index.mapper.core.LongFieldMapper.doXContentBody(LongFieldMapper.java:338)
    at org.elasticsearch.index.mapper.core.AbstractFieldMapper.toXContent(AbstractFieldMapper.java:595)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:852)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:852)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:852)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:852)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920)
    at org.elasticsearch.index.mapper.DocumentMapper.toXContent(DocumentMapper.java:700)
    at org.elasticsearch.index.mapper.DocumentMapper.refreshSource(DocumentMapper.java:682)
    at org.elasticsearch.index.mapper.DocumentMapper.<init>(DocumentMapper.java:342)
    at org.elasticsearch.index.mapper.DocumentMapper$Builder.build(DocumentMapper.java:224)
    at org.elasticsearch.index.mapper.DocumentMapperParser.parse(DocumentMapperParser.java:231)
    at org.elasticsearch.index.mapper.MapperService.parse(MapperService.java:380)
    at org.elasticsearch.index.mapper.MapperService.merge(MapperService.java:190)
    at org.elasticsearch.cluster.metadata.MetaDataMappingService$2.execute(MetaDataMappingService.java:185)
    at org.elasticsearch.cluster.service.InternalClusterService$2.run(InternalClusterService.java:229)
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:95)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
    at java.lang.Thread.run(Unknown Source)
[2013-08-29 21:27:56,948][WARN ][indices.ttl              ] [01-Thor] failed to execute ttl purge
 java.lang.OutOfMemoryError: Java heap space
    at org.apache.lucene.util.ByteBlockPool$Allocator.getByteBlock(ByteBlockPool.java:66)
    at org.apache.lucene.util.ByteBlockPool.nextBuffer(ByteBlockPool.java:202)
    at org.apache.lucene.util.BytesRefHash.add(BytesRefHash.java:319)
    at org.apache.lucene.util.BytesRefHash.add(BytesRefHash.java:274)
    at org.apache.lucene.search.ConstantScoreAutoRewrite$CutOffTermCollector.collect(ConstantScoreAutoRewrite.java:131)
    at org.apache.lucene.search.TermCollectingRewrite.collectTerms(TermCollectingRewrite.java:79)
    at org.apache.lucene.search.ConstantScoreAutoRewrite.rewrite(ConstantScoreAutoRewrite.java:95)
    at org.apache.lucene.search.MultiTermQuery$ConstantScoreAutoRewrite.rewrite(MultiTermQuery.java:220)
    at org.apache.lucene.search.MultiTermQuery.rewrite(MultiTermQuery.java:288)
    at org.apache.lucene.search.IndexSearcher.rewrite(IndexSearcher.java:639)
    at org.apache.lucene.search.IndexSearcher.createNormalizedWeight(IndexSearcher.java:686)
    at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:309)
    at org.elasticsearch.indices.ttl.IndicesTTLService.purgeShards(IndicesTTLService.java:186)
    at org.elasticsearch.indices.ttl.IndicesTTLService.access$000(IndicesTTLService.java:65)
    at org.elasticsearch.indices.ttl.IndicesTTLService$PurgerThread.run(IndicesTTLService.java:122)

 [2013-08-29 21:29:23,919][WARN ][indices.ttl              ] [01-Thor] failed to execute ttl purge
 java.lang.OutOfMemoryError: Java heap space

我们尝试调整了几个内存相关的配置值,但问题似乎仍未解决。

有人遇到过同样的问题吗?

4

1 回答 1

1

一些故障排除提示:

  1. 通常明智的做法是将一整台 AWS 实例专门用于运行 Elasticsearch,以获得可预测的性能,也便于调试。

  2. 使用 Bigdesk 插件监控您的内存使用情况。它可以帮您判断内存瓶颈是否真的出在 Elasticsearch 本身——也可能来自操作系统、同时进行的大量查询和索引操作,或者其他一些意想不到的原因。

  3. Elasticsearch 的 Java 堆大小应设置为服务器总内存的 50% 左右。

  4. Shay Banon 的这份 gist(代码片段笔记)提供了几种解决 Elasticsearch 内存问题的方案。

于 2013-09-10T05:55:28.413 回答