1

我在 Ubuntu 上使用带有 Jdk 1.7 的 netty 3.5.11 以非常高的频率接收大量的股票价格更新。发送的消息格式是 JSON。数据是从 redis 服务器上的主题订阅的。每个符号都有一个订阅者。通道对象被传递给多个订阅者,并在接收到数据时将其写入客户端。

现在在 2 分钟内接收到的数据量约为 25,000 条记录。每个记录大小平均约为 500 字节长。

在测试运行期间,大约 7500/8000 条记录被丢弃,因为通道不可写。我该如何避免这种情况?

我还注意到延迟会系统性地增加,导致在很长一段时间后才收到更新。这发生在我使用 BufferedWriteHandler 来避免丢包时。

这是我在引导程序上设置的选项。

// Shared ExecutionHandler: offloads upstream events to an ordered pool so the
// I/O worker threads are never blocked by application logic.
executionHandler = new ExecutionHandler(
            new OrderedMemoryAwareThreadPoolExecutor(Runtime.getRuntime().availableProcessors() * 2, 1000000, 10000000, 100,
            TimeUnit.MILLISECONDS));
serverBootStrap.setPipelineFactory(new ChannelPipelineFactory()
    {
        @Override
        public ChannelPipeline getPipeline() throws Exception
        {
            return Channels.pipeline(new PortUnificationServerHandler(getConfiguration(), executionHandler));
        }
    });

    serverBootStrap.setOption("child.tcpNoDelay", true);
    serverBootStrap.setOption("tcpNoDelay", true);
    serverBootStrap.setOption("child.keepAlive", true);
    serverBootStrap.setOption("child.reuseAddress", true);
    //setting buffer size can improve I/O
    serverBootStrap.setOption("child.sendBufferSize", 16777216);
    serverBootStrap.setOption("receiveBufferSize", 16777216);//1048576);
    // The receive-buffer predictor governs reads on the accepted (child)
    // channels, so it must use the "child." prefix on a ServerBootstrap;
    // without it the option lands on the accepting channel and is ignored.
    serverBootStrap.setOption("child.receiveBufferSizePredictorFactory", new AdaptiveReceiveBufferSizePredictorFactory(1024, 1024 * 16, 16777216));//1048576));

    serverBootStrap.setOption("backlog", 1000);
    serverBootStrap.setOption("sendBufferSize", 16777216);//1048576);

    // BUGFIX: the write-buffer water marks control Channel.isWritable() on the
    // accepted child channels, so they MUST be set with the "child." prefix.
    // Without it the children keep the Netty 3 defaults (low 32KiB / high
    // 64KiB), which makes the channel flip to non-writable almost immediately
    // under this load and causes the observed drops.
    // Also set the HIGH mark BEFORE the LOW mark: setting a 25MiB low mark
    // while the high mark is still the 64KiB default is rejected by
    // NioSocketChannelConfig (low must not exceed high at any point).
    serverBootStrap.setOption("child.writeBufferHighWaterMark", 1024 * 1024 * 50);
    serverBootStrap.setOption("child.writeBufferLowWaterMark", 1024 * 1024 * 25);

管道和处理程序类

/**
 * First handler on a fresh connection. Peeks at the initial bytes, and once a
 * valid JSON login request is seen, rebuilds the pipeline (framing, string
 * codecs, optional gzip, executor, business handler) and removes itself.
 */
public class PortUnificationServerHandler extends FrameDecoder

{

// ObjectMapper is thread-safe and expensive to create; share one instance
// instead of allocating a new one on every decode() call.
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

private AppConfiguration appConfiguration;
private final ExecutionHandler executionHandler;

public PortUnificationServerHandler(AppConfiguration pAppConfiguration, ExecutionHandler pExecutionHandler)
{
    appConfiguration = pAppConfiguration;
    this.executionHandler = pExecutionHandler;
}

/**
 * Inspects the buffered bytes; if they form a valid JSON login request the
 * pipeline is reconfigured according to the client's compression flag.
 * NOTE(review): this reads the WHOLE readable buffer as UTF-8 without any
 * framing, so it relies on the login request arriving in one read — a
 * partially received JSON document will simply fail isValidJSON and be
 * forwarded as-is. Confirm the client always sends the login atomically.
 */
@Override
protected Object decode(ChannelHandlerContext ctx, Channel channel, ChannelBuffer buffer) throws Exception
{

    String lRequest = buffer.toString(CharsetUtil.UTF_8);
    if (ConnectionServiceHelper.isValidJSON(lRequest))
    {
        StringReader lStringReader = new StringReader(lRequest);
        JsonNode lNode = OBJECT_MAPPER.readTree(lStringReader);
        if (lNode.get(Constants.REQUEST_TYPE).asText().trim().equalsIgnoreCase(Constants.LOGIN_REQUEST))
        {
            JsonNode lDataNode1 = lNode.get(Constants.REQUEST_DATA);
            LoginRequest lLogin = OBJECT_MAPPER.treeToValue(lDataNode1, LoginRequest.class);

            // Gzip is enabled only when the client explicitly asked for it;
            // in every case the JSON pipeline is installed and this
            // port-unification handler removes itself.
            if (lLogin.getCompress() != null
                    && lLogin.getCompress().trim().equalsIgnoreCase(Constants.COMPRESS_FLAG_TRUE))
            {
                enableJSON(ctx);
                enableGzip(ctx);
            }
            else
            {
                enableJSON(ctx);
            }
            ctx.getPipeline().remove(this);
        }
    }

    // Forward the current read buffer as is to the new handlers.
    return buffer.readBytes(buffer.readableBytes());
}

/**
 * Installs the plain-JSON pipeline (idempotent: each handler is added only
 * if not already present). The buffered writer goes first so it sees every
 * outbound write; the frame decoder splits inbound data on '\n'.
 */
private void enableJSON(ChannelHandlerContext ctx)
{
    ChannelPipeline pipeline = ctx.getPipeline();

    // BUGFIX: the original declared 'lHandlerExists' twice in this method,
    // which does not compile; declare once and reassign.
    boolean lHandlerExists = pipeline.getContext("bufferedwriter") != null;

    if (!lHandlerExists)
    {
        pipeline.addFirst("bufferedwriter", new MyBufferedWriteHandler()); // 80960
    }

    lHandlerExists = pipeline.getContext("framer") != null;
    if (!lHandlerExists)
    {
        pipeline.addLast("framer", new DelimiterBasedFrameDecoder(65535,
                new ChannelBuffer[]
                {
                    ChannelBuffers.wrappedBuffer(
                    new byte[]
                    {
                        '\n'
                    })
                }));
    }

    lHandlerExists = pipeline.getContext("decoder") != null;
    if (!lHandlerExists)
    {
        pipeline.addLast("decoder", new StringDecoder(CharsetUtil.UTF_8));
    }

    lHandlerExists = pipeline.getContext("encoder") != null;
    if (!lHandlerExists)
    {
        pipeline.addLast("encoder", new StringEncoder(CharsetUtil.UTF_8));
    }

    lHandlerExists = pipeline.getContext("executor") != null;
    if (!lHandlerExists)
    {
        pipeline.addLast("executor", executionHandler);
    }

    lHandlerExists = pipeline.getContext("handler") != null;
    if (!lHandlerExists)
    {
        pipeline.addLast("handler", new ConnectionServiceUpStreamHandler(appConfiguration));
    }

    lHandlerExists = pipeline.getContext("unite") != null;
    if (!lHandlerExists)
    {
        pipeline.addLast("unite", new PortUnificationServerHandler(appConfiguration, executionHandler));
    }
}

/**
 * Switches the outbound path to gzip: removes the plain string encoder and
 * inserts a GZIP deflater plus a 4-byte length prepender before the
 * executor. Assumes enableJSON() has already run (relies on the "executor"
 * handler being present for addBefore).
 */
private void enableGzip(ChannelHandlerContext ctx)
{
    ChannelPipeline pipeline = ctx.getPipeline();

    boolean lHandlerExists = pipeline.getContext("encoder") != null;
    if (lHandlerExists)
    {
        pipeline.remove("encoder");
    }


    lHandlerExists = pipeline.getContext("gzipdeflater") != null;
    if (!lHandlerExists)
    {
        pipeline.addBefore("executor", "gzipdeflater", new ZlibEncoder(ZlibWrapper.GZIP));
    }


    lHandlerExists = pipeline.getContext("lengthprepender") != null;
    if (!lHandlerExists)
    {
        pipeline.addAfter("gzipdeflater", "lengthprepender", new LengthFieldPrepender(4));
    }

}

}

BufferedWriterHandler

   public class MyBufferedWriteHandler extends BufferedWriteHandler

{

private final AtomicLong bufferSize = new AtomicLong();
final Logger logger = LoggerFactory.getLogger(getClass());


 public MyBufferedWriteHandler() {
     // Enable consolidation by default.
     super(true);
 }

@Override
public void writeRequested(ChannelHandlerContext ctx, MessageEvent e) throws Exception
{
     ChannelBuffer data = (ChannelBuffer) e.getMessage();

    if (e.getChannel().isWritable())
    {
        long newBufferSize = bufferSize.get();
        // Flush the queue if it gets larger than 8KiB.
        if (newBufferSize > 0)
        {
            flush();
            bufferSize.set(0);
        }
        ctx.sendDownstream(e);
    }
    else
    {
        logger.warn( "Buffering data for : " + e.getChannel().getRemoteAddress() );
        super.writeRequested(ctx, e);
        bufferSize.addAndGet(data.readableBytes());
    }
}

@Override
public void channelInterestChanged(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception
{
    if (e.getChannel().isWritable())
    {
        flush();
    }
}

订阅者类中用于写入数据的函数

 /**
  * Sends one message (with the protocol delimiter appended) to the client.
  *
  * BUGFIX: the original dropped the message whenever isWritable() was
  * false — but the pipeline's MyBufferedWriteHandler exists precisely to
  * queue writes during unwritable windows, and it never saw the write
  * because the drop happened here first. Write unconditionally (guarded by
  * connectivity) and let the buffering handler absorb back-pressure.
  */
 public void writeToClient(Channel pClientChannel, String pMessage) throws IOException
{
    if (pClientChannel.isConnected())
    {
        pClientChannel.write(pMessage + Constants.RESPONSE_DELIMITER);
    }
    else
    {
         logger.warn(DroppedCounter++ + " droppped : " + pMessage);
    }
}

我已经实施了一些我在 stackoverflow 和其他网站上阅读的建议。但我没有成功解决这个问题。

请就我遗漏了什么提出意见或建议?

谢谢

4

0 回答 0