0

我们正在使用 fluentd 将日志发送到 AWS Kinesis Firehose。我们发现不时有少量记录未能发送到 AWS Kinesis Firehose（即偶尔丢失记录）。这是我们在 fluentd 中的配置：

    <system>
          log_level info
      </system>
      <source>
          @type tail
          path "/var/log/app/tracy.log*"
          pos_file "/var/tmp/tracy.log.pos"
          pos_file_compaction_interval 72h
          @log_level "error"
          tag "tracylog"
          <parse>
                @type "json"
                time_key False
          </parse>
      </source>
      <source>
         @type monitor_agent
         bind 127.0.0.1
         port 24220
      </source>
      <match tracylog>
          @type "kinesis_firehose"
          region "${awsRegion}"
          delivery_stream_name "${delivery_stream_name}"
          <instance_profile_credentials>
          </instance_profile_credentials>
          <buffer>
              # Frequency of ingestion
              flush_interval 30s
              flush_thread_count 4
              chunk_limit_size 1m
          </buffer>
      </match>
4

1 个回答

0

配置中的一些更改解决了我的问题:

  <system>
      log_level info
  </system>
  <source>
      @type tail
      path "/var/log/app/tracy.log*"
      pos_file "/var/tmp/tracy.log.pos"
      pos_file_compaction_interval 72h
      read_from_head true
      follow_inodes true
      @log_level "error"
      tag "tracylog"
      <parse>
            @type "json"
            time_key False
      </parse>
  </source>
  <source>
     @type monitor_agent
     bind 127.0.0.1
     port 24220
  </source>
  <match tracylog>
      @type "kinesis_firehose"
      region "${awsRegion}"
      delivery_stream_name "${delivery_stream_name}"

      <instance_profile_credentials>
      </instance_profile_credentials>
      <buffer>
        flush_interval 2
        flush_thread_interval 0.1
        flush_thread_burst_interval 0.01
        flush_thread_count 8
      </buffer>
  </match>
于 2021-10-01T11:48:37.720 回答