
I have N different log files coming from N different services running on our appliance. I want to merge the N files into a single file, preserving chronological order. File sizes range from a few KB up to GB.

All N log files share the same format, shown below:

**********  LOGGING SESSION STARTED ************
* Hmsoa Version: 2.4.0.12
* Exe Path: c:\program files (x86)\silicon biosystems\deparray300a_driver\deparray300a_driver.exe
* Exe Version: 1.6.0.154
************************************************


TIME = 2017/02/01 11:12:12,180 ; THID = 4924; CAT = ; LVL = 1000; LOG = API 'Connect'->Enter;
TIME = 2017/02/01 11:12:12,196 ; THID = 4924; CAT = ; LVL = 1000; LOG = API 'Connect'->Exit=0;
TIME = 2017/02/01 11:12:12,196 ; THID = 4924; CAT = ; LVL = 1000; LOG = API 'CCisProxyLocal CONNECT - ok'->Enter;
TIME = 2017/02/01 11:12:12,196 ; THID = 4924; CAT = ; LVL = 1000; LOG = API 'CRecoveryAxesProxyLocal CONNECT - ok'->Enter;
TIME = 2017/02/01 11:12:12,196 ; THID = 4924; CAT = ; LVL = 1000; LOG = API 'CAmplifierProxyLocalV3 CONNECT - ok'->Enter;
TIME = 2017/02/01 11:12:12,196 ; THID = 4924; CAT = ; LVL = 1000; LOG = API 'SYSTEM_DIAGNOSIS_GET'->Enter;
TIME = 2017/02/01 11:12:12,211 ; THID = 4924; CAT = ; LVL = 1000; LOG = API 'SYSTEM_DIAGNOSIS_GET'->Exit=0;
TIME = 2017/02/01 11:12:12,211 ; THID = 4924; CAT = ; LVL = 1000; LOG = API 'LBL_SQUARE_SET'->Enter;
TIME = 2017/02/01 11:12:12,219 ; THID = 4924; CAT = ; LVL = 1000; LOG = API 'LBL_SQUARE_SET'->Exit=0;

Since I already have N separate files, what I have done so far is apply an external-sort-style merge that reads one line from each file at a time:

#include "stdafx.h"
#include "boost/regex.hpp"
#include "boost/lexical_cast.hpp"
#include "boost\filesystem.hpp"
#include <string>
#include <fstream>
#include <iostream>
#include <algorithm>
#include <sstream>
#include <climits>
#include <ctime>
namespace fs = boost::filesystem;

static const boost::regex expression(R"(^(?:(?:TIME\s=\s\d{4}\/\d{2}\/\d{2}\s)|(?:@))([0-9:.,]+))");
static const boost::regex nameFileEx(R"(^[\d\-\_]+(\w+\s?\w+|\w+))");
static const std::string path("E:\\2017-02-01"); 
//static const std::string path("E:\\TestLog");

unsigned long time2Milleseconds(const std::string & time)
{
    int a, b, c, d = 0;                      // d stays 0 if the ",ms" part is missing
    if (sscanf_s(time.c_str(), "%d:%d:%d,%d", &a, &b, &c, &d) >= 3)
        return a * 3600000 + b * 60000 + c * 1000 + d;
    return ULONG_MAX;                        // unparsable timestamp: sort it last
}

// Skip the 7-line session header (5 banner lines plus 2 blank lines) at the top of every file.
void readAllFilesUntilLine7(std::vector<std::pair<std::ifstream, std::string>> & vifs)
{
    std::string line;
    for (std::size_t i = 0; i < vifs.size(); ++i)
    {
        int lineNumber = 0;
        while (lineNumber != 7 && std::getline(vifs[i].first, line))
        { 
            ++lineNumber;
        }
    }
}

// Advance file 'index' by one line: remember the line and, if it carries a TIME field,
// its timestamp; on EOF mark the file exhausted (ULONG_MAX) and decrement 'counter'.
void checkRegex(std::vector<std::pair<std::ifstream, std::string>> & vifs, std::vector<unsigned long> & logTime, std::vector<std::string> & lines, int index, int & counter)
{
    std::string line;
    boost::smatch what;
    if (std::getline(vifs[index].first, line))
    {
        if (boost::regex_search(line, what, expression))
        {
            logTime[index] = time2Milleseconds(what[1]);
        }
        lines[index] = line;
    }
    else
    {
        --counter;
        logTime[index] = ULONG_MAX;
    }
}

// Prime one line per file, then repeatedly write the line with the smallest timestamp
// and advance the file it came from, until every file is exhausted.
void mergeFiles(std::vector<std::pair<std::ifstream, std::string>> & vifs, std::vector<unsigned long> & logTime, std::vector<std::string> & lines, std::ofstream & file, int & counter)
{
    int index = 0;
    for (int i = 0; i < vifs.size(); ++i)
    {
        checkRegex(vifs, logTime, lines, i, counter);
    }
    index = min_element(logTime.begin(), logTime.end()) - logTime.begin();
    file << lines[index] << " --> " << vifs[index].second << "\n";
    while (true)
    {
        checkRegex(vifs, logTime, lines, index, counter);
        index = min_element(logTime.begin(), logTime.end()) - logTime.begin();
        if (0 == counter)
            break;
        file << lines[index] << " --> " << vifs[index].second << "\n";
    }
}

int main()
{
    clock_t begin = clock();
    int cnt = std::count_if(fs::directory_iterator(path),fs::directory_iterator(),static_cast<bool(*)(const fs::path&)>(fs::is_regular_file));
    std::vector<std::pair<std::ifstream, std::string>> vifs(cnt);
    int index = 0;
    boost::smatch what;
    std::string file;
    for (fs::directory_iterator d(path); d != fs::directory_iterator(); ++d)
    {
        if (fs::is_regular_file(d->path()))
        {
            file = d->path().filename().string();
            if (boost::regex_search(file, what, nameFileEx))
            {
                vifs[index++] = std::make_pair(std::ifstream(d->path().string()), what[1]);
            }
        }
    }
    std::vector<unsigned long> logTime(cnt, ULONG_MAX);
    std::vector<std::string> lines(cnt);
    std::ofstream filename(path + "\\TestLog.txt");
    readAllFilesUntilLine7(vifs);
    mergeFiles(vifs, logTime, lines, filename, cnt);
    filename.close();
    clock_t end = clock();
    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    std::cout << "Elapsed time = " << elapsed_secs << "\n";
    return 0;
}

It does exactly what it should, but it is slow. Merging 82 files ranging from 1 KB to 250 MB, and producing a final file of more than 6,000,000 lines, takes 70 minutes.

How can I speed the algorithm up? Any help is greatly appreciated!

Update

I have also implemented a version using a heap:

Data.h:

#pragma once

#include <windows.h>   // DWORD, ULONG
#include <string>

class Data
{
public:
    Data(DWORD index,
         const std::string & line,
         ULONG time);
    ~Data();
    inline const ULONG getTime() const  {return time; }
    inline const DWORD getIndex() const { return index; }
    inline const std::string getLine() const { return line; }
private:
    DWORD index;
    std::string line;
    ULONG time;
};

class Compare
{
public:
    bool operator()(const Data & lhs, const Data & rhs) { return lhs.getTime() > rhs.getTime(); };
};

Data.cpp:

#include "stdafx.h"
#include "Data.h"


Data::Data(DWORD i_index,
           const std::string & i_line,
           ULONG i_time)
    : index(i_index)
    , line(i_line)
    , time(i_time)
{
}


Data::~Data()
{
}

Main.cpp:

#include "stdafx.h"
#include "boost/regex.hpp"
#include "boost/lexical_cast.hpp"
#include "boost\filesystem.hpp"
#include <string>
#include <fstream>
#include <iostream>
#include <algorithm>
#include <sstream>
#include <climits>
#include <ctime>
#include <queue>
#include "Data.h"
namespace fs = boost::filesystem;

static const boost::regex expression(R"(^(?:(?:TIME\s=\s\d{4}\/\d{2}\/\d{2}\s)|(?:@))([0-9:.,]+))");
static const boost::regex nameFileEx(R"(^[\d\-\_]+(\w+\s?\w+|\w+))");
static const std::string path("E:\\2017-02-01");
//static const std::string path("E:\\TestLog");

unsigned long time2Milleseconds(const std::string & time)
{
    int a, b, c, d = 0;                      // d stays 0 if the ",ms" part is missing
    if (sscanf_s(time.c_str(), "%d:%d:%d,%d", &a, &b, &c, &d) >= 3)
        return a * 3600000 + b * 60000 + c * 1000 + d;
    return ULONG_MAX;                        // unparsable timestamp: sort it last
}

// Skip past the file's header by reading until the first line with a TIME field,
// then push that line onto the heap.
void initializeHeap(std::ifstream & ifs, std::priority_queue<Data, std::vector<Data>, Compare> & myHeap, const int index)
{
    ULONG time;
    std::string line;
    boost::smatch what;
    bool match = false;
    while (!match && std::getline(ifs, line))
    {
        if (boost::regex_search(line, what, expression))
        {
            time = time2Milleseconds(what[1]);
            myHeap.push(Data(index, line, time));
            match = true;
        }
    }
}

// Read the next line from file 'index' and push it onto the heap; if it carries no TIME
// field, it keeps the previous timestamp so continuation lines stay adjacent to their
// entry. On EOF nothing is pushed and the file drops out of the merge.
void checkRegex(std::vector<std::pair<std::ifstream, std::string>> & vifs, std::priority_queue<Data, std::vector<Data>, Compare> & myHeap, ULONG time, const int index)
{
    std::string line;
    boost::smatch what;
    if (std::getline(vifs[index].first, line))
    {
        if (boost::regex_search(line, what, expression))
        {
            time = time2Milleseconds(what[1]);
        }
        myHeap.push(Data(index, line, time));
    }
}

// Repeatedly pop the globally smallest line, write it (tagged with its service name),
// and refill the heap from the file it came from.
void mergeFiles(std::vector<std::pair<std::ifstream, std::string>> & vifs, std::priority_queue<Data, std::vector<Data>, Compare> & myHeap, std::ofstream & file)
{
    int index = 0;
    ULONG time = 0;
    while (!myHeap.empty())
    {
        index = myHeap.top().getIndex();
        time = myHeap.top().getTime();
        file << myHeap.top().getLine() << " --> " << vifs[index].second << "\n";
        myHeap.pop();
        checkRegex(vifs, myHeap, time, index);
    }
}

int main()
{
    clock_t begin = clock();
    int cnt = std::count_if(fs::directory_iterator(path), fs::directory_iterator(), static_cast<bool(*)(const fs::path&)>(fs::is_regular_file));
    std::priority_queue<Data, std::vector<Data>, Compare> myHeap;
    std::vector<std::pair<std::ifstream, std::string>> vifs(cnt);
    int index = 0;
    boost::smatch what;
    std::string file;
    for (fs::directory_iterator d(path); d != fs::directory_iterator(); ++d)
    {
        if (fs::is_regular_file(d->path()))
        {
            file = d->path().filename().string();
            if (boost::regex_search(file, what, nameFileEx))
            {
                vifs[index] = std::make_pair(std::ifstream(d->path().string()), what[1]);
                initializeHeap(vifs[index].first, myHeap, index);
                ++index;
            }
        }
    }
    std::ofstream filename(path + "\\TestLog.txt");
    mergeFiles(vifs, myHeap, filename);
    filename.close();
    clock_t end = clock();
    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    std::cout << "Elapsed time = " << elapsed_secs << "\n";
    return 0;
}

Having done all this, I realized that yesterday I had been running the program in Debug. Running both implementations in Release, I get the following results:

  • Vector implementation: ~25 seconds
  • Heap implementation: ~27 seconds

So either my heap implementation is not well optimized, or the two implementations are simply equivalent in running time.

Is there anything else I can do to speed up execution?


1 Answer


This can be done faster and with less memory. First consider:

  • Read one line from each file (so only N lines are in memory at any one time).
  • Find the smallest of those N lines and output it.
  • In memory, replace the value just output with the next line from the file it came from (watching for the EOF case).

If M is the length of the output file (i.e. the combined length of all the logs), then the naive implementation is O(N * M).

However, the above can be improved by using a heap, which reduces it to O(M log N). That is, put the N in-memory elements on a heap. Pop the top to output the smallest element. Then, when you read a new line, just push that line onto the heap.
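For illustration only, a minimal, self-contained sketch of that heap-based K-way merge could look like the following. The getKey() helper here is a hypothetical stand-in for the question's TIME regex and time2Milleseconds(); header skipping, the " --> service name" suffix and error handling are omitted.

#include <cstdio>    // sscanf_s, as in the question's MSVC code
#include <fstream>
#include <queue>
#include <string>
#include <vector>

struct Entry
{
    unsigned long key;   // timestamp of the line, in milliseconds
    std::size_t   src;   // index of the stream the line came from
    std::string   line;
};

struct ByKey             // makes priority_queue behave as a min-heap on the timestamp
{
    bool operator()(const Entry & a, const Entry & b) const { return a.key > b.key; }
};

// Hypothetical key extractor; a real version would reuse the TIME regex and
// time2Milleseconds() from the question.
unsigned long getKey(const std::string & line)
{
    int h = 0, m = 0, s = 0, ms = 0;
    if (sscanf_s(line.c_str(), "TIME = %*d/%*d/%*d %d:%d:%d,%d", &h, &m, &s, &ms) >= 3)
        return h * 3600000UL + m * 60000UL + s * 1000UL + ms;
    return 0;            // header / continuation lines simply sort first here
}

void kWayMerge(std::vector<std::ifstream> & inputs, std::ostream & out)
{
    std::priority_queue<Entry, std::vector<Entry>, ByKey> heap;
    std::string line;
    // Prime the heap with the first line of every file: only N lines are ever in memory.
    for (std::size_t i = 0; i < inputs.size(); ++i)
        if (std::getline(inputs[i], line))
            heap.push(Entry{ getKey(line), i, line });
    // Each of the M output lines costs one pop and at most one push: O(M log N) overall.
    while (!heap.empty())
    {
        Entry smallest = heap.top();
        heap.pop();
        out << smallest.line << '\n';
        if (std::getline(inputs[smallest.src], line))
            heap.push(Entry{ getKey(line), smallest.src, line });
    }
}

Note that the heap only changes the per-line "find the minimum" step from a linear scan over N files to O(log N); with N = 82 that difference is small compared to the regex matching and stream I/O done for every line, which may help explain the nearly identical Release timings reported in the update.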

Answered 2017-04-05T22:07:43.700