So I have an algorithm that reads from a (very large, ~155+ MB) binary file, parses it according to a spec, and writes out the necessary info (to CSV, flat text). It works flawlessly for the first ~15.5 million lines of output, producing a CSV file of ~0.99-1.03 GB. That gets through barely 20% of the binary file. After that point it breaks, in that the data being printed is suddenly not at all what's in the binary file. I checked the binary file, and the same pattern continues there (the data is split into "packets" — see the code below). Because of how it's processed, memory usage never really grows (steady ~15K). The function code is below. Is it my algorithm (and if so, why would it break after 15.5 million lines?!)... or is there some effect of the large file size that I'm not considering? Any ideas?
(FYI: each "packet" is 77 bytes long, begins with a 3-byte "startcode", and ends with a 5-byte "endcode" — you'll see the pattern in the code below.)
EDIT: Code updated with the suggestions from below... thanks!
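For reference, here is the packet layout plus the fields the method uses but doesn't declare. This is a minimal sketch: only the first startcode byte (0x16) and the last endcode byte (0x0a) are implied by the sync check in the code; the remaining byte values, the sw writer, and the working flag are placeholders.

// Packet layout (77 bytes, reconstructed from the parsing code below):
//   [0..2]   3-byte startcode (first byte 0x16)
//   [3..4]   2-byte id
//   [5..6]   2-byte "semistable"
//   [7]      1-byte constant
//   [8..71]  32 x 2-byte payload values
//   [72..76] 5-byte endcode (last byte 0x0a)
private static readonly byte[] STARTCODE = { 0x16, 0x00, 0x00 };            // trailing bytes hypothetical
private static readonly byte[] ENDCODE = { 0x00, 0x00, 0x00, 0x00, 0x0a };  // leading bytes hypothetical
private StreamWriter sw;        // CSV output, opened/closed elsewhere
private volatile bool working;  // cleared elsewhere to cancel the read loop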
private void readBin(string theFile)
{
    List<int> il = new List<int>();
    bool readyForProcessing = false;
    byte[] packet = new byte[77];
    try
    {
        FileStream fs_bin = new FileStream(theFile, FileMode.Open);
        BinaryReader br = new BinaryReader(fs_bin);
        while (br.BaseStream.Position < br.BaseStream.Length && working)
        {
            // Find the first startcode
            while (!readyForProcessing)
            {
                // Sync point: the last byte of an endcode (0x0a) immediately
                // followed by the first byte of a startcode (0x16).
                // This pairing never occurs anywhere except an end/start
                // boundary, so it's safe to sync on.
                if (br.ReadByte() == 0x0a && br.PeekChar() == (char)0x16)
                    readyForProcessing = true;
            }
            // Read a full packet of 77 bytes
            br.Read(packet, 0, packet.Length);
            // Probably unnecessary now, but verifies the packet begins
            // with the startcode and ends with the endcode
            if (packet.Take(3).SequenceEqual(STARTCODE) &&
                packet.Skip(packet.Length - ENDCODE.Length).SequenceEqual(ENDCODE))
            {
                il.Add(BitConverter.ToUInt16(packet, 3)); // il.ElementAt(0) == 2-byte id
                il.Add(BitConverter.ToUInt16(packet, 5)); // il.ElementAt(1) == 2-byte semistable
                il.Add(packet[7]);                        // il.ElementAt(2) == 1-byte constant
                for (int i = 8; i < 72; i += 2) // offsets 8..71: 32 two-byte values
                    il.Add(BitConverter.ToUInt16(packet, i));
                for (int i = 3; i < 35; i++)
                {
                    sw.WriteLine(il.ElementAt(0) + "," + il.ElementAt(1) +
                        "," + il.ElementAt(2) + "," + il.ElementAt(i));
                }
                il.Clear();
            }
            else
            {
                // Handle "bad" packets
            }
        } // while
        fs_bin.Flush();
        br.Close();
        fs_bin.Close();
    }
    catch (Exception e)
    {
        MessageBox.Show(e.ToString());
    }
}
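For completeness, one way the // Handle "bad" packets stub could recover framing is to rewind and re-run the same startcode scan used at the top of the loop. This is just a sketch, assuming a mis-framed packet should be skipped rather than aborting the whole read:

            else
            {
                // Rewind to just past the first byte of the mis-framed packet
                // so the sync scan can look for the next 0x0a 0x16 boundary
                // from there, without skipping any candidate bytes.
                br.BaseStream.Seek(-(packet.Length - 1), SeekOrigin.Current);
                readyForProcessing = false; // re-arm the startcode scan
            }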