We have an application that periodically receives MMS messages and should reply to them.
We currently do this with a single thread that first receives the messages and then processes them one at a time. That gets the job done, but it is slow.
So we are now looking at using multiple threads concurrently to do the same work.
Is there any simple way to process the incoming records in parallel, while avoiding two threads mistakenly picking up the same record?
Yes, it's actually not too hard. What you want is known as the "producer-consumer model".
If your message receiver can only run on a single thread, but your message "processor" can handle multiple messages at once, you just need a BlockingCollection to store the work that needs to be processed:
using System;
using System.Collections.Concurrent;
using System.Threading;
using System.Threading.Tasks;

public sealed class MessageProcessor : IDisposable
{
    public MessageProcessor()
        : this(-1)
    {
    }

    public MessageProcessor(int maxThreadsForProcessing)
    {
        _maxThreadsForProcessing = maxThreadsForProcessing;
        _messages = new BlockingCollection<Message>();
        _cts = new CancellationTokenSource();

        _messageProcessorThread = new Thread(ProcessMessages);
        _messageProcessorThread.IsBackground = true;
        _messageProcessorThread.Name = "Message Processor Thread";
        _messageProcessorThread.Start();
    }

    public int MaxThreadsForProcessing
    {
        get { return _maxThreadsForProcessing; }
    }

    private readonly BlockingCollection<Message> _messages;
    private readonly CancellationTokenSource _cts;
    private readonly Thread _messageProcessorThread;
    private bool _disposed = false;
    private readonly int _maxThreadsForProcessing;

    /// <summary>
    /// Add a new message to be queued up and processed in the background.
    /// </summary>
    public void ReceiveMessage(Message message)
    {
        _messages.Add(message);
    }

    /// <summary>
    /// Signals the system to stop processing messages.
    /// </summary>
    /// <param name="finishQueue">Should the queue of messages waiting to be processed be allowed to finish</param>
    public void Stop(bool finishQueue)
    {
        _messages.CompleteAdding();
        if (!finishQueue)
            _cts.Cancel();

        //Wait for the message processor thread to finish its work.
        _messageProcessorThread.Join();
    }

    /// <summary>
    /// The background thread that processes messages in the system
    /// </summary>
    private void ProcessMessages()
    {
        try
        {
            Parallel.ForEach(_messages.GetConsumingEnumerable(),
                new ParallelOptions()
                {
                    CancellationToken = _cts.Token,
                    MaxDegreeOfParallelism = MaxThreadsForProcessing
                },
                ProcessMessage);
        }
        catch (OperationCanceledException)
        {
            //Don't care that it happened, just don't want it to bubble up as an unhandled exception.
        }
    }

    private void ProcessMessage(Message message, ParallelLoopState loopState)
    {
        //Here be dragons! (or your code to process a message, your choice :-))
        //Use if(_cts.Token.IsCancellationRequested || loopState.ShouldExitCurrentIteration) to test if
        // we should quit out of the function early for a graceful shutdown.
    }

    public void Dispose()
    {
        if (!_disposed)
        {
            if (_cts != null && _messages != null && _messageProcessorThread != null)
                Stop(true); //This line will block till all queued messages have been processed; if you want it to be quicker you need to call `Stop(false)` before you dispose the object.

            if (_cts != null)
                _cts.Dispose();

            if (_messages != null)
                _messages.Dispose();

            GC.SuppressFinalize(this);
            _disposed = true;
        }
    }

    ~MessageProcessor()
    {
        //Nothing to do, just making FXCop happy.
    }
}
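For completeness, here is a minimal sketch of how you might host this class; the receive loop, the getNextMessage delegate, and the thread cap of 4 are assumptions for illustration, not part of the class above:

// Hypothetical hosting code: getNextMessage stands in for however you actually pull in MMS messages.
static void RunReceiveLoop(Func<Message> getNextMessage, CancellationToken shutdown)
{
    using (var processor = new MessageProcessor(4)) // cap processing at 4 concurrent messages
    {
        while (!shutdown.IsCancellationRequested)
        {
            Message message = getNextMessage(); // single-threaded receive, just like today
            processor.ReceiveMessage(message);  // queued and processed in the background
        }

        // Drain whatever is still queued before disposing; pass false to cancel the queue instead.
        processor.Stop(true);
    }
}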
I highly recommend reading the free book Patterns of Parallel Programming; it covers this in some detail, including an entire section explaining the producer-consumer model.
Update: There are some performance issues when combining GetConsumingEnumerable() with Parallel.ForEach(). Instead, use the ParallelExtensionsExtras library and its extension method GetConsumingPartitioner():
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;

//Extension methods must live in a static class; the class name here is arbitrary.
public static class BlockingCollectionExtensions
{
    public static Partitioner<T> GetConsumingPartitioner<T>(
        this BlockingCollection<T> collection)
    {
        return new BlockingCollectionPartitioner<T>(collection);
    }

    private class BlockingCollectionPartitioner<T> : Partitioner<T>
    {
        private BlockingCollection<T> _collection;

        internal BlockingCollectionPartitioner(
            BlockingCollection<T> collection)
        {
            if (collection == null)
                throw new ArgumentNullException("collection");
            _collection = collection;
        }

        public override bool SupportsDynamicPartitions
        {
            get { return true; }
        }

        public override IList<IEnumerator<T>> GetPartitions(
            int partitionCount)
        {
            if (partitionCount < 1)
                throw new ArgumentOutOfRangeException("partitionCount");
            var dynamicPartitioner = GetDynamicPartitions();
            return Enumerable.Range(0, partitionCount).Select(_ =>
                dynamicPartitioner.GetEnumerator()).ToArray();
        }

        public override IEnumerable<T> GetDynamicPartitions()
        {
            return _collection.GetConsumingEnumerable();
        }
    }
}
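With that in place, the only change to the MessageProcessor class above is in ProcessMessages: hand Parallel.ForEach the partitioner instead of the consuming enumerable. A sketch of the modified method, assuming the extension above is compiled into your project:

private void ProcessMessages()
{
    try
    {
        // The partitioner lets Parallel.ForEach pull items as they arrive instead of
        // buffering the consuming enumerable in chunks.
        Parallel.ForEach(_messages.GetConsumingPartitioner(),
            new ParallelOptions()
            {
                CancellationToken = _cts.Token,
                MaxDegreeOfParallelism = MaxThreadsForProcessing
            },
            ProcessMessage);
    }
    catch (OperationCanceledException)
    {
        //Don't care that it happened, just don't want it to bubble up as an unhandled exception.
    }
}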