我有一个以并行方式迭代输入集合、从而并行化以下过程的函数:
- 获取网页
- 解析html代码
- 返回结果并添加到结果集合
问题是第一次运行大约需要 5 分钟,第二次运行大约需要 40 分钟。输入集合没有改变,因此运行时间应该非常相似。任何想法?
我已经包含了 GetPrices(int)。第二次运行时,它从 netstat 中的 0 个连接开始(因此从第一次开始就没有可用的连接),但它只增长到 5 个连接(而不是第一次运行时的 30 个)。
另外,您认为存放返回结果的集合是否需要加锁?
/// <summary>
/// Fetches prices for every ID in parallel and aggregates the per-ID results
/// into a single dictionary keyed by ID.
/// </summary>
/// <param name="IDs">IDs to fetch; each is passed to <c>GetPrices(int)</c>.</param>
/// <returns>Map of ID to its price dictionary as produced by <c>GetPrices(int)</c>.</returns>
/// <remarks>
/// Yes, the lock IS required: Dictionary is not thread-safe for concurrent
/// writes, and the Parallel.ForEach body mutates <c>ret</c> from many threads.
/// </remarks>
public Dictionary<int, Dictionary<int, double>> GetPrices(List<int> IDs)
{
    Stopwatch web_time = new Stopwatch(), regex_time = new Stopwatch();
    Dictionary<int, Dictionary<int, double>> ret = new Dictionary<int, Dictionary<int, double>>();
    int aux_bkp = ServicePointManager.DefaultConnectionLimit;
    ParallelOptions pOptions = new ParallelOptions();
    ServicePointManager.DefaultConnectionLimit = 30;
    pOptions.MaxDegreeOfParallelism = 35;
    try
    {
        Parallel.ForEach(IDs, pOptions, ID =>
        {
            Dictionary<int, double> aux = GetPrices(ID);
            lock (ret)
            {
                // BUG FIX: the original indexed ret with an undefined identifier
                // `mktID` (compile error); the key is `ID`. The element-by-element
                // copy loop is replaced by the Dictionary copy constructor.
                ret.Add(ID, new Dictionary<int, double>(aux));
            }
        });
    }
    finally
    {
        // Restore the process-wide connection limit even if a worker throws.
        ServicePointManager.DefaultConnectionLimit = aux_bkp;
    }
    return ret;
}
/// <summary>
/// Downloads the price page for one ID and parses it with <c>ProcessHTML</c>.
/// Retries indefinitely on transient HTTP errors with a random back-off;
/// an HTTP 404 is treated as final and yields an empty dictionary.
/// </summary>
/// <param name="ID">Identifier of the item whose prices are fetched.</param>
/// <returns>Parsed prices, or an empty dictionary on a 404 response.</returns>
/// <remarks>
/// NOTE(review): <c>url</c> and <c>ProcessHTML</c> are defined elsewhere in the
/// file; presumably <c>url</c> is built from <paramref name="ID"/> — confirm.
/// </remarks>
public static Dictionary<int, double> GetPrices(int ID)
{
    Stopwatch web_time = new Stopwatch(), regex_time = new Stopwatch();
    Dictionary<int, double> ret = new Dictionary<int, double>();
    bool success = false;
    int retries = 0;
    // FIX: one RNG per call instead of `new Random()` per retry — time-seeded
    // instances created in quick succession yield identical sleep intervals.
    Random backoff = new Random();
    web_time.Start();
    // FIX: the WebClient was never disposed (resource leak). Disposing it
    // releases the underlying connection promptly, which matters when this
    // method runs on 30+ threads in parallel.
    using (WebClient webclient = new WebClient())
    {
        while (!success)
        {
            try
            {
                Debug.WriteLine(string.Format("Get HTML: ({1}) - ({0})", url, ID));
                string resp = webclient.DownloadString(url);
                success = true;
                web_time.Stop();
                // FIX: regex_time.Start() was called twice in the original.
                regex_time.Start();
                ret = ProcessHTML(resp, ID);
                regex_time.Stop();
            }
            catch (WebException e)
            {
                System.Net.HttpWebResponse aux = e.Response as System.Net.HttpWebResponse;
                if (aux != null && aux.StatusCode == HttpStatusCode.NotFound)
                {
                    // 404 is final: stop retrying, return the empty result.
                    // FIX: the original `else if` used `||`, which only worked
                    // by accident because the first branch already excluded
                    // the null/non-404 cases.
                    success = true;
                }
                else
                {
                    // Transient failure: back off a random interval and retry.
                    retries++;
                    System.Threading.Thread.Sleep(backoff.Next(5000));
                    System.Diagnostics.Debug.WriteLine("HTTP Error - " + e.ToString());
                }
            }
        }
    }
    regex_time.Stop();
    return ret;
}