我通常结合使用 CURL 和XMLReader
解析 MediaWiki API 生成的 XML。
请注意，您必须在 User-Agent 标头中包含您的电子邮件地址，
否则 API 脚本将响应 HTTP 403 Forbidden。
这是我初始化 CURL 句柄的方法:
// Contact address sent in the User-Agent header — the MediaWiki API
// responds with HTTP 403 Forbidden when no identifying User-Agent is sent.
define("EMAIL_ADDRESS", "my@email.com");
$ch = curl_init();
$cv = curl_version();
// Build a descriptive User-Agent from the local curl build info.
// Uses "{$cv['version']}" interpolation: the legacy "${cv['version']}" form
// is deprecated since PHP 8.2 and removed in PHP 9.
$user_agent = "curl {$cv['version']} ({$cv['host']}) libcurl/{$cv['version']} {$cv['ssl_version']} zlib/{$cv['libz_version']} <" . EMAIL_ADDRESS . ">";
curl_setopt($ch, CURLOPT_USERAGENT, $user_agent);
// Persist cookies across requests in a local file.
curl_setopt($ch, CURLOPT_COOKIEFILE, "cookies.txt");
curl_setopt($ch, CURLOPT_COOKIEJAR, "cookies.txt");
// Accept compressed responses; curl decompresses transparently.
curl_setopt($ch, CURLOPT_ENCODING, "deflate, gzip, identity");
curl_setopt($ch, CURLOPT_HEADER, FALSE);
// Return the body from curl_exec() instead of printing it.
curl_setopt($ch, CURLOPT_RETURNTRANSFER, TRUE);
然后，您可以使用以下代码获取 XML，
并在 $xml_reader 中构造一个新的 XMLReader
对象：
// Issue a plain GET for two non-redirect pages starting at "Re",
// requesting their latest revision content as XML.
$query_url = "http://en.wikipedia.org/w/api.php?action=query&generator=allpages&gaplimit=2&gapfilterredir=nonredirects&gapfrom=Re&prop=revisions&rvprop=content&format=xml";
curl_setopt($ch, CURLOPT_HTTPGET, TRUE);
curl_setopt($ch, CURLOPT_URL, $query_url);
$xml = curl_exec($ch);
// Feed the response body into a pull parser.
$xml_reader = new XMLReader();
$xml_reader->xml($xml, "UTF-8");
编辑:这是一个工作示例:
<?php
// Contact address for the User-Agent header: the MediaWiki API returns
// HTTP 403 Forbidden when no identifying User-Agent is supplied.
define("EMAIL_ADDRESS", "youlichika@hotmail.com");
$ch = curl_init();
$cv = curl_version();
// Use "{$cv['...']}" interpolation: the legacy "${cv['...']}" form is
// deprecated since PHP 8.2 and removed in PHP 9.
$user_agent = "curl {$cv['version']} ({$cv['host']}) libcurl/{$cv['version']} {$cv['ssl_version']} zlib/{$cv['libz_version']} <" . EMAIL_ADDRESS . ">";
curl_setopt($ch, CURLOPT_USERAGENT, $user_agent);
// Persist cookies across requests in a local file.
curl_setopt($ch, CURLOPT_COOKIEFILE, "cookies.txt");
curl_setopt($ch, CURLOPT_COOKIEJAR, "cookies.txt");
// Accept compressed responses; curl decompresses transparently.
curl_setopt($ch, CURLOPT_ENCODING, "deflate, gzip, identity");
curl_setopt($ch, CURLOPT_HEADER, FALSE);
curl_setopt($ch, CURLOPT_RETURNTRANSFER, TRUE);
curl_setopt($ch, CURLOPT_HTTPGET, TRUE);
// NOTE(review): Wikipedia now redirects http:// to https://; without
// CURLOPT_FOLLOWLOCATION this request may return only the redirect body —
// consider switching the scheme to https. Left unchanged here.
curl_setopt($ch, CURLOPT_URL, "http://en.wikipedia.org/w/api.php?action=query&generator=allpages&gaplimit=2&gapfilterredir=nonredirects&gapfrom=Re&prop=revisions&rvprop=content&format=xml");
$xml = curl_exec($ch);
// Parse the API response with a pull parser.
$xml_reader = new XMLReader();
$xml_reader->xml($xml, "UTF-8");
/**
 * Pulls the text of the first <rev> element the reader encounters.
 *
 * The reader is expected to be positioned on (or inside) a <page> element;
 * the revision markup is returned with HTML entities decoded.
 *
 * @param XMLReader $xml_reader positioned cursor over the API response
 * @return string decoded wikitext of the first revision found
 * @throws Exception when </page> or end-of-document is reached first
 */
function extract_first_rev(XMLReader $xml_reader)
{
    while ($xml_reader->read()) {
        $is_open  = $xml_reader->nodeType == XMLReader::ELEMENT;
        $is_close = $xml_reader->nodeType == XMLReader::END_ELEMENT;
        if ($is_open && $xml_reader->name == "rev") {
            // The inner XML carries entity-encoded wikitext; decode it.
            return htmlspecialchars_decode($xml_reader->readInnerXML(), ENT_QUOTES);
        }
        if ($is_close && $xml_reader->name == "page") {
            throw new Exception("Unexpectedly found `</page>`");
        }
    }
    throw new Exception("Reached the end of the XML document without finding revision content");
}
// Walk the query response: each <page> element yields its latest
// revision's wikitext, keyed by page title.
$latest_rev = array();
while ($xml_reader->read()) {
    $on_page_start = $xml_reader->nodeType == XMLReader::ELEMENT
        && $xml_reader->name == "page";
    if ($on_page_start) {
        $title = $xml_reader->getAttribute("title");
        $latest_rev[$title] = extract_first_rev($xml_reader);
    }
}
/**
 * Renders a revision's wikitext to HTML via the MediaWiki parse API.
 *
 * @param string $rev wikitext of a revision
 * @param resource|CurlHandle|null $curl optional configured curl handle;
 *        when null, falls back to the global $ch (backward compatible with
 *        existing one-argument callers)
 * @return string rendered HTML extracted from the <text> element
 * @throws Exception when the response contains no <text> element
 */
function parse($rev, $curl = null)
{
    if ($curl === null) {
        // Backward-compatible fallback to the handle configured at file scope.
        global $ch;
        $curl = $ch;
    }
    curl_setopt($curl, CURLOPT_HTTPGET, TRUE);
    // NOTE(review): Wikipedia redirects http:// to https://; consider
    // switching the scheme or enabling CURLOPT_FOLLOWLOCATION.
    curl_setopt($curl, CURLOPT_URL, "http://en.wikipedia.org/w/api.php?action=parse&text=" . rawurlencode($rev) . "&prop=text&format=xml");
    // Throttle successive requests to be polite to the API.
    sleep(3);
    $xml = curl_exec($curl);
    $xml_reader = new XMLReader();
    $xml_reader->xml($xml, "UTF-8");
    while ($xml_reader->read()) {
        if ($xml_reader->nodeType == XMLReader::ELEMENT
            && $xml_reader->name == "text") {
            // The rendered HTML arrives entity-encoded inside <text>.
            return htmlspecialchars_decode($xml_reader->readInnerXML(), ENT_QUOTES);
        }
    }
    throw new Exception("Failed to parse");
}
// Render each stored revision. Use a distinct value variable: the original
// reused $latest_rev as both the array and the per-iteration value, which
// works only because foreach iterates over a copy, and leaves $latest_rev
// bound to the last revision string after the loop.
foreach ($latest_rev as $title => $rev) {
    echo parse($rev) . "\n";
}