我正在尝试创建一个基本的基于深度优先搜索的网络爬虫。这是我当前的代码:
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.io.*;
import java.net.*;
public class DepthFirstSpider {
private List<String> visitedList; //web pages already visited
private static String hrefExpr = "href\\s*=\\s*\"([^\"]+)\"";
private static Pattern pattern = Pattern.compile(hrefExpr);
private int limit;
private static Matcher matcher;
private static URL contextURL;
private static URL url;
public List<String> getVisitedList() { return visitedList; }
//initialize the visitedlist and limit instance variables. Visit the starting url.
public DepthFirstSpider(int limit, String startingURL) {
visitedList = new ArrayList<String>();
this.limit = limit;
try {
contextURL = new URL(startingURL);
} catch (MalformedURLException e) {
}
visit(startingURL);
}
//print and add urlString to list of visited web pages
//create url and connect, read through html contents:
//when href encountered create new url relative to the current url and visit it (if not already visited and limit not reached)
public void visit(String urlString) {
try{
url = new URL(contextURL, urlString);
URLConnection connection = url.openConnection();
InputStream inputStream = connection.getInputStream();
BufferedReader reader = new BufferedReader(
new InputStreamReader(inputStream));
String nextLine;
while((nextLine=reader.readLine()) != null){
matcher = pattern.matcher(nextLine);
while(matcher.find() && limit > 0 && !visitedList.contains(url.toString())){
System.out.println("visiting " + url.toString());
visitedList.add(url.toString());
visit(matcher.group(1));
limit--;
}
}
} catch (MalformedURLException e){
} catch (IOException e){
}
}
}
目前搜索可以顺利地沿网页树一路向下深入。我需要帮助让它在到达底部后能够回溯,再去访问之前错过的页面。谢谢您的帮助。