That server seems to respond quite nicely to XMLHTTP requests and does not require that you move through subsequent pages for the remainder of the content.
Sub Get_Listings()
    Dim sURL As String, iDIV As Long
    Dim htmlBDY As HTMLDocument, xmlHTTP As MSXML2.ServerXMLHTTP60
    Dim cDIVs As IHTMLElementCollection

    Set xmlHTTP = New MSXML2.ServerXMLHTTP60
    Set htmlBDY = New HTMLDocument

    'sURL = "http://web.vermont.org/Accounting?ysort=true"
    sURL = "http://web.vermont.org/Dining?ysort=true"

    With xmlHTTP
        .Open "GET", sURL, False                 'False = synchronous request
        .setRequestHeader "Content-Type", "text/xml"
        .send
        'redundant for a synchronous request but harmless; the
        'READYSTATE_COMPLETE constant comes from Microsoft Internet Controls
        Do While .readyState <> READYSTATE_COMPLETE: DoEvents: Loop
        If .Status <> 200 Then GoTo CleanUp
        htmlBDY.body.innerHTML = .responseText
    End With

    'collect the listing-title containers once, then walk the collection
    Set cDIVs = htmlBDY.getElementsByClassName("ListingResults_All_ENTRYTITLELEFTBOX")
    For iDIV = 0 To cDIVs.Length - 1
        'some containers hold no anchor; skip those
        If cDIVs(iDIV).getElementsByTagName("a").Length > 0 Then
            Debug.Print cDIVs(iDIV).getElementsByTagName("a")(0).innerText
        End If
    Next iDIV

CleanUp:
    Set cDIVs = Nothing
    Set htmlBDY = Nothing
    Set xmlHTTP = Nothing
End Sub
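If you would rather land the titles on a worksheet than in the Immediate window, the loop at the bottom can write down a column instead. This is just a sketch, assuming the active sheet is a safe place to dump the results:

    'sketch only: replaces the Debug.Print loop above, writing titles to column A
    Dim rw As Long
    rw = 1
    For iDIV = 0 To cDIVs.Length - 1
        If cDIVs(iDIV).getElementsByTagName("a").Length > 0 Then
            ActiveSheet.Cells(rw, "A") = cDIVs(iDIV).getElementsByTagName("a")(0).innerText
            rw = rw + 1
        End If
    Next iDIV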
You will need Microsoft XML, v6.0, Microsoft HTML Object Library and Microsoft Internet Controls added in Tools ► References (that last one is only needed for the READYSTATE_COMPLETE constant). I'm offering this snippet because I could find no Terms of Use on that site that banned the use of robotic scrapers. Be careful that you do not get your IP banned due to repetitive scraping requests.
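If you prefer to avoid setting references altogether, late binding works as well. The following is only a sketch: the Sub name is mine, the ProgIDs are the standard MSXML2.ServerXMLHTTP.6.0 and htmlfile, and the loop compares className directly because a late-bound htmlfile document does not reliably expose getElementsByClassName. The synchronous .send blocks until the response is complete, so no readyState loop (and no READYSTATE_COMPLETE constant) is needed.

Sub Get_Listings_LateBound()
    'late-bound sketch; no Tools ► References required
    Dim xmlHTTP As Object, htmlBDY As Object, oDIV As Object

    Set xmlHTTP = CreateObject("MSXML2.ServerXMLHTTP.6.0")
    Set htmlBDY = CreateObject("htmlfile")

    With xmlHTTP
        .Open "GET", "http://web.vermont.org/Dining?ysort=true", False
        .send
        If .Status <> 200 Then Exit Sub
        htmlBDY.body.innerHTML = .responseText
    End With

    'walk every DIV and match the listing class by name
    For Each oDIV In htmlBDY.getElementsByTagName("div")
        If oDIV.className = "ListingResults_All_ENTRYTITLELEFTBOX" Then
            If oDIV.getElementsByTagName("a").Length > 0 Then
                Debug.Print oDIV.getElementsByTagName("a")(0).innerText
            End If
        End If
    Next oDIV
End Sub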