我正在尝试从 PDF 中提取每个链接。我可以使用以下代码获取每个超链接:
folder = "test_folder"
folder_data = [os.path.join(dp, f) for dp, dn, filenames in os.walk(folder) for f in filenames if os.path.splitext(f)[1] == '.pdf']
data = [loc.replace("\\", "/") for loc in folder_data]
for loc in data:
doc = fitz.open(loc)
#color_check(doc, count)
file_name = loc.split("/")[-1]
print (f"INFO: Crawling over file {file_name}, number {count} of {len(data)}")
count += 1
for page in doc:
links = page.getLinks()
print(links)
for link in links:
uri_rect = []
uri_rect.append([round(link['from'][0], 2), round(link['from'][1], 2), round(link['from'][2], 2), round(link['from'][3], 2)])
words_in_document = page.getTextWords()
#print(links)
for word in words_in_document:
word_rect = []
word_rect.append([round(word[0], 2), round(word[1], 2), round(word[2], 2), round(word[3], 2)])
rect_dif_percentage = len(set(uri_rect[0])&set(word_rect[0])) / float(len(set(uri_rect[0]) | set(word_rect[0]))) * 100
if rect_dif_percentage >= 60:
#If link links to a file
try:
referenced_file_name = link['file'].split("/")[1]
referenced_file_path = link['file'].split("/")[0]
for file_loc in range(len(data)):
if referenced_file_name in data[file_loc]:
referenced_file_path = data[file_loc]
output.append([loc, word[4], referenced_file_path])
#If link links to a website
except:
referenced_file_name = "N/A"
referenced_file_path = link['uri']
output.append([loc, word[4], referenced_file_path])
with open("output.csv", "a", newline="") as f:
writer = csv.writer(f)
writer.writerows(output)
print("INFO: Crawling completed, you can close this window and check output.csv")
问题如下。如果超链接有多个单词,我将无法获取第二个单词,因为我使用的是 page.getLinks() 中的矩形,并且此方法只能找到超链接的第一个单词。
例如,下面的超链接: 点击我!
我的代码只能获取 ''Click'' 字符串。
我能做些什么来解决这个问题?我被困住了,我什么都想不起来。此外,如果您有其他不使用 PyMuPDF 的解决方案,欢迎使用!