我查看了一些常用工具,如Heapy来测量每种遍历技术使用了多少内存,但我不知道它们是否给了我正确的结果。这是一些给出上下文的代码。
该代码只是测量图中唯一节点的数量。提供了两种遍历技术,即。count_bfs
和count_dfs
import sys
from guppy import hpy
class Graph:
def __init__(self, key):
self.key = key #unique id for a vertex
self.connections = []
self.visited = False
def count_bfs(start):
parents = [start]
children = []
count = 0
while parents:
for ind in parents:
if not ind.visited:
count += 1
ind.visited = True
for child in ind.connections:
children.append(child)
parents = children
children = []
return count
def count_dfs(start):
if not start.visited:
start.visited = True
else:
return 0
n = 1
for connection in start.connections:
n += count_dfs(connection)
return n
def construct(file, s=1):
"""Constructs a Graph using the adjacency matrix given in the file
:param file: path to the file with the matrix
:param s: starting node key. Defaults to 1
:return start vertex of the graph
"""
d = {}
f = open(file,'rU')
size = int(f.readline())
for x in xrange(1,size+1):
d[x] = Graph(x)
start = d[s]
for i in xrange(0,size):
l = map(lambda x: int(x), f.readline().split())
node = l[0]
for child in l[1:]:
d[node].connections.append(d[child])
return start
if __name__ == "__main__":
s = construct(sys.argv[1])
#h = hpy()
print(count_bfs(s))
#print h.heap()
s = construct(sys.argv[1])
#h = hpy()
print(count_dfs(s))
#print h.heap()
我想知道两种遍历技术的总内存利用率不同的因素是什么。count_dfs
和count_bfs
?一个人的直觉dfs
可能是昂贵的,因为每个函数调用都会创建一个新堆栈。如何测量每种遍历技术中的总内存分配?
(评论的)hpy
陈述是否给出了所需的衡量标准?
带有连接的示例文件:
4
1 2 3
2 1 3
3 4
4