I'm using simulated annealing to help solve problems such as the travelling salesman problem. I get a solution I'm happy with, but I would now also like to know the path taken within the solution space (i.e. the sequence of steps the algorithm made) to get from the random, far-from-optimal initial tour to the fairly optimal final one.
In other words, in the case of the travelling salesman problem:
- Start with a random tour through the cities
- Step 1: the paths between cities AC and EF are swapped
- Step 2: the paths between cities GU and SQ are swapped
- ...
- End with the (near-)optimal tour through the cities
Is there a way, using simulated annealing, to save every step of the optimisation process so that I can retrace, one by one, each change made to the system that eventually led to that particular solution? Or does that go against how simulated annealing works?
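To make concrete what I mean by "saving the path", here is a tiny standalone sketch (hypothetical names, plain swap moves with no acceptance test): every applied swap is logged, and replaying the log on the initial tour walks through exactly the same sequence of intermediate tours.

# Minimal illustration of the kind of step log I am after (hypothetical names only)
import random

tour = ['A', 'B', 'C', 'D', 'E']   # initial (random) tour
moves = []                          # log of (i, j) index swaps

for _ in range(10):
    i, j = random.randrange(len(tour)), random.randrange(len(tour))
    tour[i], tour[j] = tour[j], tour[i]   # apply a move
    moves.append((i, j))                  # remember it

replay = ['A', 'B', 'C', 'D', 'E']
for i, j in moves:
    replay[i], replay[j] = replay[j], replay[i]   # retrace one step at a time
assert replay == tour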
Below is an excellent simulated annealing implementation written by @perrygeo, slightly modified from https://github.com/perrygeo/simanneal/blob/master/simanneal/anneal.py, together with the travelling salesman example from https://github.com/perrygeo/simanneal/blob/master/examples/salesman.py (each of my changes is followed by an inline comment starting with "###"):
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import math
import sys
import time
import random
def round_figures(x, n):
    """Returns x rounded to n significant figures."""
    return round(x, int(n - math.ceil(math.log10(abs(x)))))

def time_string(seconds):
    """Returns time in seconds as a string formatted HHHH:MM:SS."""
    s = int(round(seconds))  # round to nearest second
    h, s = divmod(s, 3600)   # get hours and remainder
    m, s = divmod(s, 60)     # split remainder into minutes and seconds
    return '%4i:%02i:%02i' % (h, m, s)

class Annealer(object):
    """Performs simulated annealing by calling functions to calculate
    energy and make moves on a state. The temperature schedule for
    annealing may be provided manually or estimated automatically.
    """

    Tmax = 25000.0
    Tmin = 2.5
    steps = 50000
    updates = 100
    copy_strategy = 'deepcopy'

    def __init__(self, initial_state):
        self.initial_state = initial_state
        self.state = self.copy_state(initial_state)

    def set_schedule(self, schedule):
        """Takes the output from `auto` and sets the attributes
        """
        self.Tmax = schedule['tmax']
        self.Tmin = schedule['tmin']
        self.steps = int(schedule['steps'])

    def copy_state(self, state):
        """Returns an exact copy of the provided state
        Implemented according to self.copy_strategy, one of
        * deepcopy : use copy.deepcopy (slow but reliable)
        * slice: use list slices (faster but only works if state is list-like)
        * method: use the state's copy() method
        """
        if self.copy_strategy == 'deepcopy':
            return copy.deepcopy(state)
        elif self.copy_strategy == 'slice':
            return state[:]
        elif self.copy_strategy == 'method':
            return state.copy()

    def update(self, step, T, E, acceptance, improvement):
        """Prints the current temperature, energy, acceptance rate,
        improvement rate, elapsed time, and remaining time.
        The acceptance rate indicates the percentage of moves since the last
        update that were accepted by the Metropolis algorithm. It includes
        moves that decreased the energy, moves that left the energy
        unchanged, and moves that increased the energy yet were reached by
        thermal excitation.
        The improvement rate indicates the percentage of moves since the
        last update that strictly decreased the energy. At high
        temperatures it will include both moves that improved the overall
        state and moves that simply undid previously accepted moves that
        increased the energy by thermal excitation. At low temperatures
        it will tend toward zero as the moves that can decrease the energy
        are exhausted and moves that would increase the energy are no longer
        thermally accessible."""
        elapsed = time.time() - self.start
        if step == 0:
            print(' Temperature Energy Accept Improve Elapsed Remaining')
            print('%12.2f %12.2f %s ' %
                  (T, E, time_string(elapsed)))
        else:
            remain = (self.steps - step) * (elapsed / step)
            print('%12.2f %12.2f %7.2f%% %7.2f%% %s %s' %
                  (T, E, 100.0 * acceptance, 100.0 * improvement,
                   time_string(elapsed), time_string(remain)))

    def anneal(self):
        """Minimizes the energy of a system by simulated annealing.
        Parameters
        state : an initial arrangement of the system
        Returns
        (state, energy): the best state and energy found.
        """
        step = 0
        self.start = time.time()
        steps = []  ### initialise a list to save the steps taken by the algorithm to find a good solution

        # Precompute factor for exponential cooling from Tmax to Tmin
        if self.Tmin <= 0.0:
            raise Exception('Exponential cooling requires a minimum '
                            'temperature greater than zero.')
        Tfactor = -math.log(self.Tmax / self.Tmin)

        # Note initial state
        T = self.Tmax
        E = self.energy()
        prevState = self.copy_state(self.state)
        prevEnergy = E
        bestState = self.copy_state(self.state)
        bestEnergy = E
        trials, accepts, improves = 0, 0, 0
        if self.updates > 0:
            updateWavelength = self.steps / self.updates
            self.update(step, T, E, None, None)

        # Attempt moves to new states
        while step < self.steps:
            step += 1
            T = self.Tmax * math.exp(Tfactor * step / self.steps)
            a, b = self.move()
            E = self.energy()
            dE = E - prevEnergy
            trials += 1
            if dE > 0.0 and math.exp(-dE / T) < random.random():
                # Restore previous state
                self.state = self.copy_state(prevState)
                E = prevEnergy
            else:
                # Accept new state and compare to best state
                accepts += 1
                if dE < 0.0:
                    improves += 1
                prevState = self.copy_state(self.state)
                prevEnergy = E
                steps.append([a, b])  ### append the "good move" to the list of steps
                if E < bestEnergy:
                    bestState = self.copy_state(self.state)
                    bestEnergy = E
            if self.updates > 1:
                if step // updateWavelength > (step - 1) // updateWavelength:
                    self.update(
                        step, T, E, accepts / trials, improves / trials)
                    trials, accepts, improves = 0, 0, 0

        # Return best state and energy
        return bestState, bestEnergy, steps  ### added steps to what should be returned

def distance(a, b):
    """Calculates distance between two latitude-longitude coordinates."""
    R = 3963  # radius of Earth (miles)
    lat1, lon1 = math.radians(a[0]), math.radians(a[1])
    lat2, lon2 = math.radians(b[0]), math.radians(b[1])
    return math.acos(math.sin(lat1) * math.sin(lat2) +
                     math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R

class TravellingSalesmanProblem(Annealer):
    """Test annealer with a travelling salesman problem.
    """

    # pass extra data (the distance matrix) into the constructor
    def __init__(self, state, distance_matrix):
        self.distance_matrix = distance_matrix
        super(TravellingSalesmanProblem, self).__init__(state)  # important!

    def move(self):
        """Swaps two cities in the route."""
        a = random.randint(0, len(self.state) - 1)
        b = random.randint(0, len(self.state) - 1)
        self.state[a], self.state[b] = self.state[b], self.state[a]
        return a, b  ### return the change made

    def energy(self):
        """Calculates the length of the route."""
        e = 0
        for i in range(len(self.state)):
            e += self.distance_matrix[self.state[i - 1]][self.state[i]]
        return e

if __name__ == '__main__':

    # latitude and longitude for the twenty largest U.S. cities
    cities = {
        'New York City': (40.72, 74.00),
        'Los Angeles': (34.05, 118.25),
        'Chicago': (41.88, 87.63),
        'Houston': (29.77, 95.38),
        'Phoenix': (33.45, 112.07),
        'Philadelphia': (39.95, 75.17),
        'San Antonio': (29.53, 98.47),
        'Dallas': (32.78, 96.80),
        'San Diego': (32.78, 117.15),
        'San Jose': (37.30, 121.87),
        'Detroit': (42.33, 83.05),
        'San Francisco': (37.78, 122.42),
        'Jacksonville': (30.32, 81.70),
        'Indianapolis': (39.78, 86.15),
        'Austin': (30.27, 97.77),
        'Columbus': (39.98, 82.98),
        'Fort Worth': (32.75, 97.33),
        'Charlotte': (35.23, 80.85),
        'Memphis': (35.12, 89.97),
        'Baltimore': (39.28, 76.62)
    }

    # initial state, a randomly-ordered itinerary
    init_state = list(cities.keys())
    random.shuffle(init_state)
    reconstructed_state = init_state

    # create a distance matrix
    distance_matrix = {}
    for ka, va in cities.items():
        distance_matrix[ka] = {}
        for kb, vb in cities.items():
            if kb == ka:
                distance_matrix[ka][kb] = 0.0
            else:
                distance_matrix[ka][kb] = distance(va, vb)

    tsp = TravellingSalesmanProblem(init_state, distance_matrix)
    # since our state is just a list, slice is the fastest way to copy
    tsp.copy_strategy = "slice"
    state, e, steps = tsp.anneal()

    while state[0] != 'New York City':
        state = state[1:] + state[:1]  # rotate NYC to start

    print("Results:")
    for city in state:
        print("\t", city)

    ### reconstruct the annealing process by replaying the saved steps
    print("")
    print("nbr. of steps:", len(steps))
    print("Reconstructed results:")
    for s in steps:
        reconstructed_state[s[0]], reconstructed_state[s[1]] = reconstructed_state[s[1]], reconstructed_state[s[0]]
    while reconstructed_state[0] != 'New York City':
        reconstructed_state = reconstructed_state[1:] + reconstructed_state[:1]  # rotate NYC to start
    for city in reconstructed_state:
        print("\t", city)
Saving at every accepted move like this builds up a huge list of steps that is indeed retraceable. However, it obviously mirrors how the algorithm explores and jumps between many different positions in the solution space, especially at high temperatures.
To get a list of steps that converges more directly, I could move the step-saving line:
steps.append([a,b]) ### append the "good move" to the list of steps
to below
if E < bestEnergy:
so that only the steps that actually improve on the best solution found so far are saved. However, the resulting list of steps is then no longer useful for reconstructing the tour (steps are missing).
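One middle ground I've been considering (just a rough standalone sketch with hypothetical names and a dummy objective, not simanneal's API): keep appending every accepted move, but additionally remember how far into the log the best state was reached, so that replaying the log up to that point reconstructs the best tour while the full log still records the exploration.

# Sketch only: log every accepted swap, and mark how many of them led to the best state
import random

initial = list(range(8))      # stand-in for the initial random tour
state = initial[:]
accepted_moves = []           # every accepted (i, j) swap, in order
best_score = None             # best objective value seen so far
best_cutoff = 0               # number of accepted moves that produced the best state

def score(tour):
    # dummy objective used only for this sketch; the real code uses tour length
    return sum(abs(x - y) for x, y in zip(tour, tour[1:]))

for _ in range(200):
    i, j = random.randrange(len(state)), random.randrange(len(state))
    state[i], state[j] = state[j], state[i]   # pretend every move is accepted
    accepted_moves.append((i, j))
    if best_score is None or score(state) < best_score:
        best_score = score(state)
        best_cutoff = len(accepted_moves)     # mark where the best state occurred

# Replaying only the first `best_cutoff` moves reproduces the best state found,
# while the full log still shows all the exploratory jumps
replay = initial[:]
for i, j in accepted_moves[:best_cutoff]:
    replay[i], replay[j] = replay[j], replay[i]
print(score(replay) == best_score)            # True

In the annealer above this would amount to recording len(steps) inside the `if E < bestEnergy:` branch rather than moving the `steps.append` line, but I'm not sure whether that is the cleanest approach.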
Is this problem hopeless and inherent to how simulated annealing works, or is there hope of being able to build a list of steps that converges from the random tour to the quasi-optimal one?