I'm trying to write a simple Q-learning algorithm, but for whatever reason it won't converge. The agent should simply travel from one point on a 5x5 grid to a goal point. When I run it, it appears to find the optimal path, yet it never converges and I can't figure out why. Any help would be appreciated. I suspect there is a small mistake somewhere, which is why I'm looking for a fresh pair of eyes.
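For reference, the update rule I'm trying to implement is the standard Q-learning update with a learning rate. Here is a small standalone sketch of that rule (the toy R/Q, s, a and sNext below are purely illustrative, not the values from my listing):

% Sketch of the textbook Q-learning update I'm trying to implement
% (toy standalone example; s, a, sNext and this tiny R/Q are illustrative only)
alpha = 0.5; gamma = 0.8;
R = [0 10; 0 0];          % toy reward matrix
Q = zeros(2,2);           % toy Q-table
s = 1; a = 2; sNext = 2;  % one observed transition
target = R(s,a) + gamma * max(Q(sNext,:));   % bootstrapped return estimate
Q(s,a) = Q(s,a) + alpha * (target - Q(s,a)); % move old estimate toward target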
Code:
function Q=ReinforcementLearning
clc;
format short
format compact
% three inputs: R, alpha and gamma
% immediate reward matrix;
% rows and columns = states; -Inf = no door between rooms
R=[-inf, -inf,    0,    0, -inf;
   -inf, -inf,    0,    0, -inf;
      0,    0, -inf, -inf,  100;
      0,    0, -inf, -inf, -inf;
   -inf, -inf,    0, -inf,  100];
gamma=0.8;            % discount factor
alpha=0.5;            % learning rate
oldQ = 0;
Q=zeros(size(R));     % initialize Q as zero
q1=ones(size(R))*inf; % initialize previous Q as big number
count=0;              % counter
for episode=0:50000
    % random initial state
    y=randperm(size(R,1));
    state=y(1);

    % select any action from this state
    x=find(R(state,:)>=0); % find possible actions of this state
    if size(x,1)>0,
        x1=RandomPermutation(x); % randomize the possible actions
        x1=x1(1);                % select an action
    end

    MaxQ=max(Q,[],2);
    %Q(state,x1) = R(state,x1) + (gamma * MaxQ(x1)); % old update that works perfectly (converges)
    Q(state,x1)= oldQ + alpha * (R(state,x1)+ (gamma * MaxQ(x1)) - oldQ); % new one that I need to implement
    oldQ = Q(state,x1);
    state=x1; %#ok<NASGU>
    %Q = round(Q);

    % break on convergence: small deviation of Q for 1000 consecutive episodes
    if sum(sum(abs(q1-Q)))<5 & sum(sum(Q > 0))
        if count>1000,
            episode % report last episode
            break   % for
        else
            count=count+1; % increment counter while deviation of Q is small
        end
    else
        q1=Q;
        count=0; % reset counter when deviation of Q from previous Q is large
    end
end
% normalize Q
g=max(max(Q));
episode
if g>0,
    Q=100*Q/g;
    roundQ = round(Q);
    roundQ
end