I don't know much about optimization, but I get the general idea of gradient descent, so I'm trying to adjust the weights so that the gap between the highest score and the lowest score shrinks. My script is below:
# coding: utf-8
import numpy as np

# initial scores with equal weights: 7.72, 7.6, 8.26

def get_max(alist):
    """Return the largest value in alist and its index."""
    max_score = max(alist)
    idx = alist.index(max_score)
    return max_score, idx

def get_min(alist):
    """Return the smallest value in alist and its index."""
    min_score = min(alist)
    idx = alist.index(min_score)
    return min_score, idx

def get_weighted(alist, aweight):
    """Element-wise product of one row of features and the weights."""
    res = []
    for i in range(len(alist)):
        res.append(alist[i] * aweight[i])
    return res

def get_sub(list1, list2):
    """Element-wise difference list1 - list2."""
    res = []
    for i in range(len(list1)):
        res.append(list1[i] - list2[i])
    return res

def grad_dec(w, dist, st=0.001):
    """One update step: lower the weight of the feature that contributes most
    to the max-min gap and raise the weight of the one that contributes least.
    Modifies w in place; the sum of the weights stays constant."""
    max_item, max_item_idx = get_max(dist)
    min_item, min_item_idx = get_min(dist)
    w[max_item_idx] = w[max_item_idx] - st
    w[min_item_idx] = w[min_item_idx] + st

def cal_score(w, x):
    """Weighted sum of each row of x under weights w."""
    score = []
    print('weight', w, x)
    for i in range(len(x)):
        score_i = 0
        for j in range(len(x[i])):
            score_i = w[j] * x[i][j] + score_i
        score.append(score_i)
    print('score', score)
    return score
if __name__ == "__main__":
    # one weight per feature column (x has 5 columns)
    init_w = [0.2, 0.2, 0.2, 0.2, 0.2]
    x = [[7.3, 10, 8.3, 8.8, 4.2],
         [6.8, 8.9, 8.4, 9.7, 4.2],
         [6.9, 9.9, 9.7, 8.1, 6.7]]
    score = cal_score(init_w, x)
    variance = np.var(score)
    for step in range(100):
        if variance < 0.012:
            print('ok')
            break
        max_score, idx = get_max(score)
        min_score, idx2 = get_min(score)
        # per-feature contributions to the gap between the best and worst row
        weighted_1 = get_weighted(x[idx], init_w)
        weighted_2 = get_weighted(x[idx2], init_w)
        dist = get_sub(weighted_1, weighted_2)
        # print(max_score, idx, min_score, idx2, dist)
        grad_dec(init_w, dist)
        score = cal_score(init_w, x)
        variance = np.var(score)
        print('variance', variance)
    print(score)
In my runs it does reduce the variance, which makes me happy, but I don't know whether my solution is mathematically sound.
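For comparison, here is what I believe a plain gradient-descent step on the variance itself would look like. This is only a sketch under my assumption that the objective is np.var(scores) with scores = X @ w; if that is right, the gradient with respect to w_j is (2/n) * sum_i (s_i - mean(s)) * x[i][j], i.e. 2/n * X.T @ (s - s.mean()) in NumPy. My derivation may be off:

import numpy as np

# assumed setup: same data and step size as my script above
X = np.array([[7.3, 10, 8.3, 8.8, 4.2],
              [6.8, 8.9, 8.4, 9.7, 4.2],
              [6.9, 9.9, 9.7, 8.1, 6.7]])
w = np.full(5, 0.2)   # equal starting weights
lr = 0.001            # same step size as st in grad_dec

for step in range(100):
    scores = X @ w                        # s_i = sum_j w_j * x[i][j]
    if np.var(scores) < 0.012:            # same stopping rule as my loop
        break
    centered = scores - scores.mean()
    grad = (2.0 / len(scores)) * (X.T @ centered)   # d Var(s) / d w (my derivation)
    w -= lr * grad                        # plain gradient-descent update

print(np.var(X @ w), w)

One difference I notice is that my original update always subtracts and adds the same step, so the sum of the weights stays constant, while this plain gradient step does not keep that constraint.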