2

假设我们有以下位置(FEN: 8/1K6/8/4q2P/8/8/5k2/8 b - - 3 2):

当搜索深度低于 3 时,我的国际象棋引擎会产生正确的 Qxh5 移动。之后,问题似乎是它认为可以稍后进行捕获(将 Qh2 视为最佳移动)。我看不到任何明显的方法来更喜欢在评估算法中较早进行捕获的分支,因为这会破坏 negamax(和 minimax)工作所需的评估对称性。

仅供参考,这是我实际的 negamax 代码(从 wikipedia 复制):

/// Negamax search with alpha-beta pruning.
///
/// @param positionToSearch  position to search; moves are made and unmade in place
/// @param depth             remaining search depth in plies
/// @param alpha             lower bound of the search window (side to move's view)
/// @param beta              upper bound of the search window
/// @return score of the position from the side to move's point of view
int Search::negamaxSearch (Board& positionToSearch, int depth, int alpha, int beta) {
    std::vector<Move> moves = positionToSearch.getMoves();

    if (moves.empty()) {
        if (positionToSearch.isCheck()) {
            // Checkmate. Bug fix: returning a flat LOSE made every mate score
            // identically, so the engine saw no reason to play the forcing
            // line *now* rather than later (the reported "prefers Qh2 over
            // Qxh5" symptom). Subtracting the remaining depth makes a mate
            // nearer the root (more remaining depth) strictly worse for the
            // mated side, i.e. strictly better for the mating side.
            // NOTE(review): assumes EvaluationConstants::LOSE has enough
            // headroom that LOSE - depth stays above the -1e9 sentinel used
            // below — confirm against the constant's actual value.
            return EvaluationConstants::LOSE - depth;
        } else {
            // No legal moves and not in check: stalemate.
            return EvaluationConstants::DRAW;
        }
    }

    if (depth == 0) {
        // Horizon: fall back to static evaluation.
        // NOTE(review): with no quiescence search the engine can stop in the
        // middle of a capture sequence; extending captures here is the usual
        // fix for the remaining "thinks it can capture later" behaviour.
        return BoardEvaluator::evaluateSimple(positionToSearch);
    }

    // Try likely-best moves first to maximize alpha-beta cutoffs.
    orderMoves(positionToSearch, moves, depth);

    int positionValue = -1e9;  // below every real score, including adjusted mates
    for (const auto& move : moves) {
        positionToSearch.executeMove(move);
        // Negate: the child's score is from the opponent's point of view.
        int newValue = -negamaxSearch(positionToSearch, depth - 1, -beta, -alpha);
        positionToSearch.unmakeMove();

        positionValue = std::max(positionValue, newValue);
        alpha = std::max(alpha, newValue);

        if (alpha >= beta) {
            // Fail-high: the opponent will never allow this line.
            ++cutoffs;
            break;
        }
    }

    return positionValue;
}

以及评价函数:

/// Material evaluation for one side: sum of (piece count * piece value) over
/// every piece type below the king. A checkmated board scores as an outright
/// loss before any material is counted.
/// NOTE(review): board.isCheckmate() does not depend on `perspective`, so this
/// returns LOSE for *both* perspectives on a mated board — confirm callers
/// account for that (see evaluateSimple).
int BoardEvaluator::evaluateSimpleOneSide (const Board& board, PieceColor perspective) {
    if (board.isCheckmate()) {
        return EvaluationConstants::LOSE;
    }

    int material = 0;
    for (int type = 0; type < PieceTypes::KING; ++type) {
        material += board.getPieces(perspective).boards[type].popCount() * pieceValues[type];
    }
    return material;
}

int BoardEvaluator::evaluateSimple (const Board& board) {
    return evaluateSimpleOneSide(board, board.getTurn()) - evaluateSimpleOneSide(board, flip(board.getTurn()));
}

编辑:


// MVV-LVA (Most Valuable Victim, Least Valuable Attacker) move-ordering table,
// indexed as MVV_LVA[victim][attacker]. Higher values mean "try this capture
// earlier": capturing a valuable victim with a cheap attacker scores highest
// (e.g. pawn takes queen = 900), while risking a valuable attacker on a cheap
// victim scores lowest. The last row/column ("None") covers non-captures and
// scores 0 so quiet moves sort after every capture.
constexpr int MVV_LVA[7][7] = {
        {0,  0,   0,   0,   0,   0,   0},       // victim K, attacker K, Q, R, B, N, P, None
        {10, 100, 180, 300, 300, 900, 0}, // victim Q, attacker K, Q, R, B, N, P, None
        {6,  56,  100, 166, 166, 500, 0}, // victim R, attacker K, Q, R, B, N, P, None
        {3,  33,  60,  100, 100, 300, 0}, // victim B, attacker K, Q, R, B, N, P, None
        {3,  33,  60,  100, 100, 300, 0}, // victim N, attacker K, Q, R, B, N, P, None
        {1,  11,  20,  33,  33,  100, 0}, // victim P, attacker K, Q, R, B, N, P, None
        {0,  0,   0,   0,   0,   0,   0},       // victim None, attacker K, Q, R, B, N, P, None
};

/// Heuristic ordering score for a move: MVV-LVA value of the capture (0 for
/// quiet moves) plus the material value of any promotion piece.
int scoreMove (const Board& context, const Move& move) {
    const auto victim = context.getPieceAt(move.getDestination());
    const auto attacker = context.getPieceAt(move.getOrigin());

    // Capture heuristic: valuable victim + cheap attacker = high score.
    int guess = MVV_LVA[victim.type][attacker.type];

    // Promotions are worth roughly the promoted piece's material value.
    guess += BoardEvaluator::pieceValues[move.getPromotedPiece()];

    return guess;
}

/// Sorts `moves` in place, best-guess first, to maximize alpha-beta cutoffs.
///
/// Performance fix: the original comparator called scoreMove twice per
/// comparison, i.e. O(n log n) evaluations of a non-trivial scorer. Here each
/// move is scored exactly once, then an index permutation is sorted instead.
/// (`depth` is currently unused but kept for interface compatibility.)
void Search::orderMoves (Board& positionToSearch, std::vector<Move>& moves, int depth) {
    // One score per move, computed exactly once.
    std::vector<int> scores;
    scores.reserve(moves.size());
    for (const auto& move : moves) {
        scores.push_back(scoreMove(positionToSearch, move));
    }

    // Sort indices by descending score; comparisons are now just int lookups.
    std::vector<std::size_t> order(moves.size());
    for (std::size_t i = 0; i < order.size(); ++i) {
        order[i] = i;
    }
    std::sort(order.begin(), order.end(), [&] (std::size_t a, std::size_t b) {
        return scores[a] > scores[b];
    });

    // Apply the permutation.
    std::vector<Move> sorted;
    sorted.reserve(moves.size());
    for (std::size_t i : order) {
        sorted.push_back(moves[i]);
    }
    moves.swap(sorted);
}


Is there something obvious wrong that I haven't noticed?

EDIT:
I actually managed to solve the mate problem by stopping the search inside my iterative deepening framework, if a checkmate is found at a lower than max level. This helped somewhat, but the capture problem still persists.
4

1 回答 1

0

通过定义一个在 make() 中递增并在 unmake() 中递减的 ply 变量来处理mate score ,在开始搜索时将其设置为 0。

VALUE_MATED = your_value; // the worst score of mated player, say -10000

/// Negamax with alpha-beta pruning and ply-adjusted mate scores.
/// Relies on a Search-level `ply` counter (distance from the root, set to 0
/// when the search starts) that is incremented/decremented around make/unmake.
///
/// Fixes vs. the posted answer: the `return VALUE_MATED + ply` statement was
/// missing its semicolon (compile error), and the no-legal-move sentinel is
/// now a named integer constant instead of comparing an int against the
/// floating-point literal -1e9.
int Search::negamaxSearch (Board& positionToSearch, int depth, int alpha, int beta) {
    // Strictly below every legal score; if no move raises positionValue above
    // it, the side to move had no legal moves (mate or stalemate).
    constexpr int NO_LEGAL_MOVE = -1000000000;  // same value as the old -1e9

    std::vector<Move> moves = positionToSearch.getMoves();

    if (depth == 0) {
        return BoardEvaluator::evaluateSimple(positionToSearch);
    }

    orderMoves(positionToSearch, moves, depth);

    int positionValue = NO_LEGAL_MOVE;
    for (const auto& move : moves) {
        positionToSearch.executeMove(move);
        ply++;   // one ply deeper, kept in lock-step with make/unmake
        int newValue = -negamaxSearch(positionToSearch, depth - 1, -beta, -alpha);
        positionToSearch.unmakeMove();
        ply--;

        positionValue = std::max(positionValue, newValue);
        alpha = std::max(alpha, newValue);

        if (alpha >= beta) {
            ++cutoffs;
            break;
        }
    }

    if (positionValue == NO_LEGAL_MOVE) {
        if (positionToSearch.isCheck()) {
            // Mate: add ply so mates nearer the root score better for the
            // mating side, which makes the engine actually play the mate.
            return VALUE_MATED + ply;
        } else {
            return EvaluationConstants::DRAW;
        }
    }

    return positionValue;
}
于 2022-01-10T23:55:25.653 回答