I want to plot or visualize the result of each layer of a trained CNN, using mxnet in R, like those abstract-art images you can see for each layer of a NN.

But I don't know how to do it. Could someone please help me? One way I can think of is to feed the trained weights and biases back into each step and plot that step's output. But when I try to pass model$arg.params$convolution0_weight back into mx.symbol.Convolution(), I get

Error in mx.varg.symbol.Convolution(list(...)) : 
  ./base.h:291: Unsupported parameter type object type for argument weight, expect integer, logical, or string.
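(My guess at why this fails: the symbolic API builds a computation graph, so the weight argument has to be another symbol that an executor binds to values later, not an NDArray of trained weights. A sketch of what mx.symbol.Convolution() seems to expect, using my layer's settings; the placeholder variable is my own:)

library(mxnet)
# Sketch (my understanding, not verified): weight must be a graph
# placeholder, not the trained values stored in model$arg.params.
data <- mx.symbol.Variable("data")
w <- mx.symbol.Variable("convolution0_weight")
conv <- mx.symbol.Convolution(data = data, weight = w,
                              kernel = c(8, 8), num_filter = 50)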

Can anyone help me?

2 Answers

Here is code that can help you achieve what you want. The code below displays the activations of the 2 convolution layers of LeNet. It takes as input the MNIST dataset, which consists of 28x28 grayscale images (downloaded automatically), and produces the activations as images.

You can get the outputs from the executor. To see the list of available outputs, use names(executor$ref.outputs).

The result of each output is available as a matrix with values in the range [-1, 1]. The dimensions of the matrix depend on the parameters of the layer. The code displays these matrices as grayscale images, where -1 is a white pixel and 1 is a black pixel. (Most of the code is taken from https://github.com/apache/incubator-mxnet/issues/1152, with slight modifications.)

The code is self-sufficient to run, but I have noticed that if I build the model a second time in the same R session, the output names get different indices, and the code then fails because the expected output names are hardcoded. So if you decide to create the model more than once, you will need to restart the R session.

I hope this helps, and that you can adapt this example to your case.

library(mxnet)

download.file('https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/mnist_csv.zip', destfile = 'mnist_csv.zip')
unzip('mnist_csv.zip', exdir = '.')

train <- read.csv('train.csv', header=TRUE)

data.x <- train[,-1]
data.x <- data.x/255
data.y <- train[,1]

val_ind = 1:100

train.x <- data.x[-val_ind,]
train.x <- t(data.matrix(train.x))
train.y <- data.y[-val_ind]

val.x <- data.x[val_ind,]
val.x <- t(data.matrix(val.x))
val.y <- data.y[val_ind]

train.array <- train.x
dim(train.array) <- c(28, 28, 1, ncol(train.x))

val.array <- val.x
dim(val.array) <- c(28, 28, 1, ncol(val.x))

# input layer
data <- mx.symbol.Variable('data')
# first convolutional layer
convLayer1 <- mx.symbol.Convolution(data=data, kernel=c(5,5), num_filter=30)
convAct1 <- mx.symbol.Activation(data=convLayer1, act_type="tanh")
poolLayer1 <- mx.symbol.Pooling(data=convAct1, pool_type="max", kernel=c(2,2), stride=c(2,2))
# second convolutional layer
convLayer2 <- mx.symbol.Convolution(data=poolLayer1, kernel=c(5,5), num_filter=60)
convAct2 <- mx.symbol.Activation(data=convLayer2, act_type="tanh")
poolLayer2 <- mx.symbol.Pooling(data=convAct2, pool_type="max",
                                kernel=c(2,2), stride=c(2,2))

# big hidden layer
flattenData <- mx.symbol.Flatten(data=poolLayer2)
hiddenLayer <- mx.symbol.FullyConnected(flattenData, num_hidden=500)
hiddenAct <- mx.symbol.Activation(hiddenLayer, act_type="tanh")
# softmax output layer
outLayer <- mx.symbol.FullyConnected(hiddenAct, num_hidden=10)
LeNet1 <- mx.symbol.SoftmaxOutput(outLayer)


# Group some output layers for visual analysis
out <- mx.symbol.Group(c(convAct1, poolLayer1, convAct2, poolLayer2, LeNet1))
# Create an executor
executor <- mx.simple.bind(symbol=out, data=dim(val.array), ctx=mx.cpu())


# Prepare for training the model
mx.set.seed(0)
# Set a logger to keep track of callback data
logger <- mx.metric.logger$new()
# Using cpu by default, but set gpu if your machine has a supported one
devices=mx.cpu(0)
# Train model
model <- mx.model.FeedForward.create(LeNet1, X=train.array, y=train.y,
                                     eval.data=list(data=val.array, label=val.y),
                                     ctx=devices, 
                                     num.round=1, 
                                     array.batch.size=100,
                                     learning.rate=0.05, 
                                     momentum=0.9, 
                                     wd=0.00001,
                                     eval.metric=mx.metric.accuracy,
                                     epoch.end.callback=mx.callback.log.train.metric(100, logger))


# Update parameters
mx.exec.update.arg.arrays(executor, model$arg.params, match.name=TRUE)
mx.exec.update.aux.arrays(executor, model$aux.params, match.name=TRUE)
# Select data to use
mx.exec.update.arg.arrays(executor, list(data=mx.nd.array(val.array)), match.name=TRUE)
# Do a forward pass with the current parameters and data
mx.exec.forward(executor, is.train=FALSE)
# List of outputs available.
names(executor$ref.outputs)
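# In a fresh session, this list should look something like the following
# (the trailing indices are assigned in creation order, which is why the
# hardcoded names below break if the model is rebuilt in the same session):
#   "activation0_output" "pooling0_output" "activation1_output"
#   "pooling1_output"    "softmaxoutput0_output"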


# Plot the filters of a sample from validation set
sample_index <- 99 # sample number in the validation set; change it to see other samples

activation0_filter_count <- 30 # number of filters of the "convLayer1" layer 
par(mfrow=c(6,5), mar=c(0.1,0.1,0.1,0.1))  # number of rows x columns in output
dim(executor$ref.outputs$activation0_output)

for (i in 1:activation0_filter_count) {
  outputData <- as.array(executor$ref.outputs$activation0_output)[,,i,sample_index]
  image(outputData,
        xaxt='n', yaxt='n',
        col=gray(seq(1,0,-0.1)))
}

activation1_filter_count <- 60 # number of filters of the "convLayer2" layer 

dim(executor$ref.outputs$activation1_output)
par(mfrow=c(6,10), mar=c(0.1,0.1,0.1,0.1)) # number of rows x columns in output
for (i in 1:activation1_filter_count) {
  outputData <- as.array(executor$ref.outputs$activation1_output)[,,i,sample_index]
  image(outputData,
        xaxt='n', yaxt='n',
        col=gray(seq(1,0,-0.1)))
}
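If you would rather keep the grids as files than page through the plot window, the same loop can be wrapped in a png() device. A minimal sketch for the first layer; the file name and size are my own choices:

png("activation0_grid.png", width = 1000, height = 1200)
par(mfrow=c(6,5), mar=c(0.1,0.1,0.1,0.1))
for (i in 1:activation0_filter_count) {
  outputData <- as.array(executor$ref.outputs$activation0_output)[,,i,sample_index]
  image(outputData,
        xaxt='n', yaxt='n',
        col=gray(seq(1,0,-0.1)))
}
dev.off()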

As a result, you should see the following images for the selected validation sample (use the left and right arrows in RStudio to navigate between them).

First layer activations

Second layer activations

Answered 2018-02-14T01:38:58.383

I figured out an approach, but ran into difficulties step by step. Here is what I did. I found all the trained CNN parameters in model$arg.params, and we can compute with those parameters using the mx.nd... functions, as follows:

# convolution 1 result

conv1_result <- mxnet::mx.nd.Convolution(data = mx.nd.array(train_array),
                                         weight = model$arg.params$convolution0_weight,
                                         bias = model$arg.params$convolution0_bias,
                                         kernel = c(8,8), num_filter = 50)

str(conv1_result)

tanh1_result <- mx.nd.Activation(data = conv1_result, act_type = "sigmoid")
pool1_result <- mx.nd.Pooling(data = tanh1_result, pool_type = "avg", kernel = c(4,4), stride = c(4,4))

# convolution 2 result

conv2_result <- mxnet::mx.nd.Convolution(data = pool1_result,
                                         weight = model$arg.params$convolution1_weight,
                                         bias = model$arg.params$convolution1_bias,
                                         kernel = c(5,5), num_filter = 50)
tanh2_result <- mx.nd.Activation(data = conv2_result, act_type = "sigmoid")
pool2_result <- mx.nd.Pooling(data = tanh2_result, pool_type = "avg", kernel = c(4,4), stride = c(4,4))

# first fully connected layer result

flat_result <- mx.nd.flatten(data = pool2_result)
fcl_1_result <- mx.nd.FullyConnected(data = flat_result,
                                     weight = model$arg.params$fullyconnected0_weight,
                                     bias = model$arg.params$fullyconnected0_bias,
                                     num_hidden = 500)
tanh_3_result <- mx.nd.Activation(data = fcl_1_result, act_type = "tanh")

# second fully connected layer result

fcl_2_result <- mx.nd.FullyConnected(data = tanh_3_result,
                                     weight = model$arg.params$fullyconnected1_weight,
                                     bias = model$arg.params$fullyconnected1_bias,
                                     num_hidden = 100)

But when I got to mx.nd.FullyConnected(), I ran out of memory (I have 16 GB of RAM) and R crashed. So, does anyone know how to feed the input data to mx.nd.FullyConnected() in batches, or any other way to run mx.nd.FullyConnected() successfully, the way mx.model.FeedForward.create() did?
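(A workaround I have been considering but have not verified: run the same mx.nd pipeline on slices of the input, so that the flattened input to mx.nd.FullyConnected() never holds all samples at once. A sketch reusing the layer settings above; batch_size, forward_chunk, and the chunking loop are my own additions:)

# Untested sketch: hand-built forward pass over one chunk of samples,
# with the same parameters and layer settings as above.
forward_chunk <- function(chunk) {
  conv1 <- mx.nd.Convolution(data = chunk,
                             weight = model$arg.params$convolution0_weight,
                             bias = model$arg.params$convolution0_bias,
                             kernel = c(8,8), num_filter = 50)
  tanh1 <- mx.nd.Activation(data = conv1, act_type = "sigmoid")
  pool1 <- mx.nd.Pooling(data = tanh1, pool_type = "avg", kernel = c(4,4), stride = c(4,4))
  conv2 <- mx.nd.Convolution(data = pool1,
                             weight = model$arg.params$convolution1_weight,
                             bias = model$arg.params$convolution1_bias,
                             kernel = c(5,5), num_filter = 50)
  tanh2 <- mx.nd.Activation(data = conv2, act_type = "sigmoid")
  pool2 <- mx.nd.Pooling(data = tanh2, pool_type = "avg", kernel = c(4,4), stride = c(4,4))
  flat <- mx.nd.flatten(data = pool2)
  fcl1 <- mx.nd.FullyConnected(data = flat,
                               weight = model$arg.params$fullyconnected0_weight,
                               bias = model$arg.params$fullyconnected0_bias,
                               num_hidden = 500)
  tanh3 <- mx.nd.Activation(data = fcl1, act_type = "tanh")
  mx.nd.FullyConnected(data = tanh3,
                       weight = model$arg.params$fullyconnected1_weight,
                       bias = model$arg.params$fullyconnected1_bias,
                       num_hidden = 100)
}

batch_size <- 100                    # my own choice; lower it if memory is still tight
n_samples <- dim(train_array)[4]     # samples are the last dimension of the array
fcl2_chunks <- lapply(seq(1, n_samples, by = batch_size), function(start) {
  end <- min(start + batch_size - 1, n_samples)
  chunk <- mx.nd.array(train_array[, , , start:end, drop = FALSE])
  as.array(forward_chunk(chunk))     # pull each chunk's result back into R
})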

Answered 2017-10-30T15:40:08.317