I am having a problem running modified code that uses the Cleverhans library. I am trying to run a modified version of the mnist_blackbox.py example, where the input is 3*680*1 instead of 28*28*1. x_adv_sub is a 1*680*1 tensor, and it is combined with x_test_rest_tf, a 2*680*1 tensor, to produce the 3*680*1 tensor concat_adv that is passed to the model_eval function:
accuracy = model_eval(sess, x, y, model.get_logits(concat_adv),
                      x_test, y_test, args=eval_params)
However, when it runs, it raises the following error:
"tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_2' with dtype float and shape [?,1,680,1]"
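To check my understanding of the error, here is a toy sketch (not my actual script; the names are made up). If I understand TensorFlow 1.x correctly, sess.run fails in exactly this way whenever the tensor being evaluated depends on a placeholder that is not fed:

import numpy as np
import tensorflow as tf

p = tf.placeholder(tf.float32, shape=(None, 1, 680, 1))
doubled = p * 2.0  # any op built on top of the placeholder

with tf.Session() as sess:
    # sess.run(doubled)  # raises: You must feed a value for placeholder tensor ...
    out = sess.run(doubled,
                   feed_dict={p: np.zeros((4, 1, 680, 1), dtype=np.float32)})
    print(out.shape)  # (4, 1, 680, 1)

The shape [?,1,680,1] in the error message seems to match the x_vul placeholder defined in my code below.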
I even tried concatenating three copies of the same 1*680*1 x_adv_sub tensor to build the input for model_eval, but I still get the same error:
concat_adv = tf.concat([x_adv_sub, x_adv_sub, x_adv_sub], 1)
I don't know how to concatenate tensors that are defined as placeholders. I would really appreciate it if someone could help me.
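For reference, this is how I understand concatenation with a placeholder to work in isolation; the snippet below is only a toy sketch, not my actual script, and the names and shapes are made up:

import numpy as np
import tensorflow as tf

x_vul_toy = tf.placeholder(tf.float32, shape=(None, 1, 680, 1))
rest_np = np.zeros((8, 2, 680, 1), dtype=np.float32)
rest_tf = tf.convert_to_tensor(rest_np)

# Concatenating along axis 1 gives an (8, 3, 680, 1) tensor; the result
# still depends on the placeholder x_vul_toy.
combined = tf.concat([x_vul_toy, rest_tf], 1)

with tf.Session() as sess:
    out = sess.run(combined,
                   feed_dict={x_vul_toy: np.zeros((8, 1, 680, 1), dtype=np.float32)})
    print(out.shape)  # (8, 3, 680, 1)

In my script, however, the concatenated tensor is evaluated inside model_eval, and that is where the error above is raised.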
Here is part of the code:
def mnist_blackbox(train_start=0, train_end=1000, test_start=0,
                   test_end=200, nb_classes=NB_CLASSES,
                   batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE,
                   nb_epochs=NB_EPOCHS, holdout=HOLDOUT, data_aug=DATA_AUG,
                   nb_epochs_s=NB_EPOCHS_S, lmbda=LMBDA,
                   aug_batch_size=AUG_BATCH_SIZE):
    """
    MNIST tutorial for the black-box attack from arxiv.org/abs/1602.02697
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :return: a dictionary with:
             * black-box model accuracy on test set
             * substitute model accuracy on test set
             * black-box model accuracy on adversarial examples transferred
               from the substitute model
    """
    # Set logging level to see debug information
    set_log_level(logging.DEBUG)

    # Dictionary used to keep track and return key accuracies
    accuracies = {}

    # Perform tutorial setup
    assert setup_tutorial()

    # Create TF session
    sess = tf.Session()

    # Get data
    with open('X.pickle', 'rb') as pickle_in:
        x_all = pickle.load(pickle_in)
    x_all = np.divide(x_all, 255)
    with open('y.pickle', 'rb') as pickle_in:
        y_all = pickle.load(pickle_in)

    # Convert to float 32
    x_all = np.float32(x_all)
    y_all = np.float32(y_all)

    num_class = 3
    class_lables = np.zeros((len(y_all), num_class))
    # make y dataset a matrix (each row shows the class lable)
    for index in range(len(y_all)):
        if y_all[index] == 0:
            class_lables[index][0] = 1
        elif y_all[index] == 1:
            class_lables[index][1] = 1
        elif y_all[index] == 2:
            class_lables[index][2] = 1
    y_all = class_lables

    # splitting data set to train/test randomly
    x_train, x_test, y_train, y_test = train_test_split(x_all, y_all,
                                                        test_size=0.2)

    """
    # Get MNIST data
    mnist = MNIST(train_start=train_start, train_end=train_end,
                  test_start=test_start, test_end=test_end)
    x_train, y_train = mnist.get_set('train')
    x_test, y_test = mnist.get_set('test')
    """

    # Initialize substitute training set reserved for adversary
    x_sub = x_test[:holdout]
    y_sub = y_test[:holdout]
    x_sub_1 = x_sub[:, :1, :, :]

    # Redefine test set as remaining samples unavailable to adversaries
    x_test = x_test[holdout:]
    y_test = y_test[holdout:]
    x_test_1 = x_test[:, :1, :, :]

    # Obtain Image parameters
    img_rows, img_cols, nchannels = x_train.shape[1:4]
    nb_classes = y_train.shape[1]

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, nchannels))
    y = tf.placeholder(tf.float32, shape=(None, nb_classes))

    # Define input TF placeholder for X-sub Vulnerability Analysis
    x_vul = tf.placeholder(tf.float32, shape=(None, 1, img_cols, nchannels))

    # Seed random number generator so tutorial is reproducible
    rng = np.random.RandomState([2017, 8, 30])

    # Simulate the black-box model locally
    # You could replace this by a remote labeling API for instance
    print("Preparing the black-box model.")
    prep_bbox_out = prep_bbox(sess, x, y, x_train, y_train, x_test, y_test,
                              nb_epochs, batch_size, learning_rate,
                              rng, nb_classes, img_rows, img_cols, nchannels)
    model, bbox_preds, accuracies['bbox'] = prep_bbox_out

    # Train substitute using method from https://arxiv.org/abs/1602.02697
    print("Training the substitute model.")
    train_sub_out = train_sub(sess, x_vul, y, x_sub_1, y_sub, x_test_1, y_test,
                              nb_epochs, batch_size, learning_rate,
                              rng, nb_classes, 1, img_cols, nchannels)
    model_sub, preds_sub, accuracies['sub'] = train_sub_out

    # Initialize the Fast Gradient Sign Method (FGSM) attack object.
    fgsm_par = {'eps': 0.1, 'ord': np.inf, 'clip_min': 0., 'clip_max': 1.}
    fgsm = FastGradientMethod(model_sub, sess=sess)

    # Craft adversarial examples using the substitute
    eval_params = {'batch_size': batch_size}
    x_adv_sub = fgsm.generate(x_vul, **fgsm_par)
    print(x_adv_sub.get_shape())
    print(type(x_adv_sub))

    x_test_rest = x_test[:, 1:, :, :]
    x_test_rest_tf = tf.convert_to_tensor(x_test_rest)
    concat_adv = tf.concat([x_adv_sub, x_test_rest_tf], 1)
    print(concat_adv.get_shape())
    print(type(concat_adv))

    # Evaluate the accuracy of the "black-box" model on adversarial examples
    accuracy = model_eval(sess, x, y, model.get_logits(concat_adv),
                          x_test, y_test, args=eval_params)
    print('Test accuracy of oracle on adversarial examples generated '
          'using the substitute: ' + str(accuracy))
    accuracies['bbox_on_sub_adv_ex'] = accuracy

    return accuracies