This article explains in detail how to implement a Siamese network in Keras. I found it quite practical, so I'm sharing it here as a reference; I hope you get something useful out of it.

The code is based on the official Keras example, with minor modifications made while studying it.

The complete code and final results:

import keras
import numpy as np
import matplotlib.pyplot as plt
import random
from keras.callbacks import TensorBoard
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Input, Flatten, Dense, Dropout, Lambda
from keras.optimizers import RMSprop
from keras import backend as K

num_classes = 10
epochs = 20


def euclidean_distance(vects):
    # Euclidean distance between the two embedding vectors of a pair
    x, y = vects
    sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sum_square, K.epsilon()))


def eucl_dist_output_shape(shapes):
    shape1, shape2 = shapes
    return (shape1[0], 1)


def contrastive_loss(y_true, y_pred):
    '''Contrastive loss from Hadsell-et-al.'06
    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    '''
    margin = 1
    square_pred = K.square(y_pred)
    margin_square = K.square(K.maximum(margin - y_pred, 0))
    return K.mean(y_true * square_pred + (1 - y_true) * margin_square)


def create_pairs(x, digit_indices):
    '''Positive and negative pair creation.
    Alternates between positive and negative pairs.
    Label 1 = same-class (positive) pair, 0 = different-class (negative) pair.
    '''
    pairs = []
    labels = []
    n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1
    for d in range(num_classes):
        for i in range(n):
            z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
            pairs += [[x[z1], x[z2]]]
            inc = random.randrange(1, num_classes)
            dn = (d + inc) % num_classes
            z1, z2 = digit_indices[d][i], digit_indices[dn][i]
            pairs += [[x[z1], x[z2]]]
            labels += [1, 0]
    return np.array(pairs), np.array(labels)


def create_base_network(input_shape):
    '''Base network to be shared (eq. to feature extraction).
    '''
    input = Input(shape=input_shape)
    x = Flatten()(input)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.1)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.1)(x)
    x = Dense(128, activation='relu')(x)
    return Model(input, x)


def compute_accuracy(y_true, y_pred):  # operates on NumPy arrays
    '''Compute classification accuracy with a fixed threshold on distances.
    '''
    pred = y_pred.ravel() < 0.5
    return np.mean(pred == y_true)


def accuracy(y_true, y_pred):  # operates on tensors
    '''Compute classification accuracy with a fixed threshold on distances.
    '''
    return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))


def plot_train_history(history, train_metrics, val_metrics):
    plt.plot(history.history.get(train_metrics), '-o')
    plt.plot(history.history.get(val_metrics), '-o')
    plt.ylabel(train_metrics)
    plt.xlabel('Epochs')
    plt.legend(['train', 'validation'])


# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
input_shape = x_train.shape[1:]

# create training+test positive and negative pairs
digit_indices = [np.where(y_train == i)[0] for i in range(num_classes)]
tr_pairs, tr_y = create_pairs(x_train, digit_indices)

digit_indices = [np.where(y_test == i)[0] for i in range(num_classes)]
te_pairs, te_y = create_pairs(x_test, digit_indices)

# network definition
base_network = create_base_network(input_shape)

input_a = Input(shape=input_shape)
input_b = Input(shape=input_shape)

# because we re-use the same instance `base_network`,
# the weights of the network
# will be shared across the two branches
processed_a = base_network(input_a)
processed_b = base_network(input_b)

distance = Lambda(euclidean_distance,
                  output_shape=eucl_dist_output_shape)([processed_a, processed_b])

model = Model([input_a, input_b], distance)
keras.utils.plot_model(model, "siamModel.png", show_shapes=True)
model.summary()

# train
rms = RMSprop()
model.compile(loss=contrastive_loss, optimizer=rms, metrics=[accuracy])
history = model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
                    batch_size=128,
                    epochs=epochs, verbose=2,
                    validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y))

plt.figure(figsize=(8, 4))
plt.subplot(1, 2, 1)
plot_train_history(history, 'loss', 'val_loss')
plt.subplot(1, 2, 2)
plot_train_history(history, 'accuracy', 'val_accuracy')
plt.show()

# compute final accuracy on training and test sets
y_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
tr_acc = compute_accuracy(tr_y, y_pred)
y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
te_acc = compute_accuracy(te_y, y_pred)

print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
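After training, the model maps a pair of images to a single Euclidean distance, and the shared base_network can also be used on its own as a 128-dimensional embedding extractor. The snippet below is a minimal usage sketch, not part of the original example: it assumes the script above has already run in the same session (so model, base_network and x_test are defined), and the 0.5 threshold simply mirrors the one used in compute_accuracy.

# Minimal usage sketch (assumes the script above has already run in this
# session, so `model`, `base_network` and `x_test` are defined).

# 1) Score a new pair: a small distance means the two images are likely
#    the same digit (0.5 is the same threshold used in compute_accuracy).
pair_left = x_test[0:1]    # shape (1, 28, 28)
pair_right = x_test[1:2]
dist = model.predict([pair_left, pair_right])[0, 0]
print('distance = %.4f, predicted same digit: %s' % (dist, dist < 0.5))

# 2) Use the shared branch alone as an embedding extractor.
embeddings = base_network.predict(x_test[:10])
print('embedding shape:', embeddings.shape)    # (10, 128)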

That's all on how to implement a Siamese network in Keras. I hope the content above is helpful and that you've learned something new. If you found the article useful, please share it so more people can see it.