# 自己动手编写神经网络程序,解决 MNIST 问题,并网络化部署(五):Keras 自带模型之间的关系
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
from keras.utils.data_utils import get_file
# Classify a downloaded image with ImageNet-pretrained ResNet50.
from keras.applications.resnet50 import ResNet50  # was missing: ResNet50 was never imported

# Load ResNet50 with ImageNet weights (downloaded and cached on first use).
model = ResNet50(weights='imagenet')

# Fetch the sample image into the Keras cache and get its local path.
path = '1.jpg'
img_path = get_file(path, origin='http://pic.qiantucdn.com/58pic/26/23/18/58c959d01a57d_1024.jpg')
print(img_path)

# ResNet50 expects 224x224 RGB input.
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)  # add batch dimension -> (1, 224, 224, 3)
x = preprocess_input(x)        # ImageNet-style preprocessing (mean subtraction / channel order)

preds = model.predict(x)
# decode_predictions maps class probabilities to (wnid, label, score) tuples.
print('Predicted:', decode_predictions(preds, top=3)[0])
# Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
# 6. 迁移学习 (Transfer learning)
from keras.datasets import mnist
import gc
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.applications.vgg16 import VGG16
from keras.optimizers import SGD
import cv2
import h5py as h5py
import numpy as np
def tran_y(y):
    """One-hot encode a single MNIST digit label.

    Args:
        y: integer class label in [0, 9].

    Returns:
        A length-10 float numpy vector with a 1.0 at index ``y``.
    """
    # The pasted original lost its indentation (SyntaxError); restored here.
    y_ohe = np.zeros(10)
    y_ohe[y] = 1
    return y_ohe
# With high-end hardware (>=32 GB RAM, >=8 GB GPU memory) this can be increased.
# VGG16 requires input of at least 48x48 pixels.
ishape = 48

# MNIST images are 28x28 grayscale; resize to 48x48 and replicate the single
# channel to 3 (BGR) so they match VGG16's expected input, then scale to [0, 1].
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = [cv2.cvtColor(cv2.resize(i, (ishape, ishape)), cv2.COLOR_GRAY2BGR) for i in X_train]
X_train = np.concatenate([arr[np.newaxis] for arr in X_train]).astype('float32')
X_train /= 255.0
X_test = [cv2.cvtColor(cv2.resize(i, (ishape, ishape)), cv2.COLOR_GRAY2BGR) for i in X_test]
X_test = np.concatenate([arr[np.newaxis] for arr in X_test]).astype('float32')
X_test /= 255.0

# One-hot encode the labels for categorical_crossentropy.
y_train_ohe = np.array([tran_y(label) for label in y_train]).astype('float32')
y_test_ohe = np.array([tran_y(label) for label in y_test]).astype('float32')

# Load the VGG16 convolutional base pretrained on ImageNet and freeze all of
# its layers, so only the new classifier head below is trained.
model_vgg = VGG16(include_top=False, weights='imagenet', input_shape=(ishape, ishape, 3))
for layer in model_vgg.layers:
    layer.trainable = False

# New fully-connected classifier head for the 10 MNIST classes.
model = Flatten()(model_vgg.output)
model = Dense(4096, activation='relu', name='fc1')(model)
model = Dense(4096, activation='relu', name='fc2')(model)
model = Dropout(0.5)(model)
model = Dense(10, activation='softmax', name='prediction')(model)
model_vgg_mnist_pretrain = Model(model_vgg.input, model, name='vgg16_pretrain')
model_vgg_mnist_pretrain.summary()

sgd = SGD(lr=0.05, decay=1e-5)  # original had "1e–5" (en dash) — a SyntaxError
model_vgg_mnist_pretrain.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model_vgg_mnist_pretrain.fit(X_train, y_train_ohe, validation_data=(X_test, y_test_ohe), epochs=10, batch_size=64)

# Encourage prompt release of the large training arrays / model graph.
for _ in range(100):
    gc.collect()
import gc
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.applications.vgg16 import VGG16
from keras.optimizers import SGD
import matplotlib.pyplot as plt
import os
import cv2
import h5py as h5py
import numpy as np
def tran_y(y):
    """One-hot encode a single MNIST digit label.

    Args:
        y: integer class label in [0, 9].

    Returns:
        A length-10 float numpy vector with a 1.0 at index ``y``.
    """
    # The pasted original lost its indentation (SyntaxError); restored here.
    y_ohe = np.zeros(10)
    y_ohe[y] = 1
    return y_ohe
from keras.datasets import mnist  # was missing from this script's import block

# With high-end hardware (>=32 GB RAM, >=8 GB GPU memory) this can be increased.
# VGG16 requires input of at least 48x48 pixels.
ishape = 48
epochs = 10  # defined once so fit() and the plot axes stay in sync (was undefined at plt.axis)

# MNIST images are 28x28 grayscale; resize to 48x48 and replicate the single
# channel to 3 (BGR) so they match VGG16's expected input, then scale to [0, 1].
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = [cv2.cvtColor(cv2.resize(i, (ishape, ishape)), cv2.COLOR_GRAY2BGR) for i in X_train]
X_train = np.concatenate([arr[np.newaxis] for arr in X_train]).astype('float32')
X_train /= 255.0
X_test = [cv2.cvtColor(cv2.resize(i, (ishape, ishape)), cv2.COLOR_GRAY2BGR) for i in X_test]
X_test = np.concatenate([arr[np.newaxis] for arr in X_test]).astype('float32')
X_test /= 255.0

# One-hot encode the labels for categorical_crossentropy.
y_train_ohe = np.array([tran_y(label) for label in y_train]).astype('float32')
y_test_ohe = np.array([tran_y(label) for label in y_test]).astype('float32')

# Frozen ImageNet-pretrained VGG16 base + new trainable classifier head.
model_vgg = VGG16(include_top=False, weights='imagenet', input_shape=(ishape, ishape, 3))
for layer in model_vgg.layers:
    layer.trainable = False

model = Flatten()(model_vgg.output)
model = Dense(4096, activation='relu', name='fc1')(model)
model = Dense(4096, activation='relu', name='fc2')(model)
model = Dropout(0.5)(model)
model = Dense(10, activation='softmax', name='prediction')(model)
model_vgg_mnist_pretrain = Model(model_vgg.input, model, name='vgg16_pretrain')
model_vgg_mnist_pretrain.summary()

sgd = SGD(lr=0.05, decay=1e-5)  # original had "1e–5" (en dash) — a SyntaxError
model_vgg_mnist_pretrain.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
log = model_vgg_mnist_pretrain.fit(X_train, y_train_ohe, validation_data=(X_test, y_test_ohe), epochs=epochs, batch_size=64)

# Was: evaluate(x_test, y_test) — an undefined name plus raw integer labels.
# The model needs the resized images and the one-hot labels.
score = model_vgg_mnist_pretrain.evaluate(X_test, y_test_ohe, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

# Plot training curves. 'r–'/'b–' (en dash) are invalid matplotlib format
# strings; the intended dashed style is '--'.
plt.figure('acc')
plt.subplot(2, 1, 1)
plt.plot(log.history['acc'], 'r--', label='Training Accuracy')
plt.plot(log.history['val_acc'], 'r-', label='Validation Accuracy')
plt.legend(loc='best')
plt.xlabel('Epochs')
plt.axis([0, epochs, 0.9, 1])

plt.figure('loss')
plt.subplot(2, 1, 2)
plt.plot(log.history['loss'], 'b--', label='Training Loss')
plt.plot(log.history['val_loss'], 'b-', label='Validation Loss')
plt.legend(loc='best')
plt.xlabel('Epochs')
plt.axis([0, epochs, 0, 1])
plt.show()

os.system("pause")  # Windows-only; harmless no-op/error message elsewhere