from keras.applications import VGG16
from keras.datasets import mnist
from keras.utils import to_categorical
from keras import models
from keras.layers import Dense, Flatten, Dropout
import cv2
import numpy as np
# Load the data
(x_train,y_train),(x_test,y_test)=mnist.load_data()
# VGG16 model with weights pre-trained on ImageNet; its default input size is 224x224, but the minimum is 48x48
# Resize the dataset images to 48x48 and convert the grayscale images to 3-channel images
# (cv2.COLOR_GRAY2BGR replicates the single channel, so BGR vs. RGB makes no difference here)
x_train=[cv2.cvtColor(cv2.resize(i,(48,48)),cv2.COLOR_GRAY2BGR)for i in x_train]
x_test=[cv2.cvtColor(cv2.resize(i,(48,48)),cv2.COLOR_GRAY2BGR)for i in x_test]
# Step 1: use np.newaxis to add a batch dimension to each image, giving shape (1,48,48,3); that is why the code uses arr[np.newaxis].
# Step 2: use np.concatenate to join the per-image arrays into one array; the resulting x_train has shape (60000,48,48,3) and x_test has shape (10000,48,48,3).
x_train=np.concatenate([arr[np.newaxis]for arr in x_train])
x_test=np.concatenate([arr[np.newaxis]for arr in x_test])
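# Note (a hedged alternative, not from the original post): np.stack adds the batch axis and
# concatenates in a single call, so the two lines above could equivalently be written as:
# x_train = np.stack(x_train)
# x_test = np.stack(x_test)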
# Scale pixel values to [0,1] (the reshape calls are redundant, since the arrays already have these shapes)
x_train=x_train.astype("float32")/255
x_train=x_train.reshape((60000,48,48,3))
x_test=x_test.astype("float32")/255
x_test=x_test.reshape((10000,48,48,3))
# One-hot encode the labels
y_train=to_categorical(y_train)
y_test=to_categorical(y_test)
# Split off a validation set
x_val=x_train[:10000]
y_val=y_train[:10000]
x_train=x_train[10000:]
y_train=y_train[10000:]
# Build the model
conv_base=VGG16(weights='imagenet',
                include_top=False,
                input_shape=(48,48,3))
# Freeze the convolutional base so its ImageNet weights are not updated during training
conv_base.trainable=False
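# Optional sanity check (a minimal sketch, relying on the Keras trainable_weights attribute):
# after freezing, the convolutional base should expose no trainable weights.
# print(len(conv_base.trainable_weights))  # expected to print 0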
model=models.Sequential()
model.add(conv_base)
model.add(Flatten())
model.add(Dense(4096,activation="relu"))
model.add(Dropout(0.5))
# layer 14
model.add(Dense(4096, activation="relu"))
model.add(Dropout(0.5))
# layer 15
model.add(Dense(10,activation="softmax"))
model.summary()
# Compile the model
model.compile(optimizer="rmsprop",loss="categorical_crossentropy",metrics=["accuracy"])
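# Note (an assumption, not part of the original post): when training new Dense layers on top of a
# frozen base, a smaller learning rate is often used. A possible variant:
# from keras.optimizers import RMSprop
# model.compile(optimizer=RMSprop(lr=2e-5), loss="categorical_crossentropy", metrics=["accuracy"])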
# Train the model
model.fit(x_train,y_train,batch_size=64,epochs=5,validation_data=(x_val,y_val))
# Evaluate the model
test_loss,test_acc=model.evaluate(x_test,y_test,batch_size=64)
print("The accuracy is:"+str(test_acc))