import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist  # MNIST dataset library
from tensorflow import keras
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D

(x_train, y_train), (x_test, y_test) = mnist.load_data()

# standardize the input data
x_train = x_train / 255
x_test = x_test / 255

y_train_cat = keras.utils.to_categorical(y_train, 10)
y_test_cat = keras.utils.to_categorical(y_test, 10)

x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)

print(x_train.shape)

model = keras.Sequential([
    Conv2D(32, (3,3), padding='same', activation='relu', input_shape=(28,28,1)),
    MaxPooling2D((2,2), strides=2),
    Conv2D(64, (3,3), padding='same', activation='relu'),
    MaxPooling2D((2,2), strides=2),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(10, activation='softmax')
])

print(model.summary())

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

his = model.fit(x_train, y_train_cat, batch_size=32, epochs=5, validation_split=0.2)

model.evaluate(x_test, y_test_cat)

Fix the errors.
Expert-level academic advice to solve the user's problem:

import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow import keras
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D
(x_train, y_train), (x_test, y_test) = mnist.load_data() # Load the MNIST train/test splits from tensorflow.keras.datasets
# Standardize the input data
x_train = x_train / 255
x_test = x_test / 255
y_train_cat = keras.utils.to_categorical(y_train, 10) # Convert labels to categorical using keras utils
y_test_cat = keras.utils.to_categorical(y_test, 10)
x_train = np.expand_dims(x_train, axis=3) # Add a dimension to the input data to work with Conv2D layers
x_test = np.expand_dims(x_test, axis=3)
print(x_train.shape)
model = keras.Sequential([
    Conv2D(32, (3,3), padding='same', activation='relu', input_shape=(28,28,1)),
    MaxPooling2D((2,2), strides=2),
    Conv2D(64, (3,3), padding='same', activation='relu'),
    MaxPooling2D((2,2), strides=2),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(10, activation='softmax')
]) # Sequential model: two Conv2D + MaxPooling2D blocks followed by Dense classifier layers
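# Expected feature-map shapes (visible in the summary below): padding='same' keeps the 28x28
# spatial size after each Conv2D and each MaxPooling2D halves it, so the data flows
# (28,28,1) -> (28,28,32) -> (14,14,32) -> (14,14,64) -> (7,7,64); Flatten then produces a 7*7*64 = 3136-element vector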
model.summary() # Print the model architecture; summary() prints itself, so wrapping it in print() only adds a stray "None"
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy']) # Compile with the Adam optimizer, categorical crossentropy loss, and accuracy metric
history = model.fit(x_train, y_train_cat, batch_size=32, epochs=5, validation_split=0.2) # Train the model on the training data with batch size of 32, 5 epochs, and using 20% of the data as validation set
model.evaluate(x_test, y_test_cat) # Evaluate the model performance on the test set
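
Since matplotlib is imported but never used, a natural follow-up is to plot the learning curves collected in history and to sanity-check a single prediction. A minimal sketch, assuming the variables defined above (history, model, x_test, y_test) are still in scope and a recent TensorFlow/Keras version where the metric keys are 'accuracy' and 'val_accuracy':

plt.plot(history.history['accuracy'], label='train accuracy') # Training accuracy per epoch
plt.plot(history.history['val_accuracy'], label='validation accuracy') # Accuracy on the 20% validation split
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.grid(True)
plt.show()

pred = model.predict(np.expand_dims(x_test[0], axis=0)) # Shape (1, 10): class probabilities for the first test image
print('predicted digit:', np.argmax(pred), '| true digit:', y_test[0]) # Compare the argmax prediction with the true label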