"""Train a dense autoencoder on MNIST and visualize reconstruction quality.

Pipeline: load and flatten MNIST -> build a 784-75-25-75-784 autoencoder ->
train with MSE reconstruction loss -> plot train/validation loss curves ->
display original vs. reconstructed test digits side by side.
"""

import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.optimizers import Adam


def load_mnist_flat():
    """Load MNIST, scale pixels to [0, 1], and flatten images to 784-vectors.

    Returns:
        (x_train, x_test): float32 arrays of shape (n, 784). Labels are
        discarded — the autoencoder reconstructs its own input.
    """
    (x_train, _), (x_test, _) = mnist.load_data()
    x_train = x_train.astype("float32") / 255.0
    x_test = x_test.astype("float32") / 255.0
    # 28x28 images -> 784-dim row vectors for the dense layers.
    x_train = x_train.reshape((x_train.shape[0], 784))
    x_test = x_test.reshape((x_test.shape[0], 784))
    return x_train, x_test


def build_autoencoder(input_dim=784, hidden_dim=75, code_dim=25):
    """Build and compile a symmetric dense autoencoder.

    Architecture: input_dim -> hidden_dim -> code_dim -> hidden_dim -> input_dim.
    Defaults reproduce the original 784-75-25-75-784 network.

    Args:
        input_dim: flattened image size.
        hidden_dim: width of the intermediate encoder/decoder layers.
        code_dim: bottleneck (latent code) size.

    Returns:
        A compiled Keras Model (Adam optimizer, MSE reconstruction loss).
    """
    input_img = Input(shape=(input_dim,))
    encoder_hidden = Dense(hidden_dim, activation='relu')(input_img)
    encoded = Dense(code_dim, activation='relu')(encoder_hidden)
    decoder_hidden = Dense(hidden_dim, activation='relu')(encoded)
    # Sigmoid output keeps reconstructed pixels in [0, 1], matching the input scale.
    decoded = Dense(input_dim, activation='sigmoid')(decoder_hidden)

    autoencoder = Model(input_img, decoded)
    autoencoder.compile(optimizer=Adam(), loss='mse')
    return autoencoder


def plot_loss(history):
    """Plot training vs. validation reconstruction loss over epochs."""
    plt.plot(history.history['loss'], label='Train Loss')
    plt.plot(history.history['val_loss'], label='Validation Loss')
    plt.title("Autoencoder Reconstruction Error (MSE)")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.grid(True)
    plt.show()


def plot_reconstructions(x_test, decoded_imgs, n=10, side=28):
    """Show the first `n` test digits (top row) above their reconstructions.

    Args:
        x_test: flattened original images, shape (m, side*side).
        decoded_imgs: autoencoder outputs, same shape as x_test.
        n: number of digit pairs to display.
        side: image edge length in pixels (28 for MNIST).
    """
    plt.figure(figsize=(20, 4))
    for i in range(n):
        # Original (top row)
        plt.subplot(2, n, i + 1)
        plt.imshow(x_test[i].reshape(side, side), cmap='gray')
        plt.title("Original")
        plt.axis('off')

        # Reconstructed (bottom row)
        plt.subplot(2, n, i + 1 + n)
        plt.imshow(decoded_imgs[i].reshape(side, side), cmap='gray')
        plt.title("Reconstructed")
        plt.axis('off')

    plt.suptitle("Original vs Reconstructed Digits", fontsize=16)
    plt.show()


def main():
    """Run the full train-and-visualize pipeline."""
    x_train, x_test = load_mnist_flat()
    autoencoder = build_autoencoder()

    # Input == target: the network learns to reconstruct its own input.
    history = autoencoder.fit(
        x_train, x_train,
        epochs=20,
        batch_size=256,
        shuffle=True,
        validation_data=(x_test, x_test),
    )

    plot_loss(history)

    decoded_imgs = autoencoder.predict(x_test)
    plot_reconstructions(x_test, decoded_imgs)


if __name__ == "__main__":
    main()