I was trying to train the model.
I got this error while running the code below:
ValueError: Arguments `target` and `output` must have the same shape. Received: target.shape=(None, 3), output.shape=(None, 2)
import pandas as pd
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from tensorflow import keras
from keras import layers
from tensorflow.keras.models import Sequential # type: ignore
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense # type: ignore
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization # type: ignore
from tensorflow.keras.utils import image_dataset_from_directory, to_categorical # type: ignore
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img # type: ignore
from tensorflow.keras.preprocessing import image_dataset_from_directory # type: ignore
import os
import matplotlib.image as mpimg
import zipfile
dataset = zipfile.ZipFile("D:/Downloads/archive.zip", 'r')
dataset.extractall()
dataset.close()
path = 'D:/Downloads/chest_xray/chest_xray/train'
path = 'D:/Języki programowania/Python/Projekty/Pneumonia_Detection_CNN/kaggle/chest_xray/chest_xray/train'
classes = os.listdir(path)
print(classes)
PNEUMONIA_dir = os.path.join(path, 'PNEUMONIA')
NORMAL_dir = os.path.join(path, 'NORMAL')
pneumonia_names = os.listdir(PNEUMONIA_dir)
normal_names = os.listdir(NORMAL_dir)
print("There is", len(pneumonia_names), "images of pneumonia infected in training dataset")
print("There is", len(normal_names), "normal images in training dataset")
fig = plt.gcf()
fig.set_size_inches(16, 8)
pic_index = 210
pneumonia_images = [os.path.join(PNEUMONIA_dir, fname)
                    for fname in pneumonia_names[pic_index-8:pic_index]]

for i, img_path in enumerate(pneumonia_images):
    sp = plt.subplot(2, 4, i+1)
    sp.axis('Off')
    img = mpimg.imread(img_path)
    plt.imshow(img)

plt.show()
fig = plt.gcf()
fig.set_size_inches(16, 8)
pic_index = 210
normal_images = [os.path.join(NORMAL_dir, fname)
                 for fname in normal_names[pic_index-8:pic_index]]

for i, img_path in enumerate(normal_images):
    sp = plt.subplot(2, 4, i+1)
    sp.axis('Off')
    img = mpimg.imread(img_path)
    plt.imshow(img)

plt.show()
Train = image_dataset_from_directory(
    directory = 'D:/Programowanie (wszystko)/Pneumonia/chest_xray/chest_xray/train',
    labels = "inferred",
    label_mode = "categorical",
    batch_size = 32,
    image_size = (256, 256)
)
Test = image_dataset_from_directory(
    directory = 'D:/Programowanie (wszystko)/Pneumonia/chest_xray/chest_xray/test',
    labels = "inferred",
    label_mode = "categorical",
    batch_size = 32,
    image_size = (256, 256)
)
Validation = image_dataset_from_directory(
    directory = 'D:/Programowanie (wszystko)/Pneumonia/chest_xray/chest_xray/val',
    labels = "inferred",
    label_mode = "categorical",
    batch_size = 32,
    image_size = (256, 256)
)
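Since the error mentions target.shape=(None, 3), one check I could add here is to print what image_dataset_from_directory actually inferred; as far as I know it creates one class per subdirectory, so this is just a diagnostic sketch, not part of my original script:

# Sketch: inspect the classes and label shape the loader produces
print(Train.class_names)                  # I would expect ['NORMAL', 'PNEUMONIA'] only
for images, labels in Train.take(1):
    print("image batch:", images.shape)   # (32, 256, 256, 3)
    print("label batch:", labels.shape)   # last dimension = number of inferred classes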
model = tf.keras.models.Sequential([
    layers.Conv2D(32, (3, 3), activation = "relu", input_shape = (256, 256, 3)),
    layers.MaxPooling2D(2, 2),
    layers.Conv2D(64, (3, 3), activation = "relu"),
    layers.MaxPooling2D(2, 2),
    layers.Conv2D(64, (3, 3), activation = "relu"),
    layers.MaxPooling2D(2, 2),
    layers.Conv2D(64, (3, 3), activation = "relu"),
    layers.MaxPooling2D(2, 2),
    layers.Flatten(),
    layers.Dense(512, activation = "relu"),
    layers.BatchNormalization(),
    layers.Dense(512, activation = "relu"),
    layers.Dropout(0.1),
    layers.BatchNormalization(),
    layers.Dense(512, activation = "relu"),
    layers.Dropout(0.2),
    layers.BatchNormalization(),
    layers.Dense(512, activation = "relu"),
    layers.Dropout(0.2),
    layers.BatchNormalization(),
    layers.Dense(2, activation = "sigmoid")
])
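The last layer is Dense(2), so the model should output two values per image; a quick sanity check of that (again only a sketch, not in my original code) would be:

print(model.output_shape)        # should be (None, 2) because of the final Dense(2)
print(len(Train.class_names))    # the error message suggests this is 3 rather than 2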
model.summary()
keras.utils.plot_model(
    model,
    show_shapes = True,
    show_dtype = True,
    show_layer_activations = True
)
model.compile(
    loss = 'binary_crossentropy',
    optimizer = 'adam',
    metrics = ['accuracy']
)
history = model.fit(Train,
                    epochs = 10,
                    validation_data = Validation)
Epoch 1/10
163/163 [==============================] - 59s 259ms/step - loss: 0.2657 - accuracy: 0.9128 - val_loss: 2.1434 - val_accuracy: 0.5625
Epoch 2/10
163/163 [==============================] - 34s 201ms/step - loss: 0.1493 - accuracy: 0.9505 - val_loss: 3.0297 - val_accuracy: 0.6250
Epoch 3/10
163/163 [==============================] - 34s 198ms/step - loss: 0.1107 - accuracy: 0.9626 - val_loss: 0.5933 - val_accuracy: 0.7500
Epoch 4/10
163/163 [==============================] - 33s 197ms/step - loss: 0.0992 - accuracy: 0.9640 - val_loss: 0.3691 - val_accuracy: 0.8125
Epoch 5/10
163/163 [==============================] - 34s 202ms/step - loss: 0.0968 - accuracy: 0.9651 - val_loss: 3.5919 - val_accuracy: 0.5000
Epoch 6/10
163/163 [==============================] - 34s 199ms/step - loss: 0.1012 - accuracy: 0.9653 - val_loss: 3.8678 - val_accuracy: 0.5000
Epoch 7/10
163/163 [==============================] - 34s 198ms/step - loss: 0.1026 - accuracy: 0.9613 - val_loss: 3.2006 - val_accuracy: 0.5625
Epoch 8/10
163/163 [==============================] - 35s 204ms/step - loss: 0.0785 - accuracy: 0.9701 - val_loss: 1.7824 - val_accuracy: 0.5000
Epoch 9/10
163/163 [==============================] - 34s 198ms/step - loss: 0.0717 - accuracy: 0.9745 - val_loss: 3.3485 - val_accuracy: 0.5625
Epoch 10/10
163/163 [==============================] - 35s 200ms/step - loss: 0.0699 - accuracy: 0.9770 - val_loss: 0.5788 - val_accuracy: 0.6250
What should I do to fix this error?