In [1]:
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf

from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import concatenate
In [14]:
import pathlib

# Root of the dataset: one sub-folder per artist under ./images.
# NOTE(review): best practice is to keep this import in the top import cell.
data_dir = pathlib.Path() / "images"
In [16]:
# Count every .jpg one level below an artist folder; a generator avoids
# materialising the full path list just to take its length.
image_count = sum(1 for _ in data_dir.glob('*/*.jpg'))
print(image_count)

# Preview one Warhol image — the bare last expression renders inline.
# NOTE(review): glob order is filesystem-dependent, so the previewed
# image may differ across machines.
warhol = list(data_dir.glob('Andy_Warhol/*'))
PIL.Image.open(str(warhol[0]))
8774
Out[16]:
In [17]:
batch_size = 4
img_height = 180
img_width = 180


def _load_split(subset):
    """Build one split (``"training"`` or ``"validation"``) of the image dataset.

    Both splits MUST use the same seed and validation_split so the two
    subsets partition the files without overlap.
    """
    return tf.keras.preprocessing.image_dataset_from_directory(
        data_dir,
        validation_split=0.2,
        subset=subset,
        seed=123,
        image_size=(img_height, img_width),
        batch_size=batch_size,
    )


# The two calls previously duplicated every argument except `subset`.
train_ds = _load_split("training")
val_ds = _load_split("validation")

# NOTE(review): the printed list contains both 'Albrecht_Dürer' and a
# mojibake variant of the same name — the directory names mix Unicode
# normalization forms, so "51 classes" double-counts one artist. This
# should be fixed on disk (normalize folder names) before training.
class_names = train_ds.class_names
print(class_names)
Found 8775 files belonging to 51 classes.
Using 7020 files for training.
Found 8775 files belonging to 51 classes.
Using 1755 files for validation.
['Albrecht_Dürer', 'Albrecht_Du╠êrer', 'Alfred_Sisley', 'Amedeo_Modigliani', 'Andrei_Rublev', 'Andy_Warhol', 'Camille_Pissarro', 'Caravaggio', 'Claude_Monet', 'Diego_Rivera', 'Diego_Velazquez', 'Edgar_Degas', 'Edouard_Manet', 'Edvard_Munch', 'El_Greco', 'Eugene_Delacroix', 'Francisco_Goya', 'Frida_Kahlo', 'Georges_Seurat', 'Giotto_di_Bondone', 'Gustav_Klimt', 'Gustave_Courbet', 'Henri_Matisse', 'Henri_Rousseau', 'Henri_de_Toulouse-Lautrec', 'Hieronymus_Bosch', 'Jackson_Pollock', 'Jan_van_Eyck', 'Joan_Miro', 'Kazimir_Malevich', 'Leonardo_da_Vinci', 'Marc_Chagall', 'Michelangelo', 'Mikhail_Vrubel', 'Pablo_Picasso', 'Paul_Cezanne', 'Paul_Gauguin', 'Paul_Klee', 'Peter_Paul_Rubens', 'Pierre-Auguste_Renoir', 'Piet_Mondrian', 'Pieter_Bruegel', 'Raphael', 'Rembrandt', 'Rene_Magritte', 'Salvador_Dali', 'Sandro_Botticelli', 'Titian', 'Vasiliy_Kandinskiy', 'Vincent_van_Gogh', 'William_Turner']
In [42]:
# Let tf.data pick buffer sizes dynamically at runtime.
# NOTE(review): `tf.data.AUTOTUNE` replaces this experimental alias on
# TF >= 2.4 — confirm the installed version before switching.
AUTOTUNE = tf.data.experimental.AUTOTUNE

# Cache decoded images after the first epoch, reshuffle the training set
# each epoch (buffer of 1000 elements), and prefetch so input-pipeline
# work overlaps with model execution. Validation data needs no shuffle.
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
In [43]:
# NOTE(review): unused in this notebook (the Rescaling layer below carries
# its own input_shape); kept in case a later cell references it.
inputShape = (180, 180, 3)

# Derive the class count from the dataset instead of hard-coding 51.
# NOTE(review): this count currently includes a duplicate "Albrecht_Dürer"
# class caused by mixed Unicode normalization of the directory names.
num_classes = len(class_names)

# Simple CNN: three conv/pool stages, then a dense classifier head.
model = Sequential([
  # Scale pixels from [0, 255] to [0, 1] inside the graph so inference
  # gets the same preprocessing as training.
  layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
  layers.Conv2D(16, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(32, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(64, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Dropout(0.2),
  layers.Flatten(),
  layers.Dense(128, activation='relu'),
  # No softmax here: the loss is configured with from_logits=True.
  layers.Dense(num_classes)
])
In [44]:
# from_logits=True is required because the final Dense layer has no
# softmax activation — the loss applies it internally.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# Display the layer-by-layer parameter breakdown.
model.summary()
Model: "sequential_13"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
rescaling_12 (Rescaling)     (None, 180, 180, 3)       0         
_________________________________________________________________
conv2d_34 (Conv2D)           (None, 180, 180, 16)      448       
_________________________________________________________________
max_pooling2d_25 (MaxPooling (None, 90, 90, 16)        0         
_________________________________________________________________
conv2d_35 (Conv2D)           (None, 90, 90, 32)        4640      
_________________________________________________________________
max_pooling2d_26 (MaxPooling (None, 45, 45, 32)        0         
_________________________________________________________________
conv2d_36 (Conv2D)           (None, 45, 45, 64)        18496     
_________________________________________________________________
max_pooling2d_27 (MaxPooling (None, 22, 22, 64)        0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 22, 22, 64)        0         
_________________________________________________________________
flatten_12 (Flatten)         (None, 30976)             0         
_________________________________________________________________
dense_22 (Dense)             (None, 128)               3965056   
_________________________________________________________________
dense_23 (Dense)             (None, 51)                6579      
=================================================================
Total params: 3,995,219
Trainable params: 3,995,219
Non-trainable params: 0
_________________________________________________________________
In [45]:
# This step may take a while (~3 min/epoch on the hardware logged below).
epochs = 10
# NOTE(review): the captured history shows heavy overfitting — val_loss
# rises from ~2.65 (epoch 2) to ~7.54 while training accuracy reaches 91%.
# Consider data augmentation, stronger regularization, or an EarlyStopping
# callback with restore_best_weights=True before trusting this model.
history = model.fit(
  train_ds,
  validation_data=val_ds,
  epochs=epochs
)
Epoch 1/10
1755/1755 [==============================] - 162s 92ms/step - loss: 3.2556 - accuracy: 0.1762 - val_loss: 2.9420 - val_accuracy: 0.2302
Epoch 2/10
1755/1755 [==============================] - 172s 98ms/step - loss: 2.6464 - accuracy: 0.2930 - val_loss: 2.6544 - val_accuracy: 0.3083
Epoch 3/10
1755/1755 [==============================] - 166s 95ms/step - loss: 2.0131 - accuracy: 0.4397 - val_loss: 2.7561 - val_accuracy: 0.3060
Epoch 4/10
1755/1755 [==============================] - 188s 107ms/step - loss: 1.2948 - accuracy: 0.6124 - val_loss: 3.2033 - val_accuracy: 0.2934
Epoch 5/10
1755/1755 [==============================] - 186s 106ms/step - loss: 0.7538 - accuracy: 0.7645 - val_loss: 4.1692 - val_accuracy: 0.2781
Epoch 6/10
1755/1755 [==============================] - 184s 105ms/step - loss: 0.4820 - accuracy: 0.8449 - val_loss: 4.9365 - val_accuracy: 0.2741
Epoch 7/10
1755/1755 [==============================] - 189s 108ms/step - loss: 0.3413 - accuracy: 0.8801 - val_loss: 6.2074 - val_accuracy: 0.2803
Epoch 8/10
1755/1755 [==============================] - 183s 104ms/step - loss: 0.2932 - accuracy: 0.8949 - val_loss: 6.5173 - val_accuracy: 0.2672
Epoch 9/10
1755/1755 [==============================] - 185s 105ms/step - loss: 0.2322 - accuracy: 0.9117 - val_loss: 6.9431 - val_accuracy: 0.2877
Epoch 10/10
1755/1755 [==============================] - 173s 98ms/step - loss: 0.2538 - accuracy: 0.9085 - val_loss: 7.5362 - val_accuracy: 0.2832