初めて利用します。chainerを利用してdata内の手持ち画像を学習させようとするとエラーが出て困っております。原因と対策をご教授いただきたいです。以下エラーコードとソースです。

エラー

Exception in main training loop: 
Traceback (most recent call last):
  File "/usr/local/lib/python3.5/dist-packages/chainer/training/trainer.py", line 306, in run
    update()
  File "/usr/local/lib/python3.5/dist-packages/chainer/training/updaters/standard_updater.py", line 149, in update
    self.update_core()
  File "/usr/local/lib/python3.5/dist-packages/chainer/training/updaters/standard_updater.py", line 154, in update_core
    in_arrays = self.converter(batch, self.device)
  File "/usr/local/lib/python3.5/dist-packages/chainer/dataset/convert.py", line 133, in concat_examples
    [example[i] for example in batch], padding[i])))
  File "/usr/local/lib/python3.5/dist-packages/chainer/dataset/convert.py", line 163, in _concat_arrays
    return xp.concatenate([array[None] for array in arrays])
Will finalize trainer extensions and updater before reraising the exception.
---------------------------------------------------------------------------
MemoryError                               Traceback (most recent call last)
<ipython-input-150-3287ea1d3afc> in <module>()
----> 1 trainer.run()

/usr/local/lib/python3.5/dist-packages/chainer/training/trainer.py in run(self, show_loop_exception_msg)
    318                 print('Will finalize trainer extensions and updater before '
    319                       'reraising the exception.', file=sys.stderr)
--> 320             six.reraise(*sys.exc_info())
    321         finally:
    322             for _, entry in extensions:

/usr/local/lib/python3.5/dist-packages/six.py in reraise(tp, value, tb)
    691             if value.__traceback__ is not tb:
    692                 raise value.with_traceback(tb)
--> 693             raise value
    694         finally:
    695             value = None

/usr/local/lib/python3.5/dist-packages/chainer/training/trainer.py in run(self, show_loop_exception_msg)
    304                 self.observation = {}
    305                 with reporter.scope(self.observation):
--> 306                     update()
    307                     for name, entry in extensions:
    308                         if entry.trigger(self):

/usr/local/lib/python3.5/dist-packages/chainer/training/updaters/standard_updater.py in update(self)
    147 
    148         """
--> 149         self.update_core()
    150         self.iteration += 1
    151 

/usr/local/lib/python3.5/dist-packages/chainer/training/updaters/standard_updater.py in update_core(self)
    152     def update_core(self):
    153         batch = self._iterators['main'].next()
--> 154         in_arrays = self.converter(batch, self.device)
    155 
    156         optimizer = self._optimizers['main']

/usr/local/lib/python3.5/dist-packages/chainer/dataset/convert.py in concat_examples(batch, device, padding)
    131         for i in six.moves.range(len(first_elem)):
    132             result.append(to_device(device, _concat_arrays(
--> 133                 [example[i] for example in batch], padding[i])))
    134 
    135         return tuple(result)

/usr/local/lib/python3.5/dist-packages/chainer/dataset/convert.py in _concat_arrays(arrays, padding)
    161     xp = cuda.get_array_module(arrays[0])
    162     with cuda.get_device_from_array(arrays[0]):
--> 163         return xp.concatenate([array[None] for array in arrays])
    164 
    165 

MemoryError: 

ソース

%matplotlib inline
from PIL import Image
from glob import glob
import numpy as np  # 数値計算
import pandas as pd  # データ操作
import matplotlib.pyplot as plt  # 可視化
import chainer
import chainer.functions as F #F.でパラメータない関数、Lはパラメータがある関数
import chainer.links as L
import scipy
from scipy.ndimage.interpolation import rotate
import cv2
from glob import glob

import random #いろんな乱数を全部まとめて固定
def reset_seed(seed=0):
    random.seed(seed)
    np.random.seed(seed)
if chainer.cuda.available:
    chainer.cuda.cupy.random.seed(seed)
reset_seed()

cat_paths = 'data/cat/*.jpg'
dog_paths = 'data/dog/*.jpg'
cat_files = glob(cat_paths)
dog_files = glob(dog_paths)

def cutout(image_origin, mask_size):
    # 最後に使うfill()は元の画像を書き換えるので、コピーしておく
    img = np.copy(image_origin)
    mask_value = img.mean()

    h, w, _ = img.shape
    # マスクをかける場所のtop, leftをランダムに決める
    # はみ出すことを許すので、0以上ではなく負の値もとる(最大mask_size // 2はみ出す)
    top = np.random.randint(0 - mask_size // 2, h - mask_size)
    left = np.random.randint(0 - mask_size // 2, w - mask_size)
    bottom = top + mask_size
    right = left + mask_size

    # はみ出した場合の処理
    if top < 0:
        top = 0
    if left < 0:
        left = 0

    # マスク部分の画素値を平均値で埋める
    img[top:bottom, left:right, :].fill(mask_value)
    return img

from scipy.misc import imresize
from scipy.ndimage.interpolation import rotate
def random_rotation(image, angle_range=(0, 180)):
    h, w, _ = image.shape
    angle = np.random.randint(*angle_range)
    image = rotate(image, angle)
    image = imresize(image, (h, w))
    return image

def random_crop(image, crop_size=(224, 224)):
    h, w, _ = image.shape

    # 0~(400-224)の間で画像のtop, leftを決める
    top = np.random.randint(0, h - crop_size[0])
    left = np.random.randint(0, w - crop_size[1])

    # top, leftから画像のサイズである224を足して、bottomとrightを決める
    bottom = top + crop_size[0]
    right = left + crop_size[1]

    # 決めたtop, bottom, left, rightを使って画像を抜き出す
    image = image[top:bottom, left:right, :]
    return image

def scale_augmentation(image, scale_range=(256, 400), crop_size=224):
    scale_size = np.random.randint(*scale_range)
    image = imresize(image, (scale_size, scale_size))
    image = random_crop(image, (crop_size, crop_size))
    return image

#cat元画像格納場所
x_basec,t_basec = [],[]
#aug(cat)格納場所(augの種類ごとに格納)
x_augc1,t_augc1 = [],[]#横反転
x_augc2,t_augc2 = [],[]#縦反転
x_augc3,t_augc3 = [],[]#マスク
x_augc4,t_augc4 = [],[]#回転
x_augc5,t_augc5 = [],[]#スケール


for filepath in cat_files:
    img = Image.open(filepath)
    img = np.array(img, 'f')#この下に水増し関数いれる
    img = np.transpose(img,(2, 0, 1))
    x_basec.append(img)
    t_basec.append(np.array(0, 'i'))  # 猫は0

for filepath in cat_files:
    img = Image.open(filepath)
    img = np.array(img, 'f')#この下に水増し関数いれる
    img = img[:, ::-1, :]#横反転
    img = np.transpose(img,(2, 0, 1))
    x_augc1.append(img)
    t_augc1.append(np.array(0, 'i'))  # 猫は0

for filepath in cat_files:
    img = Image.open(filepath)
    img = np.array(img, 'f')#この下に水増し関数いれる
    img = img[::-1, :, :]#縦反転
    img = np.transpose(img,(2, 0, 1))
    x_augc2.append(img)
    t_augc2.append(np.array(0, 'i'))  # 猫は0

for filepath in cat_files:
    img = Image.open(filepath)
    img = np.array(img, 'f')#この下に水増し関数いれる
    img = cutout(img, 100)#マスク関数
    img = np.transpose(img,(2, 0, 1))
    x_augc3.append(img)
    t_augc3.append(np.array(0, 'i'))  # 猫は0 

for filepath in cat_files:
    img = Image.open(filepath)
    img = np.array(img, 'f')#この下に水増し関数いれる
    img = random_rotation(img)#回転関数
    img = np.transpose(img,(2, 0, 1))
    x_augc4.append(img)
    t_augc4.append(np.array(0, 'i'))  # 猫は0 

for filepath in cat_files:
    img = Image.open(filepath)
    img = np.array(img, 'f')#この下に水増し関数いれる
    img = scale_augmentation(img)#スケール関数
    img = np.transpose(img,(2, 0, 1))
    x_augc5.append(img)
    t_augc5.append(np.array(0, 'i'))  # 猫は0 

#dog元画像格納場所
x_based,t_based = [],[]
#aug(dog)格納場所(augの種類ごとに格納)
x_augd1,t_augd1 = [],[]#横反転
x_augd2,t_augd2 = [],[]#縦反転
x_augd3,t_augd3 = [],[]#マスク
x_augd4,t_augd4 = [],[]#回転
x_augd5,t_augd5 = [],[]#スケール

for filepath in dog_files:
    img = Image.open(filepath)
    img = np.array(img, 'f')#この下に水増し関数いれる
    img = np.transpose(img,(2, 0, 1))
    x_based.append(img)
    t_based.append(np.array(1, 'i'))  # 犬は1

for filepath in dog_files:
    img = Image.open(filepath)
    img = np.array(img, 'f')#この下に水増し関数いれる
    img = img[:, ::-1, :]#横反転
    img = np.transpose(img,(2, 0, 1))
    x_augd1.append(img)
    t_augd1.append(np.array(1, 'i'))  # 犬は1

for filepath in dog_files:
    img = Image.open(filepath)
    img = np.array(img, 'f')#この下に水増し関数いれる
    img = img[::-1, :, :]#縦反転
    img = np.transpose(img,(2, 0, 1))
    x_augd2.append(img)
    t_augd2.append(np.array(1, 'i'))  # 犬は1

for filepath in dog_files:
    img = Image.open(filepath)
    img = np.array(img, 'f')#この下に水増し関数いれる
    img = cutout(img, 200)#マスク関数
    img = np.transpose(img,(2, 0, 1))
    x_augd3.append(img)
    t_augd3.append(np.array(1, 'i'))  # 犬は1 

for filepath in dog_files:
    img = Image.open(filepath)
    img = np.array(img, 'f')#この下に水増し関数いれる
    img = random_rotation(img)#回転関数
    img = np.transpose(img,(2, 0, 1))
    x_augd4.append(img)
    t_augd4.append(np.array(1, 'i'))  # 犬は1 

for filepath in dog_files:
    img = Image.open(filepath)
    img = np.array(img, 'f')#この下に水増し関数いれる
    img = scale_augmentation(img)#スケール関数
    img = np.transpose(img,(2, 0, 1))
    x_augd5.append(img)
    t_augd5.append(np.array(1, 'i'))  # 犬は1 

x_train, x_test = [],[]
t_train, t_test = [],[]
for filepath in cat_files:
    if random.random() <0.7 : #訓練とテストの分割をランダム化
        x_train.append(x_basec)
        t_train.append(t_basec)  
        x_train.append(x_augc1)
        t_train.append(x_augc1)
        x_train.append(x_augc2)
        t_train.append(x_augc2)
        x_train.append(x_augc3)
        t_train.append(x_augc3)
        x_train.append(x_augc4)
        t_train.append(x_augc4)
        x_train.append(x_augc5)
        t_train.append(x_augc5)
    else:     
        x_test.append(x_basec)
        t_test.append(t_basec)   

for filepath in dog_files:
    if random.random() <0.7 : #訓練とテストの分割をランダム化
        x_train.append(x_based)
        t_train.append(t_based)  
        x_train.append(x_augd1)
        t_train.append(x_augd1)
        x_train.append(x_augd2)
        t_train.append(x_augd2)
        x_train.append(x_augd3)
        t_train.append(x_augd3)
        x_train.append(x_augd4)
        t_train.append(x_augd4)
        x_train.append(x_augd5)
        t_train.append(x_augd5)
    else:     
        x_test.append(x_based)
        t_test.append(t_based) 

train = chainer.datasets.TupleDataset(x_train,t_train) 
test = chainer.datasets.TupleDataset(x_test,t_test) 

class CNN(chainer.Chain):

    def __init__(self, n_mid=100, n_out=2):
        super().__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(in_channels=1, out_channels=3, ksize=3, stride=1, pad=1)
            self.fc1 = L.Linear(None, n_mid)
            self.fc2 = L.Linear(None, n_out)

    def __call__(self, x):
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, 3, 3)
        h = self.fc1(h)
        h = self.fc2(h)
        return h
import random

def reset_seed(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    if chainer.cuda.available:
        chainer.cuda.cupy.random.seed(seed)
# CPUとGPU関連のシードをすべて固定
reset_seed(0)

gpu_id = 0  # 使用したGPUに割り振られているID
model.to_gpu(gpu_id)
# Optimizerの定義とmodelとの紐づけ
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
batchsize = 128
train_iter = chainer.iterators.SerialIterator(train, batchsize)
test_iter = chainer.iterators.SerialIterator(test, batchsize, repeat=False, shuffle=False)
from chainer import training
from chainer.training import extensions

epoch = 30

updater = training.StandardUpdater(train_iter, optimizer, device=gpu_id)

trainer = training.Trainer(updater, (epoch, 'epoch'), out='dc0')

# バリデーション用のデータで評価
trainer.extend(extensions.Evaluator(test_iter, model, device=gpu_id))

# 学習結果の途中を表示する
trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))

# 1エポックごとに結果をlogファイルに出力させる
trainer.extend(extensions.PrintReport(['epoch', 'main/accuracy', 'validation/main/accuracy', 'main/loss', 'validation/main/loss', 'elapsed_time']), trigger=(1, 'epoch'))
trainer.run()