# (line-number residue from text extraction removed)
- # -*- coding: utf-8 -*-
- """
- Created on Thu Nov 8 22:24:55 2018
- @author: fdrea
- """
- import os
- import cv2
- import h5py
- import numpy
- from scipy import arange
- import matplotlib.pyplot as plt
- from PIL import Image
- #DATA_PATH = "./data/Trainingset/" # for train
- #DATA_PATH = "./data/Train_291_aug/" # for train 291 augmentation
- #DATA_PATH = "./data/Train_291/" # for train 201 images
- DATA_PATH = "./data/Test/Set5/" # for validation
- #DATA_PATH = "./data/Set1/" # for try
- Random_Crop = 30 # number of random patches
- Patch_size = 32
- label_size = 32
- scale = 2
- blurring_levels = 40
- #b= 2
def prepare_training_data():
    """Build (degraded, clean) luma patch pairs for SR training/validation.

    For every image under DATA_PATH: convert BGR -> YCrCb and keep the Y
    (luma) channel, draw Random_Crop random label_size x label_size crops,
    and for each of `blurring_levels` Gaussian-blur strengths (sigma from
    0.1 to 4.0 in 0.1 steps) create a degraded copy: bicubic downscale by
    `scale`, bicubic upscale back, then Gaussian blur.

    Returns:
        (data, label): float64 arrays of shape
        (nums * Random_Crop * blurring_levels, 1, Patch_size, Patch_size)
        and (..., 1, label_size, label_size), values scaled to [0, 1].
        Trailing rows remain zero when images are skipped as too small;
        `count` printed at the end is the number of filled rows.
    """
    names = sorted(os.listdir(DATA_PATH))
    nums = len(names)
    count = 0

    # Pre-allocate for the maximum possible number of samples.
    data = numpy.zeros((nums * Random_Crop * blurring_levels, 1, Patch_size, Patch_size), dtype=numpy.double)
    label = numpy.zeros((nums * Random_Crop * blurring_levels, 1, label_size, label_size), dtype=numpy.double)

    for i in range(nums):
        print('i = ', i)
        name = DATA_PATH + names[i]
        hr_img = cv2.imread(name, cv2.IMREAD_COLOR)
        shape = hr_img.shape
        hr_img = cv2.cvtColor(hr_img, cv2.COLOR_BGR2YCrCb)
        hr_img = hr_img[:, :, 0]  # luma channel only

        # numpy.random.randint's upper bound is exclusive, so a range of 0
        # would raise ValueError; require a strictly positive range on BOTH
        # axes (the original guard only rejected negative ranges).
        if shape[0] - label_size <= 0 or shape[1] - label_size <= 0:
            continue
        # Per-axis crop origins. The original used min(h, w) for both axes,
        # which needlessly restricted crop positions on non-square images.
        Points_x = numpy.random.randint(0, shape[0] - label_size, Random_Crop)
        Points_y = numpy.random.randint(0, shape[1] - label_size, Random_Crop)

        # numpy.arange replaces scipy.arange (removed from modern SciPy).
        for b in numpy.arange(0.1, 4.1, 0.1):
            b = round(b, 2)  # avoid float-accumulation noise in sigma
            print('b = ', b)

            for j in range(Random_Crop):
                print('j = ', j)
                hr_patch = hr_img[Points_x[j]: Points_x[j] + label_size,
                                  Points_y[j]: Points_y[j] + label_size]

                # Degrade: bicubic downscale then upscale back to label_size.
                # BUGFIX: interpolation must be passed by keyword — the
                # original passed cv2.INTER_CUBIC positionally, where it
                # landed in the `dst` parameter slot of cv2.resize.
                lr_patch = cv2.resize(hr_patch,
                                      (label_size // scale, label_size // scale),
                                      interpolation=cv2.INTER_CUBIC)
                lr_patch = cv2.resize(lr_patch,
                                      (lr_patch.shape[1] * scale, lr_patch.shape[0] * scale),
                                      interpolation=cv2.INTER_CUBIC)

                # Blur level b; (0, 0) lets OpenCV derive the kernel size
                # from sigmaX.
                lr_patch = cv2.GaussianBlur(lr_patch, (0, 0), sigmaX=b)

                data[count, 0, :, :] = lr_patch.astype(float) / 255.
                label[count, 0, :, :] = hr_patch.astype(float) / 255.
                count = count + 1

    print('number of samples', count)
    return data, label
def write_hdf5(data, labels, output_filename):
    """Save image data and labels to an HDF5 file.

    The output file contains two float32 datasets named 'data' and 'label'.
    """
    with h5py.File(output_filename, 'w') as h:
        for key, arr in (('data', data), ('label', labels)):
            arr32 = arr.astype(numpy.float32)
            h.create_dataset(key, data=arr32, shape=arr32.shape)
def read_training_data(file):
    """Load 'data' and 'label' datasets from an HDF5 file.

    Returns (train_data, train_label), each transposed from NCHW to NHWC,
    i.e. axes (sample, height, width, channel).
    """
    with h5py.File(file, 'r') as hf:
        def to_nhwc(dataset):
            # channel-first -> channel-last
            return numpy.transpose(numpy.array(dataset), (0, 2, 3, 1))
        return to_nhwc(hf.get('data')), to_nhwc(hf.get('label'))
if __name__ == "__main__":
    # Generate (degraded, clean) patch pairs for the configured DATA_PATH
    # and save them as an HDF5 validation set (Set5 -> val.h5).
    data, label = prepare_training_data()

    # Output names used when DATA_PATH points at the other sets:
    #   write_hdf5(data, label, "try_train.h5")
    #   write_hdf5(data, label, "train91.h5")    # 91-image training set
    #   write_hdf5(data, label, "train291.h5")   # 291-image training set
    #   write_hdf5(data, label, "train_aug.h5")  # augmented 291 set
    write_hdf5(data, label, "val.h5")

    #_, _a = read_training_data("train.h5")
    #_, _a = read_training_data("test.h5")
|