Class developed for the implementation of face recognition.

# Imports required by the methods in this listing. The SAM.SAM_Core module
# path is an assumption from context; everything else is standard.
import logging
import os
import sys

import cv2
import numpy
import yarp

from SAM.SAM_Core import SAMDriver, SAMTesting


def __init__(self):
    """Initialise class using SAMDriver.__init__ and augment with custom parameters.

    additionalParameterList is a list of extra parameters to preserve between
    training and interaction.
    """
    SAMDriver.__init__(self)
    self.additionalParameterList = ['image_suffix',
                                    'pose_index',
                                    'pose_selection']
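For orientation, a sketch of the intended round trip, assuming a Python 2 environment (SafeConfigParser, as in the docstrings below). The concrete subclass name SAMDriver_faceRecognition, the config file name and the section name 'faceRecognition' are assumptions for illustration:

    from ConfigParser import SafeConfigParser

    parser = SafeConfigParser()
    parser.read('config.ini')                         # file name assumed

    driver = SAMDriver_faceRecognition()              # hypothetical concrete class name
    driver.loadParameters(parser, 'faceRecognition')  # section name assumed

    # saveParameters persists the defaults plus every key named in
    # additionalParameterList, so an interaction-time process restores the
    # same image_suffix, pose_index and pose_selection settings.
    driver.saveParameters()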
def loadParameters(self, parser, trainName):
    """Function to load parameters from the model config.ini file.

    Reads each option from the section trainName of the file already loaded
    in parser and stores it in self.paramsDict.

    Args:
        parser: SafeConfigParser with pre-read config file.
        trainName: Section from which parameters are to be read.
    """
    if parser.has_option(trainName, 'imgH'):
        self.paramsDict['imgH'] = int(parser.get(trainName, 'imgH'))

    if parser.has_option(trainName, 'imgW'):
        self.paramsDict['imgW'] = int(parser.get(trainName, 'imgW'))

    if parser.has_option(trainName, 'imgHNew'):
        self.paramsDict['imgHNew'] = int(parser.get(trainName, 'imgHNew'))

    if parser.has_option(trainName, 'imgWNew'):
        self.paramsDict['imgWNew'] = int(parser.get(trainName, 'imgWNew'))

    if parser.has_option(trainName, 'image_suffix'):
        self.paramsDict['image_suffix'] = parser.get(trainName, 'image_suffix')

    if parser.has_option(trainName, 'pose_index'):
        # pose_index is given as a quoted, comma-separated list; strip the
        # quotes and split into a list of pose-name suffixes.
        self.paramsDict['pose_index'] = list(parser.get(trainName, 'pose_index').replace('\'', '').split(','))

    if parser.has_option(trainName, 'pose_selection'):
        self.paramsDict['pose_selection'] = int(parser.get(trainName, 'pose_selection'))
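To make the parsing concrete, here is a hypothetical config section and what loadParameters stores for it (all values invented for illustration):

    # Given a config.ini section such as:
    #
    #   [faceRecognition]
    #   imgH = 400
    #   imgW = 400
    #   imgHNew = 200
    #   imgWNew = 200
    #   image_suffix = .ppm
    #   pose_index = 'Straight','Left','Right'
    #   pose_selection = 0
    #
    # loadParameters yields:
    #   paramsDict['imgH'] == 400              (int)
    #   paramsDict['image_suffix'] == '.ppm'   (str)
    #   paramsDict['pose_selection'] == 0      (int)
    # and the quoted pose list is stripped and split exactly as in the code:
    raw = "'Straight','Left','Right'"
    print(list(raw.replace('\'', '').split(',')))   # ['Straight', 'Left', 'Right']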
def saveParameters(self):
    """Executes SAMDriver.saveParameters to save default parameters."""
    SAMDriver.saveParameters(self)
def readData(self, root_data_dir, participant_index, *args, **kw):
    """Read the face dataset from a data directory.

    Accepts a data directory, reads all the data in and outputs self.Y, a
    numpy array with n instances of m-length feature vectors, and self.L, a
    list of text labels of length n.

    This method reads .ppm images from disk, converts the images to grayscale
    and serialises the data into a feature vector.

    Args:
        root_data_dir: Data directory.
        participant_index: List of subfolders to consider. Can be left as an empty list.
    """
    if not os.path.exists(root_data_dir):
        logging.error("CANNOT FIND: " + root_data_dir)
    else:
        logging.info("PATH FOUND")

    # Build a per-participant, per-pose database of the available image files.
    data_file_count = numpy.zeros([len(participant_index), len(self.paramsDict['pose_index'])])
    data_file_database = {}
    for count_participant, current_participant in enumerate(participant_index):
        data_file_database_part = {}
        for count_pose, current_pose in enumerate(self.paramsDict['pose_index']):
            current_data_dir = os.path.join(root_data_dir, current_participant + current_pose)
            data_file_database_p = numpy.empty(0, dtype=[('orig_file_id', 'i2'),
                                                         ('file_id', 'i2'),
                                                         ('img_fname', 'a100')])
            data_image_count = 0
            if os.path.exists(current_data_dir):
                for fileN in os.listdir(current_data_dir):
                    fileName, fileExtension = os.path.splitext(fileN)
                    if fileExtension == self.paramsDict['image_suffix']:
                        file_ttt = numpy.empty(1, dtype=[('orig_file_id', 'i2'),
                                                         ('file_id', 'i2'),
                                                         ('img_fname', 'a100')])
                        file_ttt['orig_file_id'][0] = int(fileName)
                        file_ttt['img_fname'][0] = fileN
                        file_ttt['file_id'][0] = data_image_count
                        data_file_database_p = numpy.append(data_file_database_p, file_ttt, axis=0)
                        data_image_count += 1
            data_file_database_p = numpy.sort(data_file_database_p, order=['orig_file_id'])
            data_file_database_part[self.paramsDict['pose_index'][count_pose]] = data_file_database_p
            data_file_count[count_participant, count_pose] = len(data_file_database_p)
        data_file_database[participant_index[count_participant]] = data_file_database_part

    # Use the same number of images per participant/pose: the smallest count found.
    min_no_images = int(numpy.min(data_file_count))

    # Read one image to establish the reference dimensions (BGR reordered to RGB).
    data_image = cv2.imread(
        os.path.join(root_data_dir, participant_index[0] + self.paramsDict['pose_index'][0] + "/" +
                     data_file_database[participant_index[0]][
                         self.paramsDict['pose_index'][0]][0][2]))[:, :, (2, 1, 0)]

    logging.info("Found minimum number of images: " + str(min_no_images))
    logging.info("Image count: " + str(data_file_count))
    logging.info("Found image with dimensions " + str(data_image.shape))

    set_x = int(data_image.shape[0])
    set_y = int(data_image.shape[1])
    # Each image is resized to imgHNew x imgWNew before flattening, so the
    # feature length matches the live pipeline in processLiveData below.
    no_pixels = self.paramsDict['imgHNew'] * self.paramsDict['imgWNew']
    img_data = numpy.zeros([min_no_images * len(participant_index) *
                            len(self.paramsDict['pose_index']), no_pixels])
    img_label_data = []
    countPos = 0

    for count_pose, current_pose in enumerate(self.paramsDict['pose_index']):
        for count_participant, current_participant in enumerate(participant_index):
            for current_image in range(min_no_images):
                current_image_path = os.path.join(root_data_dir,
                                                  participant_index[count_participant] +
                                                  self.paramsDict['pose_index'][count_pose] + "/" +
                                                  data_file_database[
                                                      participant_index[count_participant]][
                                                      self.paramsDict['pose_index'][count_pose]][
                                                      current_image][2])
                data_image = cv2.imread(current_image_path)

                # Reject images smaller than the reference; crop images that are larger.
                if data_image.shape[0] < set_x or data_image.shape[1] < set_y:
                    logging.error("Image too small... EXITING:")
                    logging.error("Found image with dimensions " + str(data_image.shape))
                    sys.exit(0)
                if data_image.shape[0] > set_x or data_image.shape[1] > set_y:
                    logging.warning("Found image with dimensions " + str(data_image.shape))
                    logging.warning("Image too big cutting to: x=" + str(set_x) + " y=" + str(set_y))
                    data_image = data_image[:set_x, :set_y]

                data_image = cv2.cvtColor(data_image, cv2.COLOR_BGR2GRAY)
                # Resize to the model input size (mirrors processLiveData).
                data_image = cv2.resize(data_image, (self.paramsDict['imgHNew'], self.paramsDict['imgWNew']))
                img_data[countPos, :] = data_image.flatten()
                countPos += 1
                # Label each image with its participant name.
                img_label_data.append(participant_index[count_participant])

    self.Y = img_data
    self.L = img_label_data
    return self.Y.shape[1]
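A hypothetical call, assuming faces are stored as numbered .ppm files under one folder per participant/pose pair. The directory and participant names are invented; with pose_index == [''] each folder name is just the participant name:

    driver.paramsDict['image_suffix'] = '.ppm'
    driver.paramsDict['pose_index'] = ['']     # single, unnamed pose
    driver.paramsDict['imgHNew'] = 200
    driver.paramsDict['imgWNew'] = 200

    # Expected layout (file stems must be integers, e.g. 0.ppm, 1.ppm, ...):
    #   faceData/Daniel/0.ppm, 1.ppm, ...
    #   faceData/Tony/0.ppm, 1.ppm, ...
    nDims = driver.readData('faceData', ['Daniel', 'Tony'])

    # driver.Y: (n_images, imgHNew * imgWNew) array of flattened grayscale faces
    # driver.L: list of n_images participant labels, e.g. 'Daniel'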
def processLiveData(self, dataList, thisModel, verbose, additionalData=dict()):
    """Receive a list of data frames and output a classification if available, or 'no_classification' if it is not.

    Args:
        dataList: List of dataFrames collected. Length of list is variable.
        thisModel: List of models required for testing.
        verbose: Boolean turning logging to stdout on or off.
        additionalData: Dictionary containing additional data required for classification to occur.

    Returns:
        String with the result of classification, likelihood of the
        classification, and list of frames with the latest x number of frames
        popped, where x is the window length of the model. The classification
        result can be the string 'None' if the classification is unknown or
        the message is invalid, or None if a different error occurs.
    """
    logging.info('process live data')
    logging.info(len(dataList))

    imgH = thisModel[0].paramsDict['imgH']
    imgW = thisModel[0].paramsDict['imgW']
    imgHNew = thisModel[0].paramsDict['imgHNew']
    imgWNew = thisModel[0].paramsDict['imgWNew']
    numFaces = len(dataList)

    # imageArray shares its buffer with yarpImage, so copying a frame into
    # yarpImage makes its pixels available as a numpy array.
    imageArray = numpy.zeros((imgH, imgW, 3), dtype=numpy.uint8)
    yarpImage = yarp.ImageRgb()
    yarpImage.resize(imgH, imgW)
    yarpImage.setExternal(imageArray, imageArray.shape[1], imageArray.shape[0])

    labels = [None] * numFaces
    likelihoods = [None] * numFaces

    if numFaces > 0:
        # Classify each collected face, then combine the per-face results.
        for i in range(numFaces):
            logging.info('iterating ' + str(i))
            yarpImage.copy(dataList[i])
            imageArrayOld = cv2.resize(imageArray, (imgHNew, imgWNew))
            imageArrayGray = cv2.cvtColor(imageArrayOld, cv2.COLOR_BGR2GRAY)
            instance = imageArrayGray.flatten()[None, :]
            logging.info(instance.shape)
            logging.info("Collected face: " + str(i))
            logging.info('testing enter')
            [labels[i], likelihoods[i]] = SAMTesting.testSegment(thisModel, instance, verbose, None)
            logging.info('testing leave')
        logging.info('combine enter')
        finalClassLabel, finalClassProb = SAMTesting.combineClassifications(thisModel, labels, likelihoods)
        logging.info('combine ready')
        logging.info('finalClassLabels ' + str(finalClassLabel))
        logging.info('finalClassProbs ' + str(finalClassProb))
        return finalClassLabel, finalClassProb, []
    else:
        return [None, 0, None]
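A sketch of feeding this from a YARP image stream. The port name, the frame count, and the pre-loaded thisModel list of trained models are assumptions:

    yarp.Network.init()
    facePort = yarp.BufferedPortImageRgb()
    facePort.open('/sam/face/imageData:i')     # port name assumed

    frames = []
    for _ in range(10):                        # collect a handful of face crops
        img = facePort.read(True)              # blocking read
        if img is not None:
            frame = yarp.ImageRgb()
            frame.copy(img)                    # copy out of the port's reused buffer
            frames.append(frame)

    label, prob, remaining = driver.processLiveData(frames, thisModel, False)
    if label not in (None, 'None'):
        print('Recognised ' + label + ' with likelihood ' + str(prob))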
def formatGeneratedData(self, instance):
    """Transform a generated instance from the model into a Yarp formatted output.

    Args:
        instance: Feature vector returned during generation of a label.

    Returns:
        Yarp formatted output for instance.
    """
    # Normalise the generated image to the 0-255 range.
    yMin = instance.min()
    instance -= yMin
    yMax = instance.max()
    instance /= yMax
    instance *= 255
    instance = instance.astype(numpy.uint8)
    instance = numpy.reshape(instance, (self.paramsDict['imgHNew'], self.paramsDict['imgWNew']))

    # Wrap the array in a monochrome yarp image without copying.
    yarpImage = yarp.ImageMono()
    yarpImage.resize(self.paramsDict['imgHNew'], self.paramsDict['imgWNew'])
    instance = instance.astype(numpy.uint8)
    yarpImage.setExternal(instance, instance.shape[1], instance.shape[0])

    return yarpImage
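Going the other way, a generated feature vector can be written back out as a monochrome image. A minimal sketch; the output port name and the random stand-in vector are illustrative only:

    outPort = yarp.Port()
    outPort.open('/sam/face/imageGen:o')       # port name assumed

    # Stand-in for a feature vector generated by the trained model.
    generated = numpy.random.rand(driver.paramsDict['imgHNew'] *
                                  driver.paramsDict['imgWNew'])

    yarpFace = driver.formatGeneratedData(generated)
    outPort.write(yarpFace)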