icub-client
SAMDriver_interaction.py
# """"""""""""""""""""""""""""""""""""""""""""""
# The University of Sheffield
# WYSIWYD Project
#
# SAMpy class for implementation of SAM module
#
# Created on 26 May 2015
#
# @authors: Uriel Martinez, Luke Boorman, Andreas Damianou
#
# """"""""""""""""""""""""""""""""""""""""""""""
import sys
import numpy
import os
import cv2
import readline
import yarp
from SAM.SAM_Core import SAMDriver
from SAM.SAM_Core import SAMTesting
import logging

class SAMDriver_interaction(SAMDriver):
    """
    Class developed for the implementation of face recognition.
    """
    def __init__(self):
        """
        Initialise class using SAMDriver.__init__ and augment with custom parameters.

        additionalParametersList is a list of extra parameters to preserve between training and interaction.
        """
        SAMDriver.__init__(self)
        self.additionalParametersList = ['imgH', 'imgW', 'imgHNew', 'imgWNew',
                                         'image_suffix', 'pose_index', 'pose_selection']

    def loadParameters(self, parser, trainName):
        """
        Function to load parameters from the model config.ini file.

        Reads parameters from the file already loaded into parser, from within the section trainName, and stores them in self.paramsDict.

        Args:
            parser: SafeConfigParser with pre-read config file.
            trainName: Section from which parameters are to be read.

        Returns:
            None
        """
        if parser.has_option(trainName, 'imgH'):
            self.paramsDict['imgH'] = int(parser.get(trainName, 'imgH'))
        else:
            self.paramsDict['imgH'] = 400

        if parser.has_option(trainName, 'imgW'):
            self.paramsDict['imgW'] = int(parser.get(trainName, 'imgW'))
        else:
            self.paramsDict['imgW'] = 400

        if parser.has_option(trainName, 'imgHNew'):
            self.paramsDict['imgHNew'] = int(parser.get(trainName, 'imgHNew'))
        else:
            self.paramsDict['imgHNew'] = 200

        if parser.has_option(trainName, 'imgWNew'):
            self.paramsDict['imgWNew'] = int(parser.get(trainName, 'imgWNew'))
        else:
            self.paramsDict['imgWNew'] = 200

        if parser.has_option(trainName, 'image_suffix'):
            self.paramsDict['image_suffix'] = parser.get(trainName, 'image_suffix')
        else:
            self.paramsDict['image_suffix'] = '.ppm'

        if parser.has_option(trainName, 'pose_index'):
            self.paramsDict['pose_index'] = list(parser.get(trainName, 'pose_index').replace('\'', '').split(','))
        else:
            self.paramsDict['pose_index'] = ['']

        if parser.has_option(trainName, 'pose_selection'):
            self.paramsDict['pose_selection'] = int(parser.get(trainName, 'pose_selection'))
        else:
            self.paramsDict['pose_selection'] = 0

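    # For illustration, a hypothetical config.ini section that loadParameters
    # could consume (the section name 'Faces' and all values here are
    # assumptions, not taken from a real model file; omitted options fall back
    # to the defaults above):
    #
    #     [Faces]
    #     imgH = 400
    #     imgW = 400
    #     imgHNew = 200
    #     imgWNew = 200
    #     image_suffix = .ppm
    #     pose_index = 'Straight','LR'
    #     pose_selection = 0
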
    def saveParameters(self):
        """
        Executes SAMDriver.saveParameters to save default parameters.
        """
        SAMDriver.saveParameters(self)


    # """"""""""""""""
    def readData(self, root_data_dir, participant_index, *args, **kw):
        """
        Method which accepts a data directory and reads all the data in, producing self.Y, a numpy array with n instances of m-length feature vectors, and self.L, a list of n text labels.

        This method reads .ppm images from disk, converts the images to grayscale and serialises the data into a feature vector.

        Args:
            root_data_dir: Data directory.
            participant_index: List of subfolders to consider. Can be left as an empty list.

        Returns:
            Length of the feature vector for a single instance, self.Y.shape[1].
        """
        if not os.path.exists(root_data_dir):
            logging.error("CANNOT FIND:" + root_data_dir)
        else:
            logging.info("PATH FOUND")

        # Find and build index of available images.......
        data_file_count = numpy.zeros([len(participant_index), len(self.paramsDict['pose_index'])])
        data_file_database = {}
        for count_participant, current_participant in enumerate(participant_index):
            data_file_database_part = {}
            for count_pose, current_pose in enumerate(self.paramsDict['pose_index']):
                current_data_dir = os.path.join(root_data_dir, current_participant + current_pose)
                data_file_database_p = numpy.empty(0, dtype=[('orig_file_id', 'i2'), ('file_id', 'i2'),
                                                             ('img_fname', 'a100')])
                data_image_count = 0
                if os.path.exists(current_data_dir):
                    for fileN in os.listdir(current_data_dir):
                        # parts = re.split("[-,\.]", file)
                        fileName, fileExtension = os.path.splitext(fileN)
                        if fileExtension == self.paramsDict['image_suffix']:  # Check for image file
                            file_ttt = numpy.empty(1, dtype=[('orig_file_id', 'i2'), ('file_id', 'i2'),
                                                             ('img_fname', 'a100')])
                            file_ttt['orig_file_id'][0] = int(fileName)
                            file_ttt['img_fname'][0] = fileN
                            file_ttt['file_id'][0] = data_image_count
                            data_file_database_p = numpy.append(data_file_database_p, file_ttt, axis=0)
                            data_image_count += 1
                data_file_database_p = numpy.sort(data_file_database_p, order=['orig_file_id'])
                data_file_database_part[self.paramsDict['pose_index'][count_pose]] = data_file_database_p
                data_file_count[count_participant, count_pose] = len(data_file_database_p)
            data_file_database[participant_index[count_participant]] = data_file_database_part

        # To access use both dictionaries: data_file_database['Luke']['LR']
        # Cutting indexes to smallest number of available files -> using file count
        min_no_images = int(numpy.min(data_file_count))

        # Load image data into array......
        # Load first image to get sizes....
        data_image = cv2.imread(
            os.path.join(root_data_dir, participant_index[0] + self.paramsDict['pose_index'][0] + "/" +
                         data_file_database[participant_index[0]][
                             self.paramsDict['pose_index'][0]][0][2]))[:, :, (2, 1, 0)]  # Convert BGR to RGB

        # Data size
        logging.info("Found minimum number of images:" + str(min_no_images))
        logging.info("Image count:" + str(data_file_count))
        logging.info("Found image with dimensions" + str(data_image.shape))
        # imgplot = plt.imshow(data_image)  # [:,:,(2,1,0)] converts BGR to RGB

        # Load all images....
        # Data Dimensions:
        # 1. Pixels (e.g. 200x200)
        # 2. Images
        # 3. Person
        # 4. Movement (Static. up/down. left / right)
        set_x = int(data_image.shape[0])
        set_y = int(data_image.shape[1])
        # no_rgb = int(data_image.shape[2])
        no_pixels = self.paramsDict['imgWNew'] * self.paramsDict['imgHNew']  # set_x*set_y
        img_data = numpy.zeros(
            [min_no_images * len(participant_index) * len(self.paramsDict['pose_index']), no_pixels])
        img_label_data = []
        # cv2.imshow("test", data_image)
        # cv2.waitKey(50)
        countPos = 0
        for count_pose, current_pose in enumerate(self.paramsDict['pose_index']):
            for count_participant, current_participant in enumerate(participant_index):
                for current_image in range(min_no_images):
                    current_image_path = os.path.join(root_data_dir,
                                                      participant_index[count_participant] +
                                                      self.paramsDict['pose_index'][count_pose] + "/" +
                                                      data_file_database[
                                                          participant_index[count_participant]][
                                                          self.paramsDict['pose_index'][count_pose]][
                                                          current_image][2])
                    data_image = cv2.imread(current_image_path)
                    # Check image is the same size; if not, cut or reject
                    if data_image.shape[0] < set_x or data_image.shape[1] < set_y:
                        logging.error("Image too small... EXITING:")
                        logging.error("Found image with dimensions" + str(data_image.shape))
                        sys.exit(0)
                    if data_image.shape[0] > set_x or data_image.shape[1] > set_y:
                        logging.warning("Found image with dimensions" + str(data_image.shape))
                        logging.warning("Image too big cutting to: x=" + str(set_x) + " y=" + str(set_y))
                        data_image = data_image[:set_x, :set_y]
                    data_image = cv2.resize(data_image, (self.paramsDict['imgWNew'], self.paramsDict['imgHNew']))  # New
                    data_image = cv2.cvtColor(data_image, cv2.COLOR_BGR2GRAY)
                    # Data is flattened into single vector (inside matrix of all images)
                    img_data[countPos, :] = data_image.flatten()
                    countPos += 1
                    # Labelling with participant
                    img_label_data.append(participant_index[count_participant])

        self.Y = img_data
        self.L = img_label_data
        return self.Y.shape[1]

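    # For illustration, a hypothetical on-disk layout that readData expects,
    # assuming participant_index=['Luke'] and pose_index=['LR']. Folder names
    # are participant and pose concatenated, and basenames must be numeric
    # because the indexing code above calls int(fileName):
    #
    #     root_data_dir/
    #         LukeLR/
    #             0.ppm
    #             1.ppm
    #             2.ppm
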
    def processLiveData(self, dataList, thisModel, verbose, additionalData=dict()):
        """
        Method which receives a list of data frames and outputs a classification if one is available, or 'no_classification' if it is not.

        Args:
            dataList: List of dataFrames collected. Length of list is variable.
            thisModel: List of models required for testing.
            verbose: Boolean turning logging to stdout on or off.
            additionalData: Dictionary containing additional data required for classification to occur.

        Returns:
            String with the result of classification, the likelihood of the classification, and the list of frames with the latest x frames popped, where x is the window length of the model. The classification result can be the string 'None' if the classification is unknown or the message is invalid, or None if a different error occurs.
        """

        logging.info('process live data')
        logging.info(len(dataList))

        imgH = thisModel[0].paramsDict['imgH']
        imgW = thisModel[0].paramsDict['imgW']
        imgHNew = thisModel[0].paramsDict['imgHNew']
        imgWNew = thisModel[0].paramsDict['imgWNew']
        numFaces = len(dataList)

        imageArray = numpy.zeros((imgH, imgW, 3), dtype=numpy.uint8)
        yarpImage = yarp.ImageRgb()
        yarpImage.resize(imgH, imgW)
        yarpImage.setExternal(imageArray, imageArray.shape[1], imageArray.shape[0])

        # images = numpy.zeros((numFaces, imgHNew * imgWNew), dtype=numpy.uint8)
        labels = [None] * numFaces
        likelihoods = [None] * numFaces

        if numFaces > 0:
            # average all faces
            for i in range(numFaces):
                logging.info('iterating' + str(i))
                yarpImage.copy(dataList[i])
                imageArrayOld = cv2.resize(imageArray, (imgHNew, imgWNew))
                imageArrayGray = cv2.cvtColor(imageArrayOld, cv2.COLOR_BGR2GRAY)
                instance = imageArrayGray.flatten()[None, :]
                logging.info(instance.shape)
                logging.info("Collected face: " + str(i))
                logging.info('testing enter')
                [labels[i], likelihoods[i]] = SAMTesting.testSegment(thisModel, instance, verbose, None)
                logging.info('testing leave')
            logging.info('combine enter')
            finalClassLabel, finalClassProb = SAMTesting.combineClassifications(thisModel, labels, likelihoods)
            logging.info('combine ready')
            logging.info('finalClassLabels ' + str(finalClassLabel))
            logging.info('finalClassProbs ' + str(finalClassProb))
            return finalClassLabel, finalClassProb, []
        else:
            return [None, 0, None]

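    # Sketch of the caller-side contract for processLiveData; the variable
    # names here are hypothetical. Each entry of dataList is expected to be a
    # yarp image of size imgW x imgH, e.g. read from a port connected to a
    # face-detector output:
    #
    #     label, prob, remainder = driver.processLiveData(frames, models, False)
    #     if label is not None and label != 'None':
    #         logging.info(label + ' ' + str(prob))
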
    def formatGeneratedData(self, instance):
        """
        Method to transform a generated instance from the model into a Yarp formatted output.

        Args:
            instance: Feature vector returned during generation of a label.

        Returns:
            Yarp formatted output for instance.
        """
        # normalise the image to the range 0-255
        yMin = instance.min()
        instance -= yMin
        yMax = instance.max()
        instance /= yMax
        instance *= 255
        instance = instance.astype(numpy.uint8)
        instance = numpy.reshape(instance, (self.paramsDict['imgHNew'], self.paramsDict['imgWNew']))

        # convert image into a yarp mono image
        yarpImage = yarp.ImageMono()
        yarpImage.resize(self.paramsDict['imgHNew'], self.paramsDict['imgWNew'])
        instance = instance.astype(numpy.uint8)
        yarpImage.setExternal(instance, instance.shape[1], instance.shape[0])

        return yarpImage

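
# A minimal usage sketch, not part of the original module: it assumes a
# config.ini with a [Faces] section like the one illustrated after
# loadParameters above, and numerically named .ppm face images under
# ./faceData/LukeLR/. All paths and section names are hypothetical.
if __name__ == '__main__':
    from ConfigParser import SafeConfigParser  # Python 2 parser, as named in the docstrings

    driver = SAMDriver_interaction()
    parser = SafeConfigParser()
    parser.read('config.ini')
    driver.loadParameters(parser, 'Faces')  # fills driver.paramsDict with overrides or defaults
    featureLength = driver.readData('./faceData', ['Luke'])
    logging.info('Y shape: ' + str(driver.Y.shape) + ', labels: ' + str(len(driver.L)) +
                 ', feature length: ' + str(featureLength))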