SAMCore.py
# """"""""""""""""""""""""""""""""""""""""""""""
# The University of Sheffield
# WYSIWYD Project
#
# The core of the Synthetic Autobiographical Memory (SAM) system.
# The core is built upon Latent Feature Models (LFMs) and implements
# cognitive memory properties, such as recall, pattern completion, compression etc.
#
# The Core is accompanied by peripherals, currently implemented as Drivers.
# Drivers facilitate the communication of sensory modalities and the Core.
# See Driver.py for this.
#
# Created: 2015
#
# @authors: Andreas Damianou
#
# """"""""""""""""""""""""""""""""""""""""""""""

import GPy
import numpy as np
import matplotlib.cm as cm
import itertools
import pylab as pb
import cPickle as pickle
from scipy.spatial import distance
import operator
import os

class LFM(object):
    """
    SAM based on Latent Feature Models.
    """
    def __init__(self):
        """
        Initialise the Latent Feature Models.
        """
        self.type = []
        self.model = []
        self.observed = None
        self.inputs = None
        self.__num_views = None
        self.Q = None
        self.N = None
        self.num_inducing = None
        self.namesList = None
        self.Ylist = None

    def store(self, observed, inputs=None, Q=None, kernel=None, num_inducing=None, init_X='PCA'):
        """Store events.

        Description:
            Read in the args observed and inputs and configure the LFM model for training or recollection.

        Args:
            observed: An `(N x D)` matrix, where `N` is the number of points and `D` the number of features needed to describe each point.
            inputs: A `(N x Q)` matrix, where `Q` is the number of features per input. Leave `None` for unsupervised learning.
            Q: Leave `None` for supervised learning (`Q` will then be the dimensionality of inputs). Otherwise, specify with an integer `Q` the dimensionality (number of features) for the compressed space that acts as "latent" inputs.
            kernel: For the GP. Can be left as `None` for the default kernel.
            num_inducing: Integer of how many inducing points to use. Inducing points are a fixed number of variables through which all memory is filtered, to achieve full compression. E.g. it can correspond to the number of neurons. This is not absolutely fixed, but it also doesn't grow necessarily proportionally to the data, since synapses can make more complicated combinations of the existing neurons. The GP is here playing the role of "synapses", by learning non-linear and rich combinations of the inducing points.
            init_X: Initialisation method for model output. String either `PCA` or `PPCA`. Default = `PCA`. Initialisation falls back to `PPCA` when `PCA` fails.

        Returns:
            None
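
        Example:
            A minimal sketch, assuming random data and the (hypothetical)
            variable name `samObject`:

                import numpy as np

                Y = np.random.randn(100, 20)  # 100 events with 20 features each (assumed data)
                samObject = LFM()
                # Single view, no inputs: this configures a Bayesian GPLVM with a
                # 2-dimensional latent space compressed through 30 inducing points.
                samObject.store(observed={'view0': Y}, inputs=None, Q=2, num_inducing=30)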
        """
        assert(isinstance(observed, dict))
        self.observed = observed
        self.__num_views = len(self.observed.keys())
        self.Q = Q
        # self.D = observed.shape[1]
        self.N = observed[observed.keys()[0]].shape[0]
        self.num_inducing = num_inducing
        if num_inducing is None:
            self.num_inducing = self.N
        if inputs is None:
            if self.Q is None:
                self.Q = 2  # self.D
            if self.__num_views == 1:
                assert(self.type == [] or self.type == 'bgplvm')
                self.type = 'bgplvm'
            else:
                assert(self.type == [] or self.type == 'mrd')
                self.type = 'mrd'
        else:
            assert(self.type == [] or self.type == 'gp')
            assert(self.__num_views == 1)
            self.Q = inputs.shape[1]
            self.type = 'gp'
            self.inputs = inputs

        if kernel is None:
            kernel = GPy.kern.RBF(self.Q, ARD=True) + GPy.kern.Bias(self.Q) + GPy.kern.White(self.Q)

        if self.type == 'bgplvm':
            Ytmp = self.observed[self.observed.keys()[0]]
            pcaFailed = False
            if init_X == 'PCA':
                try:
                    self.model = GPy.models.BayesianGPLVM(Ytmp, self.Q, kernel=kernel, num_inducing=self.num_inducing)
                except ValueError:
                    pcaFailed = True
                    print "Initialisation with PCA failed. Initialising with PPCA..."
            # A plain `if` (not `elif`), so the PPCA fallback also runs when the
            # PCA initialisation above has failed.
            if init_X == 'PPCA' or pcaFailed:
                print "Initialising with PPCA..."
                Xr = GPy.util.linalg.ppca(Ytmp, self.Q, 2000)[0]
                Xr -= Xr.mean(0)
                Xr /= Xr.std(0)
                self.model = GPy.models.BayesianGPLVM(Ytmp, self.Q, kernel=kernel, num_inducing=self.num_inducing, X=Xr)
            self.model['.*noise'] = Ytmp.var() / 100.
        elif self.type == 'mrd':
            # Create a list of observation spaces (aka views)
            self.Ylist = []
            self.namesList = []
            for k in self.observed.keys():
                self.Ylist.append(self.observed[k])
                self.namesList.append(k)
            pcaFailed = False
            if init_X == 'PCA':
                try:
                    self.model = GPy.models.MRD(self.Ylist, input_dim=self.Q, num_inducing=self.num_inducing,
                                                kernel=kernel, initx="PCA_single", initz='permute')
                except ValueError:
                    pcaFailed = True
                    print "Initialisation with PCA failed. Initialising with PPCA..."
            # Plain `if` again, so the fallback also covers a failed PCA initialisation.
            if init_X == 'PPCA' or pcaFailed:
                print "Initialising with PPCA..."
                from GPy.util.initialization import initialize_latent
                Xr = np.zeros((self.Ylist[0].shape[0], self.Q))
                for qs, Y in zip(np.array_split(np.arange(self.Q), len(self.Ylist)), self.Ylist):
                    try:
                        x, frcs = initialize_latent('PCA', len(qs), Y)
                    except ValueError:
                        x = GPy.util.linalg.ppca(Y, len(qs), 2000)[0]
                    Xr[:, qs] = x
                Xr -= Xr.mean()
                Xr /= Xr.std()
                self.model = GPy.models.MRD(self.Ylist, input_dim=self.Q, num_inducing=self.num_inducing, kernel=kernel, initx="PCA_single", initz='permute', X=Xr)
            self.model['.*noise'] = [yy.var() / 100. for yy in self.model.Ylist]
        elif self.type == 'gp':
            self.model = GPy.models.SparseGPRegression(self.inputs, self.observed[self.observed.keys()[0]], kernel=kernel, num_inducing=self.num_inducing)

        self.model.data_labels = None
        self.model.textLabelPts = dict()

    # def _init_latents():
    #     from GPy.util.initialization import initialize_latent
    #     X, fracs = initialize_latent(init, input_dim, Y)

    def add_labels(self, labels):
        """Add labels to observations.

        Description:
            If observables are associated with labels, they can be added here. Labels has to be a matrix of size N x K, where K is the total number of different labels. If e.g. the i-th row of L is [1 0 0] (or [1 -1 -1]) then this means that there are K=3 different classes and the i-th row of the observables belongs to the first class.

        Args:
            labels: A `(N x K)` numpy array (or an `N`-dimensional vector of class indices) containing the labels for the observations.

        Returns:
            None
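
        Example:
            A minimal sketch, assuming three stored events in two classes with
            1-of-K encoding:

                import numpy as np

                L = np.array([[1, -1], [1, -1], [-1, 1]])
                samObject.add_labels(L)
                # Stored internally as the class indices [[0], [0], [1]].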
        """
        if len(labels.shape) == 1 or labels.shape[1] == 1:
            self.model.data_labels = labels
        else:
            print "Warning: labels assumed to be in 1-of-K encoding!"
            self.model.data_labels = np.argmax(labels, 1)[:, None]

    def learn(self, optimizer='bfgs', max_iters=1000, init_iters=300, verbose=True):
        """
        Learn the model (analogous to "forming synapses" after perceiving data).

        Args:
            optimizer: String with the requested optimiser taken from the list of available scipy optimisers.
            max_iters: Integer with the maximum number of training iterations for the second phase of training the model.
            init_iters: Integer with the maximum number of training iterations for the first phase of training.
            verbose: Boolean to turn logging to stdout on or off.

        Returns:
            None
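
        Example:
            A minimal sketch, continuing from `store` (the `samObject` name is
            an assumption):

                samObject.learn(optimizer='bfgs', max_iters=1000, init_iters=300)
                # Phase one optimises with the noise fixed for init_iters iterations;
                # phase two frees the noise and runs for max_iters, after which
                # check_snr() reports how much signal variance was captured.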
        """
        if self.type == 'bgplvm' or self.type == 'mrd':
            self.model['.*noise'].fix()
            self.model.optimize(optimizer, messages=verbose, max_iters=init_iters)
            self.model['.*noise'].unfix()
            self.model['.*noise'].constrain_positive()

        self.model.optimize(optimizer, messages=verbose, max_iters=max_iters)
        self.check_snr()

        for j in list(np.unique(self.model.data_labels)):
            self.model.textLabelPts[j] = [l for l, k in enumerate(self.model.data_labels) if k[0] == j]

    def check_snr(self, warningEnable=True, messages=True):
        """Checks the signal to noise ratio (SNR) of the trained model.

        Description:
            Provides an indicator of successful learning by looking at the variance distribution of the model.

        Args:
            warningEnable: Boolean to switch warnings on or off in the case of a low SNR.
            messages: Boolean to turn output to stdout on or off.

        Returns:
            Float with the SNR for a `bgplvm` model, a list of floats with the per-view SNR for an `mrd` model, or `None` otherwise.
        """
        if self.type == 'bgplvm':
            snr = self.model.Y.var()/self.model.Gaussian_noise.variance.values[0]

            if messages:
                print('# SNR: ' + str(snr))
            if warningEnable and snr < 8:
                print(' WARNING! SNR is small!')
        elif self.type == 'mrd':
            snr = []
            for i in range(len(self.model.bgplvms)):
                snr.append(self.model.bgplvms[i].Y.var()/self.model.bgplvms[i].Gaussian_noise.variance.values[0])
                if messages:
                    print('# SNR view ' + str(i) + ': ' + str(snr[-1]))
                if warningEnable and snr[-1] < 8:
                    print(' WARNING! SNR for view ' + str(i) + ' is small!!')
        else:
            snr = None
        return snr

    def visualise(self, which_indices=None, plot_scales=True):
        """
        Show the internal representation of the memory.

        Description:
            Creates a 2D plot showing the mean and variance distribution of the model.

        Args:
            which_indices: Tuple of two integers that specify which indices of the `Q` indices that make up the model are to be plotted.
            plot_scales: Boolean to switch scale labelling on or off in the plots.

        Returns:
            Plot object for the latent space visualisation.
        """
        # if self.type == 'bgplvm' and which_indices is None:
        #     which_indices = most_significant_input_dimensions(self.model)
        # if self.type == 'mrd' and which_indices is None:
        #     # Assume that labels modality is always the last one!!
        #     which_indices = most_significant_input_dimensions(self.model.bgplvms[-1])
        if self.type == 'bgplvm' or self.type == 'mrd':
            if self.model.data_labels is not None:
                ret = self.model.plot_latent(labels=self.model.data_labels, which_indices=which_indices)
            else:
                ret = self.model.plot_latent(which_indices=which_indices)
        elif self.type == 'gp':
            ret = self.model.plot()
        if self.type == 'mrd' and plot_scales:
            ret2 = self.model.plot_scales()

        # if self.type == 'mrd':
        #     ret1 = self.model.X.plot("Latent Space 1D")
        #     ret2 = self.model.plot_scales("MRD Scales")

        return ret

    def visualise_interactive(self, dimensions=(20, 28), transpose=True, order='F', invert=False, scale=False,
                              colorgray=True, view=0, which_indices=(0, 1)):
        """Interactive plot of the model.

        Description:
            Show the internal representation of the memory and allow the user to interact with it to map samples/points from the compressed space to the original output space.

        Args:
            dimensions: Tuple of integers describing the dimensions that the image needs to be transposed to for display.
            transpose: Boolean whether to transpose the image before display.
            order: String whether the array is in Fortran ordering ('F') or C ordering ('C').
            invert: Boolean whether to invert the pixels or not.
            scale: Boolean whether to scale the image or not.
            colorgray: Boolean whether to plot in grayscale or not.
            view: Integer in the case of MRD models which describes the view to be plotted.
            which_indices: Tuple of two integers that specify which indices of the `Q` indices that make up the model are to be plotted.

        Returns:
            None
        """

        if self.type == 'bgplvm':
            ax = self.model.plot_latent(which_indices)
            y = self.model.Y[0, :]
            # dirty code here
            if colorgray:
                data_show = GPy.plotting.matplot_dep.visualize.image_show(y[None, :], dimensions=dimensions, transpose=transpose, order=order, invert=invert, scale=scale, cmap=cm.Greys_r)
            else:
                data_show = GPy.plotting.matplot_dep.visualize.image_show(y[None, :], dimensions=dimensions, transpose=transpose, order=order, invert=invert, scale=scale)
            lvm = GPy.plotting.matplot_dep.visualize.lvm(self.model.X.mean[0, :].copy(), self.model, data_show, ax)
            raw_input('Press enter to finish')
        elif self.type == 'mrd':
            # NOT TESTED!!!
            ax = self.model.bgplvms[view].plot_latent(which_indices)
            y = self.model.bgplvms[view].Y[0, :]
            # dirty code here
            if colorgray:
                data_show = GPy.plotting.matplot_dep.visualize.image_show(y[None, :], dimensions=dimensions, transpose=transpose, order=order, invert=invert, scale=scale, cmap=cm.Greys_r)
            else:
                data_show = GPy.plotting.matplot_dep.visualize.image_show(y[None, :], dimensions=dimensions, transpose=transpose, order=order, invert=invert, scale=scale)
            lvm = GPy.plotting.matplot_dep.visualize.lvm(self.model.bgplvms[view].X.mean[0, :].copy(), self.model.bgplvms[view], data_show, ax)
            raw_input('Press enter to finish')

    def recall(self, locations):
        """
        Recall stored events.

        Description:
            This is closely related to performing pattern completion, but given "training" data.

        Args:
            locations: Integer index of the stored event to recall. Pass `-1` to recall all stored events.

        Returns:
            A `(Dx1)` numpy array containing the data of a training point as reconstructed by the model.
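
        Example:
            A minimal sketch on a trained model (the `samObject` name is an
            assumption):

                y0 = samObject.recall(0)     # reconstruction of the first stored event
                Yall = samObject.recall(-1)  # reconstructions of all stored events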
        """
        if locations == -1:
            locations = range(self.N)
        if self.type == 'bgplvm' or self.type == 'gp':
            return self.model.Y[locations, :].values
        elif self.type == 'mrd':
            return self.model.bgplvms[0].Y[locations, :].values

    def pattern_completion(self, test_data, view=0, verbose=False, visualiseInfo=None, optimise=100):
        """Recall novel events.

        Description:
            In the case of supervised learning, pattern completion means that we give new inputs and infer their corresponding outputs. In the case of unsupervised learning, pattern completion means that we give new outputs and we infer their corresponding "latent" inputs, i.e. the internal compressed representation of the new outputs in terms of the already formed "synapses".

        Args:
            test_data: A `(Dx1)` numpy array containing the feature vector for which you would like to obtain the closest neighbour.
            view: Integer which is the index for the view of the MRD model that will be used for pattern completion.
            verbose: Boolean switching logging to stdout on and off.
            visualiseInfo: Plot object returned by `visualise`. If present, plot the location of the pattern-completed point. If `None`, no plotting.
            optimise: Integer number of optimisation iterations when performing pattern completion.

        Returns:
            A `(Qx1)` numpy array with the predicted mean, a `(Qx1)` numpy array with the predicted variance, a plot object with the plotted point and an inference object returned by the optimiser.
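
        Example:
            A minimal sketch, assuming a trained single-view model with
            20-dimensional outputs:

                import numpy as np

                y_star = np.random.randn(1, 20)  # novel observation (assumed data)
                pred_mean, pred_var, pp, inf = samObject.pattern_completion(y_star)
                # pred_mean/pred_var describe the location of y_star in the
                # compressed (latent) space.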
        """
        if self.type == 'bgplvm':
            # tmp = self.model.infer_newX(test_data)[0]
            # pred_mean = tmp.mean
            # pred_variance = tmp.variance  # np.zeros(pred_mean.shape)
            tmp = self.model.infer_newX(test_data, optimize=False)[1]
            if optimise != 0:
                tmp.optimize(max_iters=optimise, messages=verbose)
            pred_mean = tmp.X.mean
            pred_variance = tmp.X.variance
        elif self.type == 'mrd':
            tmp = self.model.bgplvms[view].infer_newX(test_data, optimize=False)[1]
            if optimise != 0:
                tmp.optimize(max_iters=optimise, messages=verbose)
            pred_mean = tmp.X.mean
            pred_variance = tmp.X.variance
        elif self.type == 'gp':
            tmp = []
            pred_mean, pred_variance = self.model.predict(test_data)

        if (self.type == 'mrd' or self.type == 'bgplvm') and visualiseInfo is not None:
            ax = visualiseInfo['ax']
            inds0, inds1 = most_significant_input_dimensions(self.model)
            pp = ax.plot(pred_mean[:, inds0], pred_mean[:, inds1], 'om', markersize=11, mew=11)
            pb.draw()
        else:
            pp = None

        return pred_mean, pred_variance, pp, tmp

    def pattern_completion_inference(self, y, target_modality=-1):
        """Pattern completion wrapper.

        Description:
            1) First, we do normal pattern completion, where given an output y, we map to the memory space to get a test memory x*.
            2) Now the test memory x* is compared with stored memories. This allows us to infer the label of x*. If the labels are given in another modality (by default in the last one), then we return the label from that modality (careful: the encoding might be 1-of-K, e.g. -1 1 -1 -> 2, and noise might also exist). Instead, if the labels are not given in another modality (completely unsupervised learning), then we just return the index of the most similar training memory.

        Args:
            y: A `(Dx1)` numpy array containing the feature vector for which you would like to obtain the closest neighbour.
            target_modality: Integer which is the index for the view of the MRD model that will be used for pattern completion.

        Returns:
            Inference object returned by the optimiser containing the multi-dimensional mean and variance of the nearest neighbour.
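
        Example:
            A minimal sketch, assuming an MRD model whose last view holds the
            label modality:

                label_row = samObject.pattern_completion_inference(y_star, target_modality=-1)
                # For a bgplvm model, the index of the nearest training memory
                # is returned instead.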
        """
        # Returns the predictive mean, the predictive variance and the axis (pp) of the latent space backwards mapping.
        ret = self.pattern_completion(y)
        mm = ret[0]
        post = ret[3]
        # find nearest neighbour of mm and model.X
        dists = np.zeros((self.model.X.shape[0], 1))

        for j in range(dists.shape[0]):
            dists[j, :] = distance.euclidean(self.model.X.mean[j, :], mm[0].values)
        nn, min_value = min(enumerate(dists), key=operator.itemgetter(1))
        if self.type == 'mrd':
            ret = self.model.bgplvms[target_modality].Y[nn, :]
        elif self.type == 'bgplvm':
            ret = nn  # self.model.data_labels[nn]
        return ret

    def fantasy_memory(self, X, view=0):
        """Generating novel outputs.

        Description:
            The opposite of pattern completion. Instead of finding a memory from an output, here we find an output from a (possibly fantasy) memory. Here, a fantasy memory is a memory not existing in the training set, found by interpolating or sampling in the memory space.

        Args:
            X: A `(Qx1)` numpy array with the location in the `Q`-dimensional latent space from which an output is to be generated.
            view: Integer which is the index for the view of the MRD model that will be used to generate the fantasy_memory.

        Returns:
            A `(Dx1)` numpy array with the predicted mean and a `(Dx1)` numpy array with the predicted variance.
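
        Example:
            A minimal sketch, sampling the centre of the latent space:

                import numpy as np

                x_star = np.zeros((1, samObject.Q))
                y_mean, y_var = samObject.fantasy_memory(x_star)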
        """
        if self.type == 'mrd':
            pred_mean, pred_variance = self.model.bgplvms[view].predict(X)
        elif self.type == 'bgplvm':
            pred_mean, pred_variance = self.model.predict(X)
        elif self.type == 'gp':
            pred_mean, pred_variance = self.model.predict(X)
        return pred_mean, pred_variance

    def familiarity(self, Ytest, ytrmean=None, ytrstd=None, optimise=100):
        """Familiarity testing.

        Description:
            This function tests the familiarity/similarity of an input with the inputs used to train the model.

        Args:
            Ytest: A `(N x D)` numpy array whose familiarity/similarity is tested against the trained outputs of the model.
            ytrmean: A `(Dx1)` numpy array with the mean of the training inputs.
            ytrstd: A `(Dx1)` numpy array with the standard deviation of the training inputs.
            optimise: Integer number of optimisation iterations when performing pattern completion.

        Returns:
            A float with a measure of familiarity for Ytest with the current model.
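
        Example:
            A minimal sketch, assuming `Y` is the training data and `Ytest` is
            held-out data normalised with the training statistics:

                score = samObject.familiarity(Ytest, ytrmean=Y.mean(0), ytrstd=Y.std(0))
                # Higher values indicate Ytest is more consistent with the
                # stored memories.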
        """
        assert(self.type == 'bgplvm')

        N = Ytest.shape[0]
        if ytrmean is not None:
            Ytest -= ytrmean
            Ytest /= ytrstd

        from SAM.SAM_Core.svi_ratio import SVI_Ratio
        s = SVI_Ratio()
        _, _, _, qX = self.pattern_completion(Ytest, verbose=False, optimise=optimise)
        qX = qX.X

        ll = 0
        for i in range(N):
            ll += s.inference(self.model.kern, qX[i, :][None, :], self.model.Z, self.model.likelihood,
                              Ytest[i, :][None, :], self.model.posterior)[0]
        ll /= N
        return ll

    def __get_latent__(self):
        """Return the latent representation.

        Description:
            Convenience function to return the latent (compressed) representation learnt by the model. Note that, despite the variable name used internally, this returns the matrix of latent means rather than a count of dimensions.

        Args:
            None.

        Returns:
            A numpy array with the latent means of the model, or `None` if the model has no latent space.
        """
        if self.type == 'bgplvm':
            numLatentDimensions = self.model.X.mean
        elif self.type == 'mrd':
            numLatentDimensions = self.model.bgplvms[0].X.mean
        else:
            print('No latent space for this type of model.')
            numLatentDimensions = None
        return numLatentDimensions


def save_model(mm, fileName='m_serialized.txt'):
    """Save serialised model.

    Args:
        mm: Model object to save.
        fileName: String with the filename of the saved model.

    Returns:
        None
    """
    # mPruned = mm.getstate()  # TODO (store less stuff)
    output = open(fileName, 'wb')
    # pickle.dump(mPruned, output)
    pickle.dump(mm, output)
    output.close()


def load_model(fileName='m_serialized.txt'):
    """Load serialised model.

    Args:
        fileName: String with the filename of the model to load.

    Returns:
        SAMObject model.
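
    Example:
        A minimal round trip (the file path and `samObject` name are
        illustrative):

            save_model(samObject, 'm_serialized.txt')
            samObject2 = load_model('m_serialized.txt')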
    """
    mm = pickle.load(open(fileName, 'rb'))
    return mm


def save_pruned_model(mm, fileName='m_pruned', economy=False, extraDict=dict()):
    """Save a pruned model.

    Description:
        Save a trained model after pruning things that do not need to be stored. Setting economy to `True` triggers a storing mode which creates much smaller files. See the load_pruned_model discussion on what this means in terms of restrictions.

    Args:
        mm: Model object to save.
        fileName: String with the filename of the saved model.
        economy: Boolean to enable or disable economy saving.
        extraDict: Dictionary with parameters that are requested to be saved which are not in the default saved parameters but are required when loading the model for interaction.

    Returns:
        None
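
    Example:
        A minimal sketch (the path is illustrative; note that `mm.kernelString`
        must have been set beforehand, as `LFM.__init__` does not create it):

            save_pruned_model(samObject, '/tmp/myModel', economy=True)
            # Writes /tmp/myModel.pickle plus /tmp/myModel_model.h5 holding
            # the model's parameter array.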
    """
    SAMObjPruned = dict()
    SAMObjPruned['type'] = mm.type
    if mm.model:
        SAMObjPruned['textLabelPts'] = mm.model.textLabelPts
    # SAMObjPruned['observed'] = mm.observed # REMOVE
    # SAMObjPruned['inputs'] = mm.inputs
    # SAMObjPruned['__num_views'] = mm.__num_views
    SAMObjPruned['__num_views'] = None
    SAMObjPruned['Q'] = mm.Q
    SAMObjPruned['N'] = mm.N
    SAMObjPruned['num_inducing'] = mm.num_inducing
    SAMObjPruned['namesList'] = mm.namesList
    SAMObjPruned['kernelString'] = mm.kernelString
    SAMObjPruned.update(extraDict)

    # if economy:
    #     SAMObjPruned['modelPath'] = fileName + '_model.h5'
    #     # if file exists delete
    #     if(os.path.isfile(SAMObjPruned['modelPath'])):
    #         os.remove(SAMObjPruned['modelPath'])
    #     mm.model.save(SAMObjPruned['modelPath'])
    # else:
    #     SAMObjPruned['modelPath'] = fileName + '_model.pickle'
    #     mm.model.pickle(SAMObjPruned['modelPath'])
    folderPath = os.path.join('/', *fileName.split('/')[:-1])
    fileName = fileName.split('/')[-1]

    if economy:
        SAMObjPruned['modelPath'] = fileName + '_model.h5'
        # if file exists delete
        if os.path.isfile(os.path.join(folderPath, SAMObjPruned['modelPath'])):
            os.remove(os.path.join(folderPath, SAMObjPruned['modelPath']))
        if mm.model:
            mm.model.save(os.path.join(folderPath, SAMObjPruned['modelPath']))
    else:
        SAMObjPruned['modelPath'] = fileName + '_model.pickle'
        mm.model.pickle(os.path.join(folderPath, SAMObjPruned['modelPath']))

    output = open(os.path.join(folderPath, fileName) + '.pickle', 'wb')
    pickle.dump(SAMObjPruned, output)
    output.close()


def load_pruned_model(fileName='m_pruned', economy=False, m=None):
    """Load a pruned model.

    Description:
        Load a trained model. If economy is set to `True`, then a not-None initial model m is needed. This model needs to be created exactly as the one that was saved (so, it is demo specific!) and in this case calling the present function will set its parameters (meaning that you still need to create a model but don't need to optimise it).

    Args:
        fileName: String with the filename of the model to load.
        economy: Boolean to indicate whether an economy object is being loaded or not.
        m: Model object into which the loaded data is stored. If left at `None`, the model is loaded into a default model initialisation.

    Returns:
        SAMObject model
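
    Example:
        A minimal sketch; with economy=True, the (hypothetical) `skeletonModel`
        must be constructed exactly as the model that was saved:

            samObject = load_pruned_model('/tmp/myModel', economy=True, m=skeletonModel)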
    """
    folderPath = os.path.join('/', *fileName.split('/')[:-1])
    SAMObjPruned = pickle.load(open(fileName + '.pickle', 'rb'))
    SAMObject = LFM()
    if economy:
        assert m is not None
        import tables
        f = tables.open_file(os.path.join(fileName + '_model.h5'), 'r')
        m.param_array[:] = f.root.param_array[:]
        f.close()
        m._trigger_params_changed()
        SAMObject.model = m
    else:
        with open(SAMObjPruned['modelPath'], 'rb') as f:
            print "Loading file: " + str(f)
            SAMObject.model = pickle.load(f)
    # TODO: The following is supposed to update the model, but maybe not. Change...
    # LB get error here using MRD
    # SAMObject.model.update_toggle()
    # SAMObject.model.update_toggle()

    SAMObject.type = SAMObjPruned['type']
    # SAMObject.observed = SAMObjPruned['observed']
    # SAMObject.inputs = SAMObjPruned['inputs']
    SAMObject.model.textLabelPts = SAMObjPruned['textLabelPts']
    SAMObject.__num_views = SAMObjPruned['__num_views']
    SAMObject.Q = SAMObjPruned['Q']
    SAMObject.N = SAMObjPruned['N']
    SAMObject.num_inducing = SAMObjPruned['num_inducing']
    SAMObject.namesList = SAMObjPruned['namesList']

    return SAMObject


def most_significant_input_dimensions(model):
    """Determine the most descriptive input dimensions.

    Description:
        Helper function to determine which dimensions should be plotted based on the relevance weights.

    Args:
        model: Model object to be assessed.

    Returns:
        Integer indicating the most descriptive dimension and an integer indicating the second most descriptive dimension.
    """
    if model.input_dim == 1:
        input_1 = 0
        input_2 = None
    elif model.input_dim == 2:
        input_1, input_2 = 0, 1
    else:
        try:
            input_1, input_2 = np.argsort(model.input_sensitivity())[::-1][:2]
        except:
            raise ValueError("cannot automatically determine which dimensions to plot, please pass 'which_indices'")

    return input_1, input_2