# Select the Tk backend; this must run before pyplot is imported below.
# NOTE(review): `matplotlib` and `np` are imported on earlier lines not visible
# in this chunk — confirm `import matplotlib` / `import numpy as np` precede this.
matplotlib.use("TkAgg")

from ConfigParser import SafeConfigParser  # Python 2 stdlib config parser
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from os import listdir
from os.path import join, isdir

# Compact numpy array printing for the diagnostic logging in this module.
np.set_printoptions(precision=2)
33 """Initialise SAM Model data structure, training parameters and user parameters. 35 This method starts by initialising the required Driver from the driver name in argv[3] if it exists 36 in SAM_Drivers folder. The standard model parameters and the specific user parameters are then initialised 37 and the data is read in by the SAMDriver.readData method to complete the model data structure. This method 38 then replicates the model data structure for training with multiple models if it is required in the config 39 file loaded by the Driver. 42 argv_0: dataPath containing the data that is to be trained on. 43 argv_1: modelPath containing the path of where the model is to be stored. 44 argv_2: driverName containing the name of the driver class that is to be loaded from SAM_Drivers folder. 45 update: String having either a value of 'update' or 'new'. 'new' will load the parameters as set in the 46 config file of the driver being loaded present in the dataPath directory. This is used to train a 47 new model from scratch. 'update' will check for an existing model in the modelPath directory and 48 load the parameters from this model if it exists. This is used for retraining a model when new 49 data becomes available. 50 initMode: String having either a value of 'training' or 'interaction'. 'training' takes into consideration 51 the value of update in loading the parameters. (Used by trainSAMModel.py) 'interaction' loads 52 the parameters directly from the model if the model exists. (Used by interactionSAMModel.py) 55 The output is a list of SAMDriver models. The list is of length 1 when the config file requests a single 56 model or a list of length n+1 for a config file requesting multiple models where n is the number of 57 requested models. The number of models either depends on the number of directories present in the dataPath 58 or from the length of textLabels returned from the SAMDriver.readData method. 67 stringCommand =
'from SAM.SAM_Drivers import ' + driverName +
' as Driver' 68 logging.info(stringCommand)
73 trainName = dataPath.split(
'/')[-1]
76 participantList = [f
for f
in listdir(dataPath)
if isdir(join(dataPath, f))]
79 logging.info(
'-------------------')
80 logging.info(
'Training Settings:')
82 logging.info(
'Init mode: '.ljust(off) + str(initMode))
83 logging.info(
'Data Path: '.ljust(off) + str(dataPath))
84 logging.info(
'Model Path: '.ljust(off) + str(modelPath))
85 logging.info(
'Participants: '.ljust(off) + str(participantList))
86 logging.info(
'Model Root Name: '.ljust(off) + str(trainName))
87 logging.info(
'Training Mode:'.ljust(off) + str(mode))
88 logging.info(
'Driver:'.ljust(off) + str(driverName))
89 logging.info(
'-------------------')
90 logging.info(
'Loading Parameters...')
95 parser = SafeConfigParser()
96 found = parser.read(dataPath +
"/config.ini")
98 if parser.has_option(trainName,
'update_mode'):
99 modeConfig = parser.get(trainName,
'update_mode')
101 modeConfig =
'update' 102 logging.info(modeConfig)
106 defaultParamsList = [
'experiment_number',
'model_type',
'model_num_inducing',
107 'model_num_iterations',
'model_init_iterations',
'verbose',
108 'Quser',
'kernelString',
'ratioData',
'update_mode',
'model_mode',
109 'temporalModelWindowSize',
'optimiseRecall',
'classificationDict',
110 'useMaxDistance',
'calibrateUnknown']
112 mySAMpy.experiment_number =
None 113 mySAMpy.model_type =
None 114 mySAMpy.kernelString =
None 116 mySAMpy.ratioData =
None 118 if initMode ==
'training' and (mode ==
'new' or modeConfig ==
'new' or 'exp' not in modelPath):
119 logging.info(
'Loading training parameters from:' + str(dataPath) +
"/config.ini")
122 parser = SafeConfigParser()
123 parser.optionxform = str
124 found = parser.read(dataPath +
"/config.ini")
126 mySAMpy.experiment_number =
'exp' 128 if parser.has_option(trainName,
'model_type'):
129 mySAMpy.model_type = parser.get(trainName,
'model_type')
132 mySAMpy.model_type =
'mrd' 134 if parser.has_option(trainName,
'model_num_inducing'):
135 mySAMpy.model_num_inducing = int(parser.get(trainName,
'model_num_inducing'))
138 mySAMpy.model_num_inducing = 30
140 if parser.has_option(trainName,
'model_num_iterations'):
141 mySAMpy.model_num_iterations = int(parser.get(trainName,
'model_num_iterations'))
144 mySAMpy.model_num_iterations = 700
146 if parser.has_option(trainName,
'model_init_iterations'):
147 mySAMpy.model_init_iterations = int(parser.get(trainName,
'model_init_iterations'))
150 mySAMpy.model_init_iterations = 2000
152 if parser.has_option(trainName,
'verbose'):
153 mySAMpy.verbose = parser.get(trainName,
'verbose') ==
'True' 156 mySAMpy.verbose =
False 158 if parser.has_option(trainName,
'optimiseRecall'):
159 mySAMpy.optimiseRecall = int(parser.get(trainName,
'optimiseRecall'))
162 mySAMpy.optimiseRecall = 200
164 if parser.has_option(trainName,
'useMaxDistance'):
165 mySAMpy.useMaxDistance = parser.get(trainName,
'useMaxDistance') ==
'True' 167 mySAMpy.useMaxDistance =
False 169 if parser.has_option(trainName,
'calibrateUnknown'):
170 mySAMpy.calibrateUnknown = parser.get(trainName,
'calibrateUnknown') ==
'True' 172 mySAMpy.calibrateUnknown =
False 174 if parser.has_option(trainName,
'model_mode'):
175 mySAMpy.model_mode = parser.get(trainName,
'model_mode')
176 if mySAMpy.model_mode ==
'temporal' and parser.has_option(trainName,
'temporalModelWindowSize'):
177 mySAMpy.temporalWindowSize = int(parser.get(trainName,
'temporalModelWindowSize'))
182 mySAMpy.model_mode =
'single' 184 if parser.has_option(trainName,
'Quser'):
185 mySAMpy.Quser = int(parser.get(trainName,
'Quser'))
190 if parser.has_option(trainName,
'kernelString'):
191 mySAMpy.kernelString = parser.get(trainName,
'kernelString')
194 mySAMpy.kernelString =
"GPy.kern.RBF(Q, ARD=False) + GPy.kern.Bias(Q) + GPy.kern.White(Q)" 196 if parser.has_option(trainName,
'ratioData'):
197 mySAMpy.ratioData = int(parser.get(trainName,
'ratioData'))
200 mySAMpy.ratioData = 50
203 logging.info(
'Default settings applied')
205 mySAMpy.paramsDict = dict()
206 mySAMpy.loadParameters(parser, trainName)
209 logging.warning(
'IO Exception reading ', found)
212 logging.info(
'Loading parameters from: \n \t' + str(modelPath))
214 parser = SafeConfigParser()
215 parser.optionxform = str
216 found = parser.read(dataPath +
"/config.ini")
219 mySAMpy.experiment_number = modelPath.split(
'__')[-1]
221 modelPickle = pickle.load(open(modelPath+
'.pickle',
'rb'))
222 mySAMpy.paramsDict = dict()
223 for j
in parser.options(trainName):
224 if j
not in defaultParamsList:
226 mySAMpy.paramsDict[j] = modelPickle[j]
228 mySAMpy.ratioData = modelPickle[
'ratioData']
229 mySAMpy.model_type = modelPickle[
'model_type']
230 mySAMpy.model_mode = modelPickle[
'model_mode']
231 if mySAMpy.model_mode ==
'temporal':
232 mySAMpy.temporalModelWindowSize = modelPickle[
'temporalModelWindowSize']
233 mySAMpy.model_type =
'mrd' 234 mySAMpy.model_num_inducing = modelPickle[
'model_num_inducing']
235 mySAMpy.model_num_iterations = modelPickle[
'model_num_iterations']
236 mySAMpy.model_init_iterations = modelPickle[
'model_init_iterations']
237 mySAMpy.verbose = modelPickle[
'verbose']
238 mySAMpy.Quser = modelPickle[
'Quser']
239 mySAMpy.optimiseRecall = modelPickle[
'optimiseRecall']
240 mySAMpy.kernelString = modelPickle[
'kernelString']
241 mySAMpy.calibrated = modelPickle[
'calibrated']
245 mySAMpy.useMaxDistance = modelPickle[
'useMaxDistance']
247 logging.warning(
'Failed to load useMaxDistace. Possible reasons: ' 248 'Not saved or multiple model implementation')
249 mySAMpy.calibrateUnknown = modelPickle[
'calibrateUnknown']
250 if mySAMpy.calibrateUnknown:
251 mySAMpy.classificationDict = modelPickle[
'classificationDict']
254 logging.warning(
'IO Exception reading ', found)
257 if 'exp' in modelPath
or 'best' in modelPath
or 'backup' in modelPath:
258 fnameProto =
'/'.join(modelPath.split(
'/')[:-1]) +
'/' + dataPath.split(
'/')[-1] +
'__' + driverName + \
259 '__' + mySAMpy.model_type +
'__' + str(mySAMpy.experiment_number)
261 fnameProto = modelPath + dataPath.split(
'/')[-1] +
'__' + driverName +
'__' + mySAMpy.model_type + \
262 '__' + str(mySAMpy.experiment_number)
264 logging.info(
'Full model name: ' + str(fnameProto))
265 logging.info(
'-------------------')
268 mySAMpy.save_model =
False 269 mySAMpy.economy_save =
True 270 mySAMpy.visualise_output =
False 273 mySAMpy.readData(dataPath, participantList)
275 if mySAMpy.model_mode !=
'temporal':
277 mySAMpy.textLabels = list(set(mySAMpy.L))
280 mySAMpy.L = np.asarray([mySAMpy.textLabels.index(i)
for i
in mySAMpy.L])[:,
None]
281 mySAMpy.textLabels = mySAMpy.textLabels
293 if mySAMpy.model_mode ==
'single' or mySAMpy.model_mode ==
'temporal':
294 mm[0].participantList = [
'all']
296 mm[0].participantList = [
'root'] + mySAMpy.textLabels
298 for k
in range(len(mm[0].participantList)):
299 if mm[0].participantList[k] ==
'all':
301 minData = len(mm[k].L)
302 mm[0].fname = fnameProto
303 mm[0].model_type = mySAMpy.model_type
304 Ntr = int(mySAMpy.ratioData * minData / 100)
309 inds = [i
for i
in range(len(mm[0].Y[
'L']))
if mm[0].Y[
'L'][i] == k - 1]
310 mm[k].Y = mm[0].Y[
'Y'][inds]
311 mm[k].L = mm[0].Y[
'L'][inds]
312 mm[k].Quser = mm[0].Quser
313 mm[k].verbose = mm[0].verbose
314 logging.info(
'Object class: ' + str(mm[0].participantList[k]))
316 mm[k].fname = fnameProto +
'__L' + str(k - 1)
317 mm[0].listOfModels.append(mm[k].fname)
318 mm[k].model_type =
'bgplvm' 319 Ntr = int(mySAMpy.ratioData * minData / 100)
322 normaliseData =
False 323 mm[0].listOfModels = []
324 mm[0].fname = fnameProto
325 mm[0].SAMObject.kernelString =
'' 326 minData = len(mm[0].L)
327 Ntr = int(mySAMpy.ratioData * minData / 100)
328 mm[k].modelLabel = mm[0].participantList[k]
330 if mm[0].model_mode !=
'temporal':
332 [Yall, Lall, YtestAll, LtestAll] = mm[k].prepareData(mm[k].model_type, Ntr,
334 normalise=normaliseData)
337 mm[k].YtestAll = YtestAll
338 mm[k].LtestAll = LtestAll
339 elif mm[0].model_mode ==
'temporal':
340 [Xall, Yall, Lall, XtestAll, YtestAll, LtestAll] = mm[k].prepareData(mm[k].model_type, Ntr,
342 normalise=normaliseData)
346 mm[k].XtestAll = XtestAll
347 mm[k].YtestAll = YtestAll
348 mm[k].LtestAll = LtestAll
350 logging.info(
'minData = ' + str(minData))
351 logging.info(
'ratioData = ' + str(mySAMpy.ratioData))
352 logging.info(
'-------------------------------------------------------------------------------------------------')
353 if initMode ==
'training':
354 samOptimiser.deleteModel(modelPath,
'exp')
355 for k
in range(len(mm[0].participantList)):
357 if mm[0].participantList[k] !=
'root':
359 logging.info(
"Training with " + str(mm[0].model_num_inducing) +
' inducing points for ' +
360 str(mm[0].model_init_iterations) +
'|' + str(mm[0].model_num_iterations))
361 logging.info(
"Fname:" + str(mm[k].fname))
363 mm[k].training(mm[0].model_num_inducing, mm[0].model_num_iterations,
364 mm[0].model_init_iterations, mm[k].fname, mm[0].save_model,
365 mm[0].economy_save, keepIfPresent=
False, kernelStr=mm[0].kernelString)
367 if mm[0].visualise_output:
368 ax = mm[k].SAMObject.visualise()
369 visualiseInfo = dict()
370 visualiseInfo[
'ax'] = ax
374 for k
in range(len(mm[0].participantList)):
376 if mm[0].participantList[k] !=
'root':
377 logging.info(
"Training with " + str(mm[0].model_num_inducing) +
' inducing points for ' +
378 str(mm[0].model_init_iterations) +
'|' + str(mm[0].model_num_iterations))
380 mm[k].training(mm[0].model_num_inducing, mm[0].model_num_iterations,
381 mm[0].model_init_iterations, mm[k].fname, mm[0].save_model,
382 mm[0].economy_save, keepIfPresent=
True, kernelStr=mm[0].kernelString)
def varianceClass(varianceDirection, x, thresh):
    """Utility function to perform threshold or range checking.

    Args:
        varianceDirection: List of strings with the conditions to check.
        x: The value to be checked.
        thresh: The threshold against which x is to be checked given the checks in varianceDirection.
            A two-element sequence for the range checks; assumed scalar for the
            single-direction checks — confirm with callers.

    Returns:
        Boolean True or False confirming arg x validates varianceDirection conditions for the
        threshold. Returns None when varianceDirection matches none of the known forms.
    """
    if varianceDirection == ['greater', 'smaller']:
        # Inside the open interval (thresh[0], thresh[1]).
        return thresh[0] < x < thresh[1]
    elif varianceDirection == ['smaller', 'greater']:
        # Inside the reversed interval (thresh[1], thresh[0]).
        return thresh[0] > x > thresh[1]
    elif varianceDirection == ['greater']:
        # NOTE(review): the two single-direction returns were lost in extraction
        # (the visible branches fell through to None); restored as simple comparisons.
        return x > thresh
    elif varianceDirection == ['smaller']:
        return x < thresh
410 """Custom TimeoutError Exception. 413 Class used to raise TimeoutError Exceptions. 418 class InterruptableThread(threading.Thread):
419 """Class to launch a function inside of a separate thread. 421 def __init__(self, func, *args, **kwargs):
423 Initialise the interruptible thread. 425 threading.Thread.__init__(self)
441 Result of the function. 448 Class to terminate a function running inside of a separate thread. 452 Initialise the timeout function. 458 Initialise an interruptible thread and start the thread. 460 def wrapped_f(*args, **kwargs):
464 if not it.is_alive():
def plotKnownAndUnknown(varDict, colour, axlist, width=[0.2, 0.2], factor=[(0, 0.6), (0.4, 1)], plotRange=False):
    """Utility function to plot variances of known and unknown as gaussian distributions.

    Args:
        varDict: Dictionary containing the mean and variances of known and unknown for different
            sections of data.
        colour: List of strings with the colours to be used for each plot.
        axlist: Plot object to pass in and update.
        width: List of floats with the linewidth for the plots. (Mutable default is never
            mutated here, so it is kept for interface compatibility.)
        factor: List of tuples with factors for the plotting of ranges.
        plotRange: Boolean to plot a range together with gaussian distributions or not.

    Returns:
        Plot object for the generated plot.
    """
    # NOTE(review): the count initialisation, the statistics extraction and the return were
    # lost in extraction; reconstructed from the visible call signature — confirm upstream.
    count = 0
    for k, j in enumerate(varDict.keys()):
        # Skip empty entries and aggregate '...Results' entries.
        if len(varDict[j]) > 0 and 'Results' not in j:
            [mlist, vlist, rlist] = meanVar_varianceDistribution(varDict[j])
            axlist = plotGaussFromList(mlist, vlist, rlist, colour[count], j, width[count], factor[count], axlist, plotRange)
            count += 1
    return axlist
def bhattacharyya_distance(mu1, mu2, var1, var2):
    """Calculates a measure for the separability of two univariate gaussians.

    Returns the bhattacharyya distance that is used to optimise for separability between known
    and unknown classes when these are modelled as univariate gaussians.

    Args:
        mu1: Float with mean of distribution 1.
        mu2: Float with mean of distribution 2.
        var1: Float with variance of distribution 1.
        var2: Float with variance of distribution 2.

    Returns:
        Returns a float with the bhattacharyya distance between the two distributions.
    """
    t1 = float(var1/var2) + float(var2/var1) + 2
    # NOTE(review): this line was dropped by extraction (t2 was used but undefined);
    # restored from the univariate Bhattacharyya formula:
    # DB = 1/4 * ln(1/4 * (v1/v2 + v2/v1 + 2)) + 1/4 * (mu1-mu2)^2 / (v1+v2).
    t2 = np.log(0.25 * t1)
    t3 = float(mu1-mu2)*float(mu1-mu2)
    t4 = t3/float(var1+var2)
    return 0.25*t2 + 0.25*t4
519 """Generate and display a confusion matrix. 521 This method plots a formatted confusion matrix from the provided array and target names. 524 cm: Square numpy array containing the values for the confusion matrix. 525 targetNames: labels for the different classes. 526 title: Title of the plot. 527 cmap: Matplotlib colourmap for the plot. 530 No return. Blocking call to matplotlib.plot. 533 plt.imshow(cm, interpolation=
'nearest', cmap=cmap)
536 tick_marks = np.arange(len(targetNames))
537 plt.xticks(tick_marks, targetNames, rotation=45)
538 plt.yticks(tick_marks, targetNames)
540 plt.ylabel(
'True label')
541 plt.xlabel(
'Predicted label')
# --- plotGaussFromList -------------------------------------------------------
# NOTE(review): this block is garbled by text extraction — the original source
# line numbers are fused into the code and several lines are missing (the guard
# before the subplot creation, the `if k < numPlots - 2:` title branch, tick
# parameter calls, the `if plotRange:` guard and the final return). Kept
# byte-identical below; comments only. Reconstruct against upstream before use.
545 def plotGaussFromList(mlist, vlist, rlist, colour, label, width, factor, axlist, plotRange=False):
# Docstring fragments: draws one Gaussian per (mean, variance) pair on a shared figure.
547 Plot multiple Gaussians from a list on the same plot. 550 mlist: List of float means. 551 vlist: List of float variances. 552 rlist: List of float data ranges. 553 colour: Colour for the plots. 554 label: Label for the plot. 555 width: Width of line in the plot. 556 factor: Factor for the height of the ranges to make them more aesthetic. 557 axlist: List of axes. 558 plotRange: Boolean to plot ranges or not. 563 numPlots = len(mlist)
# One subplot per Gaussian; the condition deciding when to (re)create the figure is missing here.
567 f, axlist = plt.subplots(1, numPlots, figsize=(12.0, 7.5))
# Title each axis: per-dimension 'D k' plots, then a 'Sum' and a 'Mean' axis at the end.
# The `if` guard for the 'D k' branch was lost in extraction.
568 for k, j
in enumerate(axlist):
570 j.set_title(
'D ' + str(k), fontsize=20)
571 elif k == numPlots - 2:
572 j.set_title(
'Sum', fontsize=20)
573 elif k > numPlots - 2:
574 j.set_title(
'Mean', fontsize=20)
# Plot each Gaussian pdf over its data range padded by half the range on each side.
578 for j
in range(numPlots):
579 sigma = np.sqrt(vlist[j])
580 rangeData = rlist[j][1] - rlist[j][0]
581 x = np.linspace(rlist[j][0] - (rangeData / 2), rlist[j][1] + (rangeData / 2), 100)
# mlab.normpdf was removed in matplotlib >= 3.1 — presumably a Python 2 / old matplotlib file.
582 y = mlab.normpdf(x, mlist[j], sigma)
583 axlist[j].plot(x, y, colour, label=label)
# Dashed vertical markers at both ends of the data range, scaled by `factor`;
# the `if plotRange:` guard and the return of axlist are missing from this extraction.
585 axlist[j].plot([rlist[j][1], rlist[j][1]], [max(y)*factor[0], max(y)*factor[1]],
'--'+colour, linewidth=width)
586 axlist[j].plot([rlist[j][0], rlist[j][0]], [max(y)*factor[0], max(y)*factor[1]],
'--'+colour, linewidth=width)
def solve_intersections(m1, m2, std1, std2):
    """Solve for the intersection/s of two Gaussian distributions.

    Equating the two Gaussian pdfs and taking logs yields a quadratic
    a*x**2 + b*x + c = 0 whose roots are the intersection points.

    Args:
        m1: Float Mean of Gaussian 1.
        m2: Float Mean of Gaussian 2.
        std1: Float Standard Deviation of Gaussian 1.
        std2: Float Standard Deviation of Gaussian 2.

    Returns:
        Points of intersection for the two Gaussian distributions.
        (One root when std1 == std2; np.roots drops the leading zero coefficient.)
    """
    a = 1/(2*std1**2) - 1/(2*std2**2)
    b = m2/(std2**2) - m1/(std1**2)
    c = m1**2 / (2*std1**2) - m2**2 / (2*std2**2) - np.log(std2/std1)
    return np.roots([a, b, c])
def PfromHist(sample, hist, binWidth):
    """Calculates the probability of a sample from a histogram.

    Args:
        sample: Iterable of floats with the samples to be tested, one per histogram row.
        hist: Numpy array (or nested sequence) with normalised histogram probability values.
        binWidth: Float indicating the width for each probability bin.

    Returns:
        List of probabilities ranging from 0 to 1, one for each entry in sample
        with respect to the corresponding histogram row.
    """
    # Map each sample value onto its bin index.
    idx = np.asarray(sample)//binWidth
    # np.int was removed in NumPy 1.24; the builtin int is the documented replacement.
    idx = idx.astype(int)
    # NOTE(review): the list initialisation and the return were lost in extraction;
    # restored from the visible append loop.
    pList = []
    for j in range(len(idx)):
        pList.append(hist[j][idx[j]])
    return pList
def meanVar_varianceDistribution(dataList):
    """Calculate list of means, variances and ranges for the data in the dataList.

    Args:
        dataList: List of numpy arrays containing the data to check.

    Returns:
        List of means, list of variances and list of ranges. One for each column
        in the dataList.
    """
    mlist = []
    vlist = []
    rlist = []
    dataArray = np.asarray(dataList)
    if len(dataArray.shape) == 1:
        # Promote 1-D input to a single-column 2-D array.
        dataArray = dataArray[:, None]
    numPlots = dataArray.shape[1]
    for j in range(numPlots):
        # NOTE(review): the per-column statistics lines were lost in extraction;
        # reconstructed with np.mean/np.var — confirm against the upstream module.
        h = dataArray[:, j]
        mean = np.mean(h)
        variance = np.var(h)
        rlist.append((min(h), max(h)))
        mlist.append(mean)
        vlist.append(variance)
    return mlist, vlist, rlist
# --- bhattacharyya_dict ------------------------------------------------------
# NOTE(review): this block is garbled by text extraction — the `def` line, the
# loop over the dictionary keys, the knownLabel/unknownLabel assignments, the
# per-dimension distance accumulation (presumably calling bhattacharyya_distance)
# and the return are all missing. Kept byte-identical below; comments only.
669 Calculate bhattacharyya distances for each item in the dictionaries. 672 m : Dictionary of means. 673 v : Dictionary of variances. 676 List of bhattacharyya distances for the input dictionaries. 683 if 'known' == j.lower().split(
# Second word of the key decides the role: exactly 'known' vs containing 'unknown'.
' ')[1]:
685 elif 'unknown' in j.lower().split(
' ')[1]:
# Only compute distances when both a known and an unknown entry were found.
688 if unknownLabel
is not None and knownLabel
is not None:
689 numDists = len(m[knownLabel])
# One distance per dimension of the known entry; the loop body is missing here.
690 for j
in range(numDists):
def smooth1D(x, window_len=11, window='hanning'):
    """Smooth the data using a window with a requested size.

    This method is based on the convolution of a scaled window with the signal. The signal is
    prepared by introducing reflected copies of the signal (with the window size) in both ends
    so that transient parts are minimized in the beginning and end part of the output signal.
    (Based on the well-known SciPy-cookbook smoothing recipe.)

    Args:
        x: 1-dimensional numpy array with the input signal.
        window_len: The dimension of the smoothing window; should be an odd integer.
        window: The type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'.
            A flat window will produce a moving average smoothing.

    Returns:
        Numpy array with the smoothed signal, same length as x.

    Raises:
        ValueError: If x is not 1-dimensional, shorter than window_len, or window is unknown.
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # Window too small to smooth anything; return the signal unchanged.
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        # Typo fixed in the message: "is on of" -> "is one of".
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Pad with reflected copies of the signal at both ends.
    s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
    if window == 'flat':
        # Moving average.
        w = np.ones(window_len, 'd')
    else:
        # Whitelisted lookup replaces the original eval('np.'+window+'(window_len)');
        # behaviour is identical because `window` was validated above.
        w = getattr(np, window)(window_len)
    y = np.convolve(w/w.sum(), s, mode='valid')
    off = 1 if window_len % 2 > 0 else 0
    # Floor division keeps the slice indices integral under Python 3
    # (the original Python-2 `/` would yield float indices and raise).
    return y[(window_len//2-1):-((window_len//2)+off)]
def transformTimeSeriesToSeq(Y, timeWindow, offset=1, normalised=False, reduced=False, noY=False, doOffset=False):
    """Utility function to convert a time series into multiple time windows with additional functionality.

    Args:
        Y: Time series data, numpy array of shape (frames, dims).
        timeWindow: Length of the time window.
        offset: Number of non-overlapping frames between successive time windows.
        normalised: Boolean to normalise time windows with respect to the starting frame.
        reduced: Boolean to remove the starting frame if normalisation is enabled since this
            frame contains 0.
        noY: Boolean to skip generating the per-window target array.
        doOffset: Unused. In future versions this parameter will enable skipping data points
            between sampled windows.

    Returns:
        X: numpy array of size (numberWindows x lengthOfWindow) containing the time series
            split up into windows.
        Ynew: numpy array of size (numberWindows x dims) with the frame immediately following
            each window, or None when noY is True.

    NOTE(review): the branch guards, `base` computation and return were lost in extraction
    and are reconstructed here — confirm against the upstream SAM utilities module.
    """
    Ntr, D = Y.shape
    if noY:
        # No target frame needed after each window, so one extra window fits.
        blocksNumber = (Ntr - timeWindow + 1) // offset
    else:
        blocksNumber = (Ntr - timeWindow) // offset

    if normalised and reduced:
        # The first frame of each normalised window is all zeros and is dropped.
        X = np.zeros((blocksNumber, (timeWindow - 1) * D))
    else:
        X = np.zeros((blocksNumber, timeWindow * D))

    Ynew = None if noY else np.zeros((blocksNumber, D))

    for i in range(blocksNumber):
        base = i * offset
        tmp = Y[base:base + timeWindow, :].T
        if normalised:
            # Shift each dimension so the window starts at zero.
            tmp = np.subtract(tmp, tmp[:, 0][:, None])
            if reduced:
                tmp = np.delete(tmp, 0, 1)
        X[i, :] = tmp.flatten().T
        if not noY:
            # Target is the frame immediately following the window.
            Ynew[i, :] = Y[base + timeWindow, :]
    return X, Ynew
Custom TimeoutError Exception.
def __init__(self, func, *args, **kwargs)
Initialise the interruptible thread.
def transformTimeSeriesToSeq(Y, timeWindow, offset=1, normalised=False, reduced=False, noY=False, doOffset=False)
Utility function to convert a time series into multiple time windows with additional functionality...
def run(self)
Run the function.
def __init__(self, sec)
Initialise the timeout function.
def plotGaussFromList(mlist, vlist, rlist, colour, label, width, factor, axlist, plotRange=False)
Plot multiple Gaussians from a list on the same plot.
Class to launch a function inside of a separate thread.
def initialiseModels(argv, update, initMode='training')
Initialise SAM Model data structure, training parameters and user parameters.
def varianceClass(varianceDirection, x, thresh)
Utility function to perform threshold or range checking.
def bhattacharyya_distance(mu1, mu2, var1, var2)
Calculates a measure for the separability of two univariate gaussians.
def PfromHist(sample, hist, binWidth)
Calculates the probability of a sample from a histogram.
def smooth1D(x, window_len=11, window='hanning')
Smooth the data using a window with a requested size.
def solve_intersections(m1, m2, std1, std2)
Solve for the intersection/s of two Gaussian distributions.
def meanVar_varianceDistribution(dataList)
Calculate list of means, variances and ranges for the data in the dataList.
Class to terminate a function running inside of a separate thread.
def bhattacharyya_dict(m, v)
Calculate bhattacharyya distances for each item in the dictionaries.
def plot_confusion_matrix(cm, targetNames, title='Confusion matrix', cmap=plt.cm.inferno)
Generate and display a confusion matrix.
def plotKnownAndUnknown(varDict, colour, axlist, width=[0.2, 0.2], factor=[(0, 0.6), (0.4, 1)], plotRange=False)
Utility function to plot variances of known and unknown as gaussian distributions.
def __call__(self, f)
Initialise an interruptible thread and start the thread.