## Software:
## (c) Vicenç Torra
## Current version: 20230119
##
## This file includes the code for the experiments explained in the paper:
## E. Turkarslan, V. Torra, Measure Identification for the Choquet integral:
## a Python module, I. J. of Comp. Intel. Systems 15:89 (2022)
## https://doi.org/10.1007/s44196-022-00146-w
##
## This is based on the software for non-additive measures (capacities, fuzzy measures)
## Reference on this module in python:
## E. Turkarslan, V. Torra, Measure Identification for the Choquet integral:
## a Python module, I. J. of Comp. Intel. Systems 15:89 (2022)
## https://doi.org/10.1007/s44196-022-00146-w
##
## References on aggregation functions:
## V. Torra, Y. Narukawa (2007) Modeling Decisions, Springer.
##
## The following is needed to run these experiments
## (they define ciSolveMSE, fromMoebius2FM, choquetIntegral, etc.):
##
## exec(open("prog.vectors.matrices.web.txt").read())
## exec(open("prog.choquet.web.txt").read())

### These experiments also use sklearn datasets California and Diabetes


def testCalifornia (nRecords, belief=False, kAdditive=2):
    """
    Function: Learn fuzzy measure for the california data set (from sklearn)
              using the first nRecords.
              Options belief and kAdditive parameters for the fuzzy measure
    Data size: 20640 records, 8 inputs, 1 output
    Output: (0) fuzzy measure, (1) Moebius transform,
            (2) Shapley and interactions of two elements,
            (3) time elapsed in the function (in seconds).
    """
    timeStart = time.time()
    cal = sklearn.datasets.fetch_california_housing()
    # Keep only the first nRecords examples
    dCalX, dCaly = cal.data[0:nRecords], cal.target[0:nRecords]
    # Inputs normalized column-wise, output normalized as a single row
    dCalXn = preprocessing.normalize(dCalX, axis=0)
    dCalyn = preprocessing.normalize([dCaly])
    # One record per row: the 8 inputs followed by the output
    dCal = list(map(lambda rowInput, rowOutput: list(rowInput)+[rowOutput],
                    dCalXn, dCalyn[0]))
    cal_B_sol_prob = ciSolveMSE(dCal, belief=belief, kAdditive=kAdditive)
    # Moebius transform of the learnt measure; the empty set gets measure 0
    fmMob = [0]+list(cal_B_sol_prob['x'])
    # Values below 0.0001 cut to 0 (readability only; fmFM uses the raw fmMob)
    fmMob0 = list(map(lambda x: 0 if x < 0.0001 else x, fmMob))
    fmFM = fromMoebius2FM (fmMob, 8)
    fmInt2D = list(map(lambda row: list(map(lambda e: 0 if e < 0.0001 else e, row)),
                       ci2Interactions(fmFM, 8)))
    timeEnd = time.time()
    return (fmFM, fmMob0, fmInt2D, timeEnd-timeStart)

# dataCaliforniaFM2_10   = testCalifornia (10, belief=False, kAdditive=2)
# dataCaliforniaFM3_10   = testCalifornia (10, belief=False, kAdditive=3)
# dataCaliforniaFM2_100  = testCalifornia (100, belief=False, kAdditive=2)
# dataCaliforniaFM3_100  = testCalifornia (100, belief=False, kAdditive=3)
# dataCaliforniaFM2_1000 = testCalifornia (1000, belief=False, kAdditive=2)
# dataCaliforniaFM3_1000 = testCalifornia (1000, belief=False, kAdditive=3)
# dataCaliforniaFM2_10000= testCalifornia (10000, belief=False, kAdditive=2)
# dataCaliforniaFM3_10000= testCalifornia (10000, belief=False, kAdditive=3)
# dataCaliforniaFM2_20000= testCalifornia (20000, belief=False, kAdditive=2)
# FIX: the two "_20640" calls below used 20000 records; 20640 is the full data set
# dataCaliforniaFM2_20640= testCalifornia (20640, belief=False, kAdditive=2)
# dataCaliforniaFM3_20640= testCalifornia (20640, belief=False, kAdditive=3)


def testDiabetes (nRecords, belief=False, kAdditive=2):
    """
    Function: Learn fuzzy measure for the diabetes data set (from sklearn)
              using the first nRecords.
              Options belief and kAdditive parameters for the fuzzy measure
    Data size: 442 records, 10 inputs, 1 output
    Output: (0) fuzzy measure, (1) Moebius transform,
            (2) Shapley and interactions of two elements,
            (3) time elapsed in the function (in seconds).
    """
    timeStart = time.time()
    diabetes = sklearn.datasets.load_diabetes()
    # FIX: take only the first nRecords; the original ignored nRecords and
    # always used the full 442-record data set (cf. testCalifornia)
    diaX, diay = diabetes.data[0:nRecords], diabetes.target[0:nRecords]
    diaXn = preprocessing.normalize(diaX, axis=0)
    diayn = preprocessing.normalize([diay])
    dia = list(map(lambda rowInput, rowOutput: list(rowInput)+[rowOutput],
                   diaXn, diayn[0]))
    # FIX: forward belief/kAdditive; the original hard-coded belief=False, kAdditive=2
    dia_B_sol_prob = ciSolveMSE(dia, belief=belief, kAdditive=kAdditive)
    fmMob = [0]+list(dia_B_sol_prob['x'])
    fmMob0 = list(map(lambda x: 0 if x < 0.0001 else x, fmMob))
    fmFM = fromMoebius2FM (fmMob, 10)
    fmInt2D = list(map(lambda row: list(map(lambda e: 0 if e < 0.0001 else e, row)),
                       ci2Interactions(fmFM, 10)))
    timeEnd = time.time()
    return (fmFM, fmMob0, fmInt2D, timeEnd-timeStart)

# dataDiabetesFM2_10  = testDiabetes (10, belief=False, kAdditive=2)
# dataDiabetesFM3_10  = testDiabetes (10, belief=False, kAdditive=3)
# dataDiabetesFM2_100 = testDiabetes (100, belief=False, kAdditive=2)
# dataDiabetesFM3_100 = testDiabetes (100, belief=False, kAdditive=3)
# dataDiabetesFM2_442 = testDiabetes (442, belief=False, kAdditive=2)
# dataDiabetesFM3_442 = testDiabetes (442, belief=False, kAdditive=3)


def ciTrainData(n, nLearn, nTest, noiseLevelL, noiseLevelT, belief=False, kAdditive=False):
    """
    Function: Build a random fuzzy measure on n elements and sample a training
              set (nLearn records) and a test set (nTest records) whose output
              is the Choquet integral of the inputs plus Gaussian noise
              (noiseLevelL for training, noiseLevelT for testing).
    NOTE(review): belief and kAdditive are accepted for signature uniformity
    with the other ci* functions but are not used when generating the data.
    Output: (0) the fuzzy measure, (1) training set, (2) test set.
    """
    # Random max-sum representation, turned into a normalized fuzzy measure
    maxSumT = [0]+list(np.random.uniform(low=0.0, high=1.0, size=2**n-1))
    fmNN = fromMaxSum2FM (maxSumT, n)
    fmN = normalizeFM(fmNN)
    dsLearn = []
    for i in range(0, nLearn):
        ex = list(np.random.uniform(low=0.0, high=10.0, size=n))
        exOut = choquetIntegral (ex, fmN) + noiseLevelL * np.random.normal(loc=0.0, scale=1.0, size=None)
        ex.append(exOut)
        dsLearn.append(ex)
    dsTest = []
    for i in range(0, nTest):
        ex = list(np.random.uniform(low=0.0, high=10.0, size=n))
        exOut = choquetIntegral (ex, fmN) + noiseLevelT * np.random.normal(loc=0.0, scale=1.0, size=None)
        ex.append(exOut)
        dsTest.append(ex)
    return (fmN, dsLearn, dsTest)


def ciTrainAndTestError (n, nLearn, nTest, noiseLevelL, noiseLevelT, belief=False, kAdditive=False):
    """
    Function: Generate data with ciTrainData, learn a measure from the
              training set with ciSolveMSE, and compute squared errors.
    Output: (0) squared error on the test set,
            (1) squared error on the training set,
            (2) objective function value on the test set.
    """
    data_Test = ciTrainData(n, nLearn, nTest, noiseLevelL, noiseLevelT, belief, kAdditive)
    data_sol = ciSolveMSE(data_Test[1], belief, kAdditive)
    data_Mob = [0]+list(data_sol['x'])
    data_FM = fromMoebius2FM (data_Mob, n)
    # Test error: estimate each record from its inputs (all but the last column)
    data_estTest = list(map(lambda row: choquetIntegral(row[:-1], data_FM), data_Test[2]))
    data_outTest = list(map(lambda x: x[len(x)-1], data_Test[2]))
    data_difTest = vectorSum(data_estTest, vectorProductConstant(data_outTest, -1))
    data_errTest = vectorProduct(data_difTest, data_difTest)
    of_Test = ciModel_of(data_Test[2], data_Mob)
    # Training error: same computation on the training set
    data_estTr = list(map(lambda row: choquetIntegral(row[:-1], data_FM), data_Test[1]))
    data_outTr = list(map(lambda x: x[len(x)-1], data_Test[1]))
    data_difTr = vectorSum(data_estTr, vectorProductConstant(data_outTr, -1))
    data_errTr = vectorProduct(data_difTr, data_difTr)
    return (data_errTest, data_errTr, of_Test[0][0][0])


def ciTrainErrorAverages (n, nLearn, nTest, noiseLevelL, noiseLevelT, belief=False, kAdditive=False, nTimes=1):
    """
    Function: Repeat ciTrainAndTestError nTimes and aggregate the results.
    Output: (0) mean test error, (1) std of test error,
            (2) mean training error, (3) std of training error,
            (4) mean objective function value on the test set.
    """
    lErrTe = []
    lErrTr = []
    lOfTe = []
    for i in range(0, nTimes):
        err = ciTrainAndTestError (n, nLearn, nTest, noiseLevelL, noiseLevelT, belief, kAdditive)
        lErrTe.append(err[0])
        lErrTr.append(err[1])
        lOfTe.append(err[2])
    return (mean(lErrTe), statistics.stdev(lErrTe),
            mean(lErrTr), statistics.stdev(lErrTr), mean(lOfTe))


## EXPERIMENTS
##
## The code that follows is to reproduce the experiments described in the paper,
## as well as to generate the figures in the paper
## We include both the call as well as the results we obtained as an assignment to the variable
## (note that as some functions use random numbers the results of different executions may be different)

kValues = range(1,9)
res400K25bf = list(map(lambda k: ciTrainErrorAverages (8, 400, 80, 2, 0, belief=False, kAdditive=k, nTimes=25), kValues))
res400K25bt = list(map(lambda k: ciTrainErrorAverages (8, 400, 80, 2, 0, belief=True, kAdditive=k, nTimes=25), kValues))

## Results of these computations:
# res400K25bf = [(26.67750824067905, 12.369082999435282, 1665.7490036016568, 133.32295969619938, 26.67750824067907), (15.802278978931751, 4.127572971791897, 1493.982871465464, 85.09005084667189, 15.802278978932327), (17.669574133226234, 4.662320984835338, 1437.412918522892, 129.9389751958679, 17.66957413322607), (18.811912227417167, 3.2333110751188174, 1373.3292981691934, 112.58830765848012, 18.81191222741734), (22.30670374742088, 7.6877834556211235, 1312.7219461755906, 92.23987582658319, 22.306703747420098), (26.47880928167215, 7.742218484746488, 1289.213793467485, 105.27125603806707, 26.47880928167141), (23.859397178051864, 5.0494581900479565, 1362.7878433585965, 86.28521028138682, 23.85939717805247), (28.09392958672268, 6.531164561544416, 1304.1526562518377, 108.51432416935873, 28.093929586723362)]
# res400K25bt = [(22.33479369178165, 10.270307253078844, 1656.8208873649387, 122.0119594684999, 22.33479369178164), (12.650808072721903, 4.0939311505389355, 1541.2438598276137, 114.79687318004255, 12.650808072721821), (13.143499581551575, 4.693810904978094, 1557.6927508192166, 93.27856403293983, 13.143499581551751), (12.418800526332662, 3.882841589510752, 1572.9773175819996, 80.67011484553748, 12.418800526333117), (11.143654441787193, 4.050275112227132, 1530.3422675922939, 99.22118649412919, 11.143654441787493), (12.90596552681202, 5.49603369396811, 1570.0541380752145, 106.95097345974752, 12.905965526812633), (11.635361212158502, 4.1957384027494005, 1541.8396409887685, 99.31053316813473, 11.635361212158632), (10.872744328147407, 2.832288016917102, 1539.29356051216, 93.4745589632081, 10.872744328147837)]

# Figures in the paper ------------------------------------------------------
plt.close()
plt.plot(kValues, list(map(lambda x: x[0], res400K25bf)), label="Test")
# NOTE(review): training error divided by 60, presumably to bring it to the
# test-error scale for plotting -- confirm against the paper
plt.plot(kValues, list(map(lambda x: x[2]/60, res400K25bf)), label="Training")
plt.legend()
plt.xlabel('parameter k')
plt.ylabel('error')
plt.title('Fuzzy measure')
plt.savefig('fig.ci.res400K25bf.eps', format='eps')

plt.close()
plt.plot(kValues, list(map(lambda x: x[0], res400K25bt)), label="Test")
plt.plot(kValues, list(map(lambda x: x[2]/60, res400K25bt)), label="Training")
plt.legend()
plt.xlabel('parameter k')
plt.ylabel('error')
plt.title('Belief function')
plt.savefig('fig.ci.res400K25bt.eps', format='eps')

### Other figures not used in the paper
plt.close()
plt.plot(kValues, res400K25bt)  ## Nice
plt.xlabel('parameter k')
plt.ylabel('error')
plt.title('Belief function')
plt.savefig('fig.ciDim8x400t25k18bt.eps', format='eps')

plt.close()
plt.plot(kValues, res400K25bf)
plt.xlabel('parameter k')
plt.ylabel('error')
plt.title('Arbitrary measure')
plt.savefig('fig.ciDim8x400t25k18bf.eps', format='eps')

##
## RESULTS WITH 10 iterations -------------------------------------------------------
## NOTE: The results we obtained in our own experiments are given below in an assignment to these variables

noiseVal = range(0,5)
resNoise100K10btOFk1 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=True, kAdditive=1, nTimes=10), noiseVal))
resNoise100K10bfOFk1 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=False, kAdditive=1, nTimes=10), noiseVal))
resNoise100K10btOFk2 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=True, kAdditive=2, nTimes=10), noiseVal))
resNoise100K10bfOFk2 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=False, kAdditive=2, nTimes=10), noiseVal))
resNoise100K10btOFk3 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=True, kAdditive=3, nTimes=10), noiseVal))
resNoise100K10bfOFk3 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=False, kAdditive=3, nTimes=10), noiseVal))
resNoise100K10btOFk4 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=True, kAdditive=4, nTimes=10), noiseVal))
resNoise100K10bfOFk4 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=False, kAdditive=4, nTimes=10), noiseVal))
resNoise100K10btOFk5 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=True, kAdditive=5, nTimes=10), noiseVal))
resNoise100K10bfOFk5 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=False, kAdditive=5, nTimes=10), noiseVal))
resNoise100K10btOFk6 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=True, kAdditive=6, nTimes=10), noiseVal))
resNoise100K10bfOFk6 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=False, kAdditive=6, nTimes=10), noiseVal))
resNoise100K10btOFk7 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=True, kAdditive=7, nTimes=10), noiseVal))
resNoise100K10bfOFk7 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=False, kAdditive=7, nTimes=10), noiseVal))
resNoise100K10btOFk8 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=True, kAdditive=8, nTimes=10), noiseVal))
resNoise100K10bfOFk8 = list(map(lambda ns: ciTrainErrorAverages (8, 100, 20, ns, 0, belief=False, kAdditive=8, nTimes=10), noiseVal))

plt.ion()
plt.show()

# FIX: the six quick-look plot calls below referenced resNoise100K10bfOF and
# resNoise100K10btOF, which are never defined (only the ...OFk1..k8 variables
# exist above), so running them raised NameError.  They are kept commented
# out; substitute one of the k-variants to use them.
# Arbitrary measure
# plt.plot(noiseVal, list(map(lambda x: x[0], resNoise100K10bfOF)), label="Test")   ## Test
# plt.plot(noiseVal, list(map(lambda x: x[2], resNoise100K10bfOF)), label="Train")  ## Train
# plt.plot(noiseVal, list(map(lambda x: x[4], resNoise100K10bfOF)), label="Test")   ## OF -- Test
# Belief
# plt.plot(noiseVal, list(map(lambda x: x[0], resNoise100K10btOF)), label="Test")   ## Test
# plt.plot(noiseVal, list(map(lambda x: x[2], resNoise100K10btOF)), label="Train")  ## Train
# plt.plot(noiseVal, list(map(lambda x: x[4], resNoise100K10btOF)), label="Test")   ## OF -- Test


### FIGURES IN THE PAPER ----------------------------------

def _plotNoiseCurves(resByK, component, ylabelText, titleText, figFile):
    """
    Function: Plot, against noiseVal, component `component` of the result
              tuples of each of the eight k-additive experiments in resByK
              (one curve per k, labelled "k=1".."k=8"), and save to figFile.
    """
    plt.close()
    for k, res in enumerate(resByK, start=1):
        plt.plot(noiseVal, list(map(lambda x: x[component], res)), label="k="+str(k))
    plt.legend()
    plt.xlabel('noise level')
    plt.ylabel(ylabelText)
    plt.title(titleText)
    plt.savefig(figFile, format='eps')

# The eight experiments of each family, ordered by k
_resBtByK = [resNoise100K10btOFk1, resNoise100K10btOFk2, resNoise100K10btOFk3,
             resNoise100K10btOFk4, resNoise100K10btOFk5, resNoise100K10btOFk6,
             resNoise100K10btOFk7, resNoise100K10btOFk8]
_resBfByK = [resNoise100K10bfOFk1, resNoise100K10bfOFk2, resNoise100K10bfOFk3,
             resNoise100K10bfOFk4, resNoise100K10bfOFk5, resNoise100K10bfOFk6,
             resNoise100K10bfOFk7, resNoise100K10bfOFk8]

## CASE Belief function
## Training error (component 2), testing error (0), testing std (1), training std (3)
_plotNoiseCurves(_resBtByK, 2, 'error', 'Belief function', 'fig.ci.resNoise100K10btOFk.Tr.eps')
_plotNoiseCurves(_resBtByK, 0, 'error', 'Belief function', 'fig.ci.resNoise100K10btOFk.Te.eps')
_plotNoiseCurves(_resBtByK, 1, 'STD Testing', 'Belief function', 'fig.ci.resNoise100K10btOFk.TeSTD.eps')
_plotNoiseCurves(_resBtByK, 3, 'STD Training', 'Belief function', 'fig.ci.resNoise100K10btOFk.TrSTD.eps')

## CASE Fuzzy measure
_plotNoiseCurves(_resBfByK, 2, 'error', 'Fuzzy measure', 'fig.ci.resNoise100K10bfOFk.Tr.eps')
_plotNoiseCurves(_resBfByK, 0, 'error', 'Fuzzy measure', 'fig.ci.resNoise100K10bfOFk.Te.eps')
_plotNoiseCurves(_resBfByK, 3, 'STD Training', 'Fuzzy measure', 'fig.ci.resNoise100K10bfOFk.TrSTD.eps')
_plotNoiseCurves(_resBfByK, 1, 'STD Testing', 'Fuzzy measure', 'fig.ci.resNoise100K10bfOFk.TeSTD.eps')

## Stored results of previous computations to avoid recomputation:
## (these assignments come after the figures, so they do not affect the plots
## above; they overwrite the freshly computed values with the published ones)
##
## Case belief true:
##
resNoise100K10btOFk1 = [(5.753336439000133, 3.6488025979124594, 25.877437991580308, 16.159055964636842, 5.753336439000106), (6.631971423950652, 5.6688100503243515, 110.48039126690287, 27.14510854107792, 6.6319714239506995), (18.432932893174847, 13.347248875286649, 449.03031890144604, 62.04511069179935, 18.432932893174893), (20.17479298562575, 11.01556147493735, 841.8360954193267, 200.8419619516895, 20.17479298562581), (23.48788026842969, 8.910288831494633, 1441.3808036816504, 135.38042116114343, 23.48788026842966)]
resNoise100K10btOFk2 = [(0.6124765670594345, 0.23195370155392742, 1.7411763174837354, 0.2532489392040703, 0.6124765670593888), (2.5967863269352294, 1.1071775481081514, 96.64983578881238, 17.53325034331932, 2.596786326935171), (10.88056304539159, 7.54624770605378, 371.01349499456484, 54.0643182418227, 10.880563045391506), (13.917992467254024, 7.724721102868814, 791.0108976560714, 122.10633656187845, 13.917992467253914), (16.824883153687015, 8.342150269050196, 1413.2031172459265, 144.6588469716716, 16.824883153687118)]
resNoise100K10btOFk3 = [(0.5816497502187492, 0.2926429607102751, 1.986162040347552, 0.9580018236644063, 0.5816497502189464), (3.0084636063414547, 2.053146484384137, 85.84242506324905, 11.703089916262906, 3.008463606341434), (7.403501618745272, 2.2917266371979403, 380.3102580875356, 66.99892742539765, 7.4035016187451905), (14.493233632765993, 11.56931553452269, 840.2704281835065, 139.8329808108734, 14.493233632765811), (15.70758390980426, 6.7016370350997105, 1333.6306207797877, 178.5051758403401, 15.707583909804214)]
resNoise100K10btOFk4 = [(0.675497628239669, 0.3772821348324845, 2.0810038952121372, 1.8236273242343752, 0.6754976282397933), (3.626829313294846, 1.1933126086777193, 79.77065418285952, 8.35345432198522, 3.6268293132946328), (6.842235781956562, 2.2598542695553268, 366.7056792748146, 52.78764903220883, 6.842235781956549), (13.50646495009577, 5.96776750511048, 824.8934387448268, 58.09654674799561, 13.506464950095983), (17.522000092576565, 8.990270472431499, 1356.1557993955732, 248.02773676359683, 17.5220000925767)]
resNoise100K10btOFk5 = [(0.7324363816632464, 0.31820055218828225, 2.5056523554380794, 0.805424859371857, 0.732436381663183), (3.4717313848997575, 1.2743300708570329, 94.61161558785224, 13.417725640745978, 3.471731384899647), (7.654916613412732, 3.3063840319230575, 349.8710374298762, 35.64183576910914, 7.65491661341282), (10.425909972613233, 4.726523523321291, 807.3136848582715, 87.41561976090077, 10.425909972613038), (23.02780396730667, 11.494716869413809, 1464.1031128369373, 256.1683515358694, 23.02780396730642)]
resNoise100K10btOFk6 = [(0.6601792373345996, 0.17920278354182476, 2.2855005065777925, 1.0768745649692957, 0.6601792373346029), (3.3138364158224816, 1.5840034660138216, 85.80646664622171, 8.859670563017028, 3.3138364158223226), (6.477656227353873, 4.59874044652372, 345.7808711486837, 44.55665149917911, 6.477656227354106), (12.11434032086002, 5.798735972897694, 813.8489190050188, 104.53697810959363, 12.114340320860105), (22.464323292828233, 9.882630182265885, 1544.9273303057212, 194.36130864487316, 22.464323292828162)]
resNoise100K10btOFk7 = [(0.9274495461042003, 0.5486226286048236, 2.491882942194835, 1.138573137347565, 0.9274495461044807), (3.6976026096131065, 1.349999215928091, 87.50778549800354, 17.888012591080468, 3.69760260961329), (7.219972632277117, 3.9793374266068677, 353.5126375052427, 25.284077621664796, 7.219972632277461), (19.381124179499857, 8.699907906900107, 831.974820790532, 127.28606162143778, 19.381124179499743), (14.306698027525764, 8.144801068794338, 1517.8901062093414, 204.90611639487574, 14.306698027526062)]
resNoise100K10btOFk8 = [(0.5552928901133944, 0.2602415979069869, 2.2527590271715714, 1.1138346369087833, 0.5552928901130599), (3.6210526166550587, 1.611382134102323, 84.45863582066805, 17.97251345012711, 3.621052616655214), (8.156582632078127, 4.90767662915666, 388.51916196246856, 73.16421752332272, 8.156582632078244), (11.214967832175741, 4.436863375430114, 789.2665529300612, 100.40987101194203, 11.21496783217608), (21.827579104493974, 10.584670623120001, 1475.0808345880823, 232.0448505392817, 21.827579104493953)]
##
## Case belief false:
##
resNoise100K10bfOFk1 = [(7.224401117131438, 4.028601667218122, 27.06931397216615, 13.272030691303954, 7.224401117131174), (7.049221928509513, 2.8358181586446096, 125.76866202096964, 29.398472620330903, 7.0492219285095645), (8.275950864260164, 2.449338728119515, 375.14708153433, 59.2838530529483, 8.275950864259954), (15.353797295886963, 7.9936292657345955, 860.9835804335611, 93.39669701849068, 15.35379729588713), (16.97094850758787, 4.414769306693811, 1497.5548714885567, 189.32614676525094, 16.970948507587792)]
resNoise100K10bfOFk2 = [(0.9894908025127631, 0.25236912390622057, 1.4182515109285492, 0.4209809626900193, 0.9894908025130007), (4.419282267262402, 2.1692207096511664, 74.13168996303602, 8.68078637143642, 4.41928226726248), (7.433156583807923, 2.5707662343531603, 356.1971057376657, 65.68137492626363, 7.433156583808267), (15.472389580841773, 5.6615304644889255, 776.2212843074086, 125.63961867258105, 15.472389580841883), (23.25689308686177, 12.21376055701462, 1372.3559303330326, 142.87583501299508, 23.25689308686189)]
resNoise100K10bfOFk3 = [(0.8389131023100665, 0.2932047322765531, 0.1296559846718852, 0.04371631106473963, 0.8389131023100959), (4.947445337099081, 4.067035391751146, 59.40150788867807, 8.575218341034603, 4.947445337099429), (10.529338593438414, 9.260707061530097, 279.6232710363212, 53.48471200988202, 10.529338593438364), (15.875662187995676, 4.926564411208662, 694.2732370372954, 97.24611795935846, 15.875662187995584), (14.890836963382807, 5.553855065759949, 1299.8699976530756, 164.79104278087243, 14.890836963382856)]
resNoise100K10bfOFk4 = [(0.6267144931926635, 0.2696725741009748, 6.481501515146921e-07, 1.4638817917605121e-06, 0.6267144931927817), (4.529262535563484, 1.4928220649547972, 45.95025539478139, 8.066694699939081, 4.529262535563328), (9.55252322721146, 4.139035857480589, 257.2789039690788, 37.796139423664236, 9.552523227211617), (10.941871014976321, 3.251458468956736, 658.7387708007872, 114.56418394178691, 10.941871014975925), (14.198282941110424, 6.247966948880196, 1388.3160208432014, 249.64765502976283, 14.198282941110506)]
resNoise100K10bfOFk5 = [(0.7375724644651127, 0.34612007992484906, 8.05272283199204e-08, 1.3723182242389402e-07, 0.7375724644652564), (5.729377513860319, 2.711580024504735, 42.337338399899025, 9.088661165041971, 5.729377513860385), (8.61469834850951, 3.6717559126977397, 248.75030180675603, 25.499507905172567, 8.614698348509943), (13.816292776702756, 5.5317559555354965, 624.0703878834679, 91.46600756202282, 13.816292776702648), (15.090791673179861, 5.202258179035881, 1207.5277116145166, 197.4396829356519, 15.09079167317969)]
resNoise100K10bfOFk6 = [(0.848976676844638, 0.38761863136261526, 3.710720917863413e-08, 5.221558175566664e-08, 0.8489766768445008), (6.878645345972236, 3.5460704111674333, 44.278081476973185, 7.071749027104754, 6.878645345972285), (11.36230087520543, 4.600402163015479, 250.22735287598317, 58.75054689867051, 11.362300875205648), (15.596036771461996, 8.083827183069538, 621.1666956560416, 165.2073134110952, 15.596036771462087), (21.173048998337926, 9.411717307767976, 1216.7411033397434, 207.41073389101692, 21.173048998337823)]
resNoise100K10bfOFk7 = [(0.9421988516341753, 0.48611475949967975, 2.4739517421575283e-08, 5.2584773682942047e-08, 0.9421988516344868), (7.712198014764235, 3.972705377789392, 47.176007936173, 11.041858806107301, 7.712198014763743), (12.183604859849122, 4.882325445777457, 251.52288142249412, 49.19082027424352, 12.183604859849334), (14.490947066769845, 3.965575874704081, 605.5532766437825, 87.34351604005357, 14.490947066769895), (16.239837086445085, 7.829683756594732, 1205.5539396104373, 138.11885536625763, 16.23983708644432)]
resNoise100K10bfOFk8 = [(0.759041595188959, 0.3030010080412004, 1.1221275853134752e-07, 3.5002034549863854e-07, 0.7590415951887053), (5.4950063176897315, 2.3839369360791562, 42.33035280762816, 12.589894644019148, 5.495006317690008), (11.395183677013048, 4.96783355871412, 242.95337386750026, 47.3255176264835, 11.395183677012573), (14.19040278465794, 4.183455473483279, 647.3475634664968, 116.16810332423132, 14.19040278465775), (17.886972781382767, 11.278144781614282, 1257.5452100865032, 223.55394688063387, 17.886972781383342)]