Last updated on 2019-03-25 09:57:10 CET.
Flavor | Version | Tinstall (s) | Tcheck (s) | Ttotal (s) | Status | Flags |
---|---|---|---|---|---|---|
r-devel-linux-x86_64-debian-clang | 0.0.19 | 14.76 | 259.35 | 274.11 | ERROR | |
r-devel-linux-x86_64-debian-gcc | 0.0.19 | 10.73 | 195.21 | 205.94 | ERROR | |
r-devel-linux-x86_64-fedora-clang | 0.0.19 | | | 306.20 | ERROR | |
r-devel-linux-x86_64-fedora-gcc | 0.0.19 | | | 297.56 | ERROR | |
r-devel-windows-ix86+x86_64 | 0.0.19 | 27.00 | 333.00 | 360.00 | ERROR | |
r-patched-linux-x86_64 | 0.0.19 | 11.79 | 168.17 | 179.96 | OK | |
r-patched-solaris-x86 | 0.0.19 | | | 270.70 | NOTE | |
r-release-linux-x86_64 | 0.0.19 | 7.98 | 168.51 | 176.49 | OK | |
r-release-windows-ix86+x86_64 | 0.0.19 | 17.00 | 305.00 | 322.00 | NOTE | |
r-release-osx-x86_64 | 0.0.19 | | | | NOTE | |
r-oldrel-windows-ix86+x86_64 | 0.0.19 | 9.00 | 306.00 | 315.00 | NOTE | |
r-oldrel-osx-x86_64 | 0.0.19 | | | | NOTE | |
Version: 0.0.19
Check: tests
Result: ERROR
Running 'AHP.R' [0s/1s]
Running 'LPDMRSort.R' [1s/1s]
Running 'LPDMRSortIdentifyIncompatibleAssignments.R' [4s/4s]
Running 'LPDMRSortIdentifyUsedDictatorProfiles.R' [0s/1s]
Running 'LPDMRSortIdentifyUsedVetoProfiles.R' [0s/1s]
Running 'LPDMRSortInferenceApprox.R' [4s/4s]
Running 'LPDMRSortInferenceExact.R' [3s/3s]
Running 'MARE.R' [0s/1s]
Running 'MRSort.R' [0s/1s]
Running 'MRSortIdentifyIncompatibleAssignments.R' [2s/3s]
Running 'MRSortIdentifyUsedVetoProfiles.R' [0s/1s]
Running 'MRSortInferenceApprox.R' [7s/8s]
Running 'MRSortInferenceExact.R' [0s/1s]
Running 'SRMP.R' [1s/1s]
Running 'SRMPInference.R' [4s/5s]
Running 'SRMPInferenceApprox.R' [49s/52s]
Running 'SRMPInferenceApproxFixedLexicographicOrder.R' [15s/17s]
Running 'SRMPInferenceApproxFixedProfilesNumber.R' [58s/62s]
Running 'SRMPInferenceFixedLexicographicOrder.R' [1s/1s]
Running 'SRMPInferenceFixedProfilesNumber.R' [4s/5s]
Running 'SRMPInferenceNoInconsist.R' [1s/1s]
Running 'SRMPInferenceNoInconsistFixedLexicographicOrder.R' [3s/3s]
Running 'SRMPInferenceNoInconsistFixedProfilesNumber.R' [3s/3s]
Running 'TOPSIS.R' [0s/1s]
Running 'UTA.R' [0s/1s]
Running 'UTADIS.R' [0s/1s]
Running 'UTASTAR.R' [0s/1s]
Running 'additiveValueFunctionElicitation.R' [0s/0s]
Running 'applyPiecewiseLinearValueFunctionsOnPerformanceTable.R' [0s/1s]
Running 'assignAlternativesToCategoriesByThresholds.R' [0s/1s]
Running 'normalizePerformanceTable.R' [0s/1s]
Running 'pairwiseConsistencyMeasures.R' [0s/1s]
Running 'plotAlternativesValuesPreorder.R' [1s/2s]
Running 'plotMRSortSortingProblem.R' [0s/1s]
Running 'plotRadarPerformanceTable.R' [0s/1s]
Running 'weightedSum.R' [0s/1s]
Running the tests in 'tests/SRMPInferenceApproxFixedProfilesNumber.R' failed.
Complete output:
> # ranking some students
>
> library(MCDA)
>
> # the performance table
>
> performanceTable <- rbind(c(10,10,9),c(10,9,10),c(9,10,10),c(9,9,10),c(9,10,9),c(10,9,9),
+ c(10,10,7),c(10,7,10),c(7,10,10),c(9,9,17),c(9,17,9),c(17,9,9),
+ c(7,10,17),c(10,17,7),c(17,7,10),c(7,17,10),c(17,10,7),c(10,7,17),
+ c(7,9,17),c(9,17,7),c(17,7,9),c(7,17,9),c(17,9,7),c(9,7,17))
>
> criteriaMinMax <- c("max","max","max")
>
> rownames(performanceTable) <- c("a1","a2","a3","a4","a5","a6","a7","a8","a9","a10","a11","a12","a13","a14","a15","a16","a17","a18","a19","a20","a21","a22","a23","a24")
>
> colnames(performanceTable) <- c("c1","c2","c3")
>
> names(criteriaMinMax) <- colnames(performanceTable)
>
> # expected result for the tests below
>
> expectedValues <- c(10,7,13,3,5,1,10,7,13,4,6,2,14,12,8,15,11,9,4,6,2,6,2,4)
>
> names(expectedValues) <- rownames(performanceTable)
>
> altIDs <- c("a1","a3","a7","a9","a13","a14","a15","a16","a17","a18")
>
> expectedValues <- expectedValues[altIDs]
>
> expectedValues <- expectedValues - min(expectedValues) + 1
>
> # test - preferences and indifferences
>
> preferencePairs <- matrix(c("a16","a13","a3","a14","a17","a1","a18","a15","a2","a11","a5","a10","a4","a12",
+ "a13","a3","a14","a17","a1","a18","a15","a2","a11","a5","a10","a4","a12","a6"),14,2)
> indifferencePairs <- matrix(c("a3","a1","a2","a11","a11","a20","a10","a10","a19","a12","a12","a21",
+ "a9","a7","a8","a20","a22","a22","a19","a24","a24","a21","a23","a23"),12,2)
>
> set.seed(1)
>
> result<-SRMPInferenceApproxFixedProfilesNumber(performanceTable, criteriaMinMax, 3, preferencePairs, indifferencePairs, alternativesIDs = altIDs)
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Final model fitness: 77.78%"
>
> alternativesValues<-SRMP(performanceTable, result$referenceProfiles, result$lexicographicOrder, result$criteriaWeights, criteriaMinMax, alternativesIDs = altIDs)
>
> stopifnot(all(alternativesValues == expectedValues))
Error: all(alternativesValues == expectedValues) is not TRUE
Execution halted
Flavor: r-devel-linux-x86_64-debian-clang
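The assertion above only reports that `all(alternativesValues == expectedValues)` is not TRUE, which makes the per-flavor logs hard to compare. `SRMPInferenceApproxFixedProfilesNumber` is an approximate inference routine whose trace above stops at a fitness of 77.78%, so the inferred model does not reproduce all of the supplied preference information, and the resulting ranking can plausibly differ between platforms even with `set.seed(1)`. Below is a minimal editorial sketch of a more diagnostic comparison, reusing the `alternativesValues` and `expectedValues` objects from the test script shown above; it is not part of the MCDA package or the CRAN log.

```r
# Editorial sketch, not MCDA code: report *which* ranks differ instead of
# only failing. Reuses alternativesValues and expectedValues as defined in
# tests/SRMPInferenceApproxFixedProfilesNumber.R above.
obtained <- as.numeric(alternativesValues[names(expectedValues)])
expected <- as.numeric(expectedValues)
if (!isTRUE(all.equal(obtained, expected))) {
  print(rbind(expected = expectedValues,
              obtained = alternativesValues[names(expectedValues)]))
  stop("SRMP ranking differs from the expected ranking")
}
```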
Version: 0.0.19
Check: tests
Result: ERROR
Running ‘AHP.R’ [0s/1s]
Running ‘LPDMRSort.R’ [0s/1s]
Running ‘LPDMRSortIdentifyIncompatibleAssignments.R’ [3s/5s]
Running ‘LPDMRSortIdentifyUsedDictatorProfiles.R’ [0s/1s]
Running ‘LPDMRSortIdentifyUsedVetoProfiles.R’ [0s/1s]
Running ‘LPDMRSortInferenceApprox.R’ [3s/4s]
Running ‘LPDMRSortInferenceExact.R’ [2s/4s]
Running ‘MARE.R’ [0s/1s]
Running ‘MRSort.R’ [0s/1s]
Running ‘MRSortIdentifyIncompatibleAssignments.R’ [2s/3s]
Running ‘MRSortIdentifyUsedVetoProfiles.R’ [0s/1s]
Running ‘MRSortInferenceApprox.R’ [5s/8s]
Running ‘MRSortInferenceExact.R’ [0s/1s]
Running ‘SRMP.R’ [0s/1s]
Running ‘SRMPInference.R’ [3s/4s]
Running ‘SRMPInferenceApprox.R’ [36s/48s]
Running ‘SRMPInferenceApproxFixedLexicographicOrder.R’ [12s/18s]
Running ‘SRMPInferenceApproxFixedProfilesNumber.R’ [42s/62s]
Running ‘SRMPInferenceFixedLexicographicOrder.R’ [0s/1s]
Running ‘SRMPInferenceFixedProfilesNumber.R’ [3s/5s]
Running ‘SRMPInferenceNoInconsist.R’ [1s/1s]
Running ‘SRMPInferenceNoInconsistFixedLexicographicOrder.R’ [2s/4s]
Running ‘SRMPInferenceNoInconsistFixedProfilesNumber.R’ [2s/3s]
Running ‘TOPSIS.R’ [0s/1s]
Running ‘UTA.R’ [0s/1s]
Running ‘UTADIS.R’ [0s/1s]
Running ‘UTASTAR.R’ [0s/1s]
Running ‘additiveValueFunctionElicitation.R’ [0s/1s]
Running ‘applyPiecewiseLinearValueFunctionsOnPerformanceTable.R’ [0s/1s]
Running ‘assignAlternativesToCategoriesByThresholds.R’ [0s/1s]
Running ‘normalizePerformanceTable.R’ [0s/1s]
Running ‘pairwiseConsistencyMeasures.R’ [0s/1s]
Running ‘plotAlternativesValuesPreorder.R’ [1s/2s]
Running ‘plotMRSortSortingProblem.R’ [0s/1s]
Running ‘plotRadarPerformanceTable.R’ [0s/1s]
Running ‘weightedSum.R’ [0s/1s]
Running the tests in ‘tests/SRMPInferenceApproxFixedProfilesNumber.R’ failed.
Complete output:
> # ranking some students
>
> library(MCDA)
>
> # the performance table
>
> performanceTable <- rbind(c(10,10,9),c(10,9,10),c(9,10,10),c(9,9,10),c(9,10,9),c(10,9,9),
+ c(10,10,7),c(10,7,10),c(7,10,10),c(9,9,17),c(9,17,9),c(17,9,9),
+ c(7,10,17),c(10,17,7),c(17,7,10),c(7,17,10),c(17,10,7),c(10,7,17),
+ c(7,9,17),c(9,17,7),c(17,7,9),c(7,17,9),c(17,9,7),c(9,7,17))
>
> criteriaMinMax <- c("max","max","max")
>
> rownames(performanceTable) <- c("a1","a2","a3","a4","a5","a6","a7","a8","a9","a10","a11","a12","a13","a14","a15","a16","a17","a18","a19","a20","a21","a22","a23","a24")
>
> colnames(performanceTable) <- c("c1","c2","c3")
>
> names(criteriaMinMax) <- colnames(performanceTable)
>
> # expected result for the tests below
>
> expectedValues <- c(10,7,13,3,5,1,10,7,13,4,6,2,14,12,8,15,11,9,4,6,2,6,2,4)
>
> names(expectedValues) <- rownames(performanceTable)
>
> altIDs <- c("a1","a3","a7","a9","a13","a14","a15","a16","a17","a18")
>
> expectedValues <- expectedValues[altIDs]
>
> expectedValues <- expectedValues - min(expectedValues) + 1
>
> # test - preferences and indifferences
>
> preferencePairs <- matrix(c("a16","a13","a3","a14","a17","a1","a18","a15","a2","a11","a5","a10","a4","a12",
+ "a13","a3","a14","a17","a1","a18","a15","a2","a11","a5","a10","a4","a12","a6"),14,2)
> indifferencePairs <- matrix(c("a3","a1","a2","a11","a11","a20","a10","a10","a19","a12","a12","a21",
+ "a9","a7","a8","a20","a22","a22","a19","a24","a24","a21","a23","a23"),12,2)
>
> set.seed(1)
>
> result<-SRMPInferenceApproxFixedProfilesNumber(performanceTable, criteriaMinMax, 3, preferencePairs, indifferencePairs, alternativesIDs = altIDs)
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Final model fitness: 77.78%"
>
> alternativesValues<-SRMP(performanceTable, result$referenceProfiles, result$lexicographicOrder, result$criteriaWeights, criteriaMinMax, alternativesIDs = altIDs)
>
> stopifnot(all(alternativesValues == expectedValues))
Error: all(alternativesValues == expectedValues) is not TRUE
Execution halted
Flavor: r-devel-linux-x86_64-debian-gcc
Version: 0.0.19
Check: package dependencies
Result: NOTE
Package suggested but not available for checking: ‘cplexAPI’
Flavors: r-devel-linux-x86_64-fedora-clang, r-devel-linux-x86_64-fedora-gcc, r-devel-windows-ix86+x86_64, r-patched-solaris-x86, r-release-windows-ix86+x86_64, r-release-osx-x86_64, r-oldrel-windows-ix86+x86_64, r-oldrel-osx-x86_64
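This NOTE is the standard message for a package listed in Suggests that is not installed on the check machine. A common way to keep checks clean in that situation is to guard every use of the suggested package with `requireNamespace()`; the sketch below is illustrative only and is not taken from the MCDA sources.

```r
# Illustrative guard for an optional solver backend listed under Suggests.
# 'cplexAPI' is the package named in the NOTE; the branch contents are
# placeholders, not MCDA code.
if (requireNamespace("cplexAPI", quietly = TRUE)) {
  # ... call the CPLEX-based solver path here ...
} else {
  message("cplexAPI is not available; falling back to the default solver")
}
```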
Version: 0.0.19
Check: tests
Result: ERROR
Running ‘AHP.R’
Running ‘LPDMRSort.R’
Running ‘LPDMRSortIdentifyIncompatibleAssignments.R’
Running ‘LPDMRSortIdentifyUsedDictatorProfiles.R’
Running ‘LPDMRSortIdentifyUsedVetoProfiles.R’
Running ‘LPDMRSortInferenceApprox.R’
Running ‘LPDMRSortInferenceExact.R’
Running ‘MARE.R’
Running ‘MRSort.R’
Running ‘MRSortIdentifyIncompatibleAssignments.R’
Running ‘MRSortIdentifyUsedVetoProfiles.R’
Running ‘MRSortInferenceApprox.R’
Running ‘MRSortInferenceExact.R’
Running ‘SRMP.R’
Running ‘SRMPInference.R’
Running ‘SRMPInferenceApprox.R’ [55s/62s]
Running ‘SRMPInferenceApproxFixedLexicographicOrder.R’ [17s/20s]
Running ‘SRMPInferenceApproxFixedProfilesNumber.R’ [56s/61s]
Running ‘SRMPInferenceFixedLexicographicOrder.R’
Running ‘SRMPInferenceFixedProfilesNumber.R’
Running ‘SRMPInferenceNoInconsist.R’
Running ‘SRMPInferenceNoInconsistFixedLexicographicOrder.R’
Running ‘SRMPInferenceNoInconsistFixedProfilesNumber.R’
Running ‘TOPSIS.R’
Running ‘UTA.R’
Running ‘UTADIS.R’
Running ‘UTASTAR.R’
Running ‘additiveValueFunctionElicitation.R’
Running ‘applyPiecewiseLinearValueFunctionsOnPerformanceTable.R’
Running ‘assignAlternativesToCategoriesByThresholds.R’
Running ‘normalizePerformanceTable.R’
Running ‘pairwiseConsistencyMeasures.R’
Running ‘plotAlternativesValuesPreorder.R’
Running ‘plotMRSortSortingProblem.R’
Running ‘plotRadarPerformanceTable.R’
Running ‘weightedSum.R’
Running the tests in ‘tests/SRMPInferenceApproxFixedProfilesNumber.R’ failed.
Complete output:
> # ranking some students
>
> library(MCDA)
>
> # the performance table
>
> performanceTable <- rbind(c(10,10,9),c(10,9,10),c(9,10,10),c(9,9,10),c(9,10,9),c(10,9,9),
+ c(10,10,7),c(10,7,10),c(7,10,10),c(9,9,17),c(9,17,9),c(17,9,9),
+ c(7,10,17),c(10,17,7),c(17,7,10),c(7,17,10),c(17,10,7),c(10,7,17),
+ c(7,9,17),c(9,17,7),c(17,7,9),c(7,17,9),c(17,9,7),c(9,7,17))
>
> criteriaMinMax <- c("max","max","max")
>
> rownames(performanceTable) <- c("a1","a2","a3","a4","a5","a6","a7","a8","a9","a10","a11","a12","a13","a14","a15","a16","a17","a18","a19","a20","a21","a22","a23","a24")
>
> colnames(performanceTable) <- c("c1","c2","c3")
>
> names(criteriaMinMax) <- colnames(performanceTable)
>
> # expected result for the tests below
>
> expectedValues <- c(10,7,13,3,5,1,10,7,13,4,6,2,14,12,8,15,11,9,4,6,2,6,2,4)
>
> names(expectedValues) <- rownames(performanceTable)
>
> altIDs <- c("a1","a3","a7","a9","a13","a14","a15","a16","a17","a18")
>
> expectedValues <- expectedValues[altIDs]
>
> expectedValues <- expectedValues - min(expectedValues) + 1
>
> # test - preferences and indifferences
>
> preferencePairs <- matrix(c("a16","a13","a3","a14","a17","a1","a18","a15","a2","a11","a5","a10","a4","a12",
+ "a13","a3","a14","a17","a1","a18","a15","a2","a11","a5","a10","a4","a12","a6"),14,2)
> indifferencePairs <- matrix(c("a3","a1","a2","a11","a11","a20","a10","a10","a19","a12","a12","a21",
+ "a9","a7","a8","a20","a22","a22","a19","a24","a24","a21","a23","a23"),12,2)
>
> set.seed(1)
>
> result<-SRMPInferenceApproxFixedProfilesNumber(performanceTable, criteriaMinMax, 3, preferencePairs, indifferencePairs, alternativesIDs = altIDs)
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Final model fitness: 77.78%"
>
> alternativesValues<-SRMP(performanceTable, result$referenceProfiles, result$lexicographicOrder, result$criteriaWeights, criteriaMinMax, alternativesIDs = altIDs)
>
> stopifnot(all(alternativesValues == expectedValues))
Error: all(alternativesValues == expectedValues) is not TRUE
Execution halted
Flavor: r-devel-linux-x86_64-fedora-clang
Version: 0.0.19
Check: tests
Result: ERROR
Running ‘AHP.R’
Running ‘LPDMRSort.R’
Running ‘LPDMRSortIdentifyIncompatibleAssignments.R’
Running ‘LPDMRSortIdentifyUsedDictatorProfiles.R’
Running ‘LPDMRSortIdentifyUsedVetoProfiles.R’
Running ‘LPDMRSortInferenceApprox.R’
Running ‘LPDMRSortInferenceExact.R’
Running ‘MARE.R’
Running ‘MRSort.R’
Running ‘MRSortIdentifyIncompatibleAssignments.R’
Running ‘MRSortIdentifyUsedVetoProfiles.R’
Running ‘MRSortInferenceApprox.R’
Running ‘MRSortInferenceExact.R’
Running ‘SRMP.R’
Running ‘SRMPInference.R’
Running ‘SRMPInferenceApprox.R’ [51s/61s]
Running ‘SRMPInferenceApproxFixedLexicographicOrder.R’ [17s/20s]
Running ‘SRMPInferenceApproxFixedProfilesNumber.R’ [54s/62s]
Running ‘SRMPInferenceFixedLexicographicOrder.R’
Running ‘SRMPInferenceFixedProfilesNumber.R’
Running ‘SRMPInferenceNoInconsist.R’
Running ‘SRMPInferenceNoInconsistFixedLexicographicOrder.R’
Running ‘SRMPInferenceNoInconsistFixedProfilesNumber.R’
Running ‘TOPSIS.R’
Running ‘UTA.R’
Running ‘UTADIS.R’
Running ‘UTASTAR.R’
Running ‘additiveValueFunctionElicitation.R’
Running ‘applyPiecewiseLinearValueFunctionsOnPerformanceTable.R’
Running ‘assignAlternativesToCategoriesByThresholds.R’
Running ‘normalizePerformanceTable.R’
Running ‘pairwiseConsistencyMeasures.R’
Running ‘plotAlternativesValuesPreorder.R’
Running ‘plotMRSortSortingProblem.R’
Running ‘plotRadarPerformanceTable.R’
Running ‘weightedSum.R’
Running the tests in ‘tests/SRMPInferenceApprox.R’ failed.
Complete output:
> # ranking some students
>
> library(MCDA)
>
> # the performance table
>
> performanceTable <- rbind(c(10,10,9),c(10,9,10),c(9,10,10),c(9,9,10),c(9,10,9),c(10,9,9),
+ c(10,10,7),c(10,7,10),c(7,10,10),c(9,9,17),c(9,17,9),c(17,9,9),
+ c(7,10,17),c(10,17,7),c(17,7,10),c(7,17,10),c(17,10,7),c(10,7,17),
+ c(7,9,17),c(9,17,7),c(17,7,9),c(7,17,9),c(17,9,7),c(9,7,17))
>
> criteriaMinMax <- c("max","max","max")
>
> rownames(performanceTable) <- c("a1","a2","a3","a4","a5","a6","a7","a8","a9","a10","a11","a12","a13","a14","a15","a16","a17","a18","a19","a20","a21","a22","a23","a24")
>
> colnames(performanceTable) <- c("c1","c2","c3")
>
> names(criteriaMinMax) <- colnames(performanceTable)
>
> # expected result for the tests below
>
> expectedValues <- c(10,7,13,3,5,1,10,7,13,4,6,2,14,12,8,15,11,9,4,6,2,6,2,4)
>
> names(expectedValues) <- rownames(performanceTable)
>
> expectedValues <- expectedValues[c("a1","a3","a7","a9","a13","a14","a15","a16","a17","a18")]
>
> expectedValues <- expectedValues - min(expectedValues) + 1
>
> # test - preferences and indifferences
>
> preferencePairs <- matrix(c("a16","a13","a3","a14","a17","a1","a18","a15","a2","a11","a5","a10","a4","a12",
+ "a13","a3","a14","a17","a1","a18","a15","a2","a11","a5","a10","a4","a12","a6"),14,2)
> indifferencePairs <- matrix(c("a3","a1","a2","a11","a11","a20","a10","a10","a19","a12","a12","a21",
+ "a9","a7","a8","a20","a22","a22","a19","a24","a24","a21","a23","a23"),12,2)
>
> set.seed(1)
>
> result<-SRMPInferenceApprox(performanceTable, criteriaMinMax, 3, preferencePairs, indifferencePairs, alternativesIDs = c("a1","a3","a7","a9","a13","a14","a15","a16","a17","a18"))
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 88.89%"
[1] "Best fitness so far: 88.89%"
[1] "Best fitness so far: 88.89%"
[1] "Best fitness so far: 88.89%"
[1] "Best fitness so far: 88.89%"
[1] "Best fitness so far: 88.89%"
[1] "Best fitness so far: 88.89%"
[1] "Best fitness so far: 88.89%"
[1] "Best fitness so far: 88.89%"
[1] "Final model fitness: 88.89%"
>
> alternativesValues<-SRMP(performanceTable, result$referenceProfiles, result$lexicographicOrder, result$criteriaWeights, criteriaMinMax, alternativesIDs = c("a1","a3","a7","a9","a13","a14","a15","a16","a17","a18"))
>
> stopifnot(all(alternativesValues == expectedValues))
Error: all(alternativesValues == expectedValues) is not TRUE
Execution halted
Running the tests in ‘tests/SRMPInferenceApproxFixedProfilesNumber.R’ failed.
Complete output:
> # ranking some students
>
> library(MCDA)
>
> # the performance table
>
> performanceTable <- rbind(c(10,10,9),c(10,9,10),c(9,10,10),c(9,9,10),c(9,10,9),c(10,9,9),
+ c(10,10,7),c(10,7,10),c(7,10,10),c(9,9,17),c(9,17,9),c(17,9,9),
+ c(7,10,17),c(10,17,7),c(17,7,10),c(7,17,10),c(17,10,7),c(10,7,17),
+ c(7,9,17),c(9,17,7),c(17,7,9),c(7,17,9),c(17,9,7),c(9,7,17))
>
> criteriaMinMax <- c("max","max","max")
>
> rownames(performanceTable) <- c("a1","a2","a3","a4","a5","a6","a7","a8","a9","a10","a11","a12","a13","a14","a15","a16","a17","a18","a19","a20","a21","a22","a23","a24")
>
> colnames(performanceTable) <- c("c1","c2","c3")
>
> names(criteriaMinMax) <- colnames(performanceTable)
>
> # expected result for the tests below
>
> expectedValues <- c(10,7,13,3,5,1,10,7,13,4,6,2,14,12,8,15,11,9,4,6,2,6,2,4)
>
> names(expectedValues) <- rownames(performanceTable)
>
> altIDs <- c("a1","a3","a7","a9","a13","a14","a15","a16","a17","a18")
>
> expectedValues <- expectedValues[altIDs]
>
> expectedValues <- expectedValues - min(expectedValues) + 1
>
> # test - preferences and indifferences
>
> preferencePairs <- matrix(c("a16","a13","a3","a14","a17","a1","a18","a15","a2","a11","a5","a10","a4","a12",
+ "a13","a3","a14","a17","a1","a18","a15","a2","a11","a5","a10","a4","a12","a6"),14,2)
> indifferencePairs <- matrix(c("a3","a1","a2","a11","a11","a20","a10","a10","a19","a12","a12","a21",
+ "a9","a7","a8","a20","a22","a22","a19","a24","a24","a21","a23","a23"),12,2)
>
> set.seed(1)
>
> result<-SRMPInferenceApproxFixedProfilesNumber(performanceTable, criteriaMinMax, 3, preferencePairs, indifferencePairs, alternativesIDs = altIDs)
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Final model fitness: 77.78%"
>
> alternativesValues<-SRMP(performanceTable, result$referenceProfiles, result$lexicographicOrder, result$criteriaWeights, criteriaMinMax, alternativesIDs = altIDs)
>
> stopifnot(all(alternativesValues == expectedValues))
Error: all(alternativesValues == expectedValues) is not TRUE
Execution halted
Flavor: r-devel-linux-x86_64-fedora-gcc
Version: 0.0.19
Check: tests
Result: ERROR
Running 'AHP.R' [0s]
Running 'LPDMRSort.R' [1s]
Running 'LPDMRSortIdentifyIncompatibleAssignments.R' [6s]
Running 'LPDMRSortIdentifyUsedDictatorProfiles.R' [1s]
Running 'LPDMRSortIdentifyUsedVetoProfiles.R' [0s]
Running 'LPDMRSortInferenceApprox.R' [4s]
Running 'LPDMRSortInferenceExact.R' [4s]
Running 'MARE.R' [0s]
Running 'MRSort.R' [1s]
Running 'MRSortIdentifyIncompatibleAssignments.R' [2s]
Running 'MRSortIdentifyUsedVetoProfiles.R' [0s]
Running 'MRSortInferenceApprox.R' [6s]
Running 'MRSortInferenceExact.R' [0s]
Running 'SRMP.R' [1s]
Running 'SRMPInference.R' [3s]
Running 'SRMPInferenceApprox.R' [48s]
Running 'SRMPInferenceApproxFixedLexicographicOrder.R' [14s]
Running 'SRMPInferenceApproxFixedProfilesNumber.R' [62s]
Running 'SRMPInferenceFixedLexicographicOrder.R' [3s]
Running 'SRMPInferenceFixedProfilesNumber.R' [1s]
Running 'SRMPInferenceNoInconsist.R' [1s]
Running 'SRMPInferenceNoInconsistFixedLexicographicOrder.R' [57s]
Running 'SRMPInferenceNoInconsistFixedProfilesNumber.R' [10s]
Running 'TOPSIS.R' [1s]
Running 'UTA.R' [0s]
Running 'UTADIS.R' [0s]
Running 'UTASTAR.R' [0s]
Running 'additiveValueFunctionElicitation.R' [1s]
Running 'applyPiecewiseLinearValueFunctionsOnPerformanceTable.R' [0s]
Running 'assignAlternativesToCategoriesByThresholds.R' [1s]
Running 'normalizePerformanceTable.R' [0s]
Running 'pairwiseConsistencyMeasures.R' [0s]
Running 'plotAlternativesValuesPreorder.R' [1s]
Running 'plotMRSortSortingProblem.R' [1s]
Running 'plotRadarPerformanceTable.R' [1s]
Running 'weightedSum.R' [0s]
Running the tests in 'tests/SRMPInferenceApproxFixedProfilesNumber.R' failed.
Complete output:
> # ranking some students
>
> library(MCDA)
>
> # the performance table
>
> performanceTable <- rbind(c(10,10,9),c(10,9,10),c(9,10,10),c(9,9,10),c(9,10,9),c(10,9,9),
+ c(10,10,7),c(10,7,10),c(7,10,10),c(9,9,17),c(9,17,9),c(17,9,9),
+ c(7,10,17),c(10,17,7),c(17,7,10),c(7,17,10),c(17,10,7),c(10,7,17),
+ c(7,9,17),c(9,17,7),c(17,7,9),c(7,17,9),c(17,9,7),c(9,7,17))
>
> criteriaMinMax <- c("max","max","max")
>
> rownames(performanceTable) <- c("a1","a2","a3","a4","a5","a6","a7","a8","a9","a10","a11","a12","a13","a14","a15","a16","a17","a18","a19","a20","a21","a22","a23","a24")
>
> colnames(performanceTable) <- c("c1","c2","c3")
>
> names(criteriaMinMax) <- colnames(performanceTable)
>
> # expected result for the tests below
>
> expectedValues <- c(10,7,13,3,5,1,10,7,13,4,6,2,14,12,8,15,11,9,4,6,2,6,2,4)
>
> names(expectedValues) <- rownames(performanceTable)
>
> altIDs <- c("a1","a3","a7","a9","a13","a14","a15","a16","a17","a18")
>
> expectedValues <- expectedValues[altIDs]
>
> expectedValues <- expectedValues - min(expectedValues) + 1
>
> # test - preferences and indifferences
>
> preferencePairs <- matrix(c("a16","a13","a3","a14","a17","a1","a18","a15","a2","a11","a5","a10","a4","a12",
+ "a13","a3","a14","a17","a1","a18","a15","a2","a11","a5","a10","a4","a12","a6"),14,2)
> indifferencePairs <- matrix(c("a3","a1","a2","a11","a11","a20","a10","a10","a19","a12","a12","a21",
+ "a9","a7","a8","a20","a22","a22","a19","a24","a24","a21","a23","a23"),12,2)
>
> set.seed(1)
>
> result<-SRMPInferenceApproxFixedProfilesNumber(performanceTable, criteriaMinMax, 3, preferencePairs, indifferencePairs, alternativesIDs = altIDs)
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Best fitness so far: 77.78%"
[1] "Final model fitness: 77.78%"
>
> alternativesValues<-SRMP(performanceTable, result$referenceProfiles, result$lexicographicOrder, result$criteriaWeights, criteriaMinMax, alternativesIDs = altIDs)
>
> stopifnot(all(alternativesValues == expectedValues))
Error: all(alternativesValues == expectedValues) is not TRUE
Execution halted
Flavor: r-devel-windows-ix86+x86_64
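To investigate the platform-dependent failures above, the failing script can be re-run locally against an installed copy of the package. A minimal sketch, assuming the MCDA 0.0.19 source tarball has been unpacked and its directory is the working directory (editorial example, not part of the CRAN output):

```r
# Editorial reproduction sketch: re-run the failing test script with echo so
# the "Best fitness so far" trace and the final comparison can be inspected.
library(MCDA)  # assumes MCDA 0.0.19 is installed
# path below assumes the unpacked MCDA source tree as the working directory
source("tests/SRMPInferenceApproxFixedProfilesNumber.R", echo = TRUE)
```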