-
Notifications
You must be signed in to change notification settings - Fork 58
Expand file tree
/
Copy pathTesting.fs
More file actions
1502 lines (1316 loc) · 105 KB
/
Testing.fs
File metadata and controls
1502 lines (1316 loc) · 105 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
module TestingTests
open Expecto
open System
open FsMath
open FSharp.Stats
open FSharp.Stats.Testing
open TestExtensions
open FSharp.Stats.Testing.SAM
open Deedle
[<Tests>]
let testPostHocTests =
    // Post-hoc comparisons after ANOVA (Tukey HSD, Dunnett).
    //Tests taken from:
    //https://www.icalcu.com/stat/anova-tukey-hsd-calculator.html
    testList "Testing.PostHoc" [
        (*
        // Test ommitted due to extremely long runtime of CodeCov.
        testCase "tukeyHSD" <| fun () ->
            let dataA = [|3.;3.;4.;5.;2.;5.;5.;4.;4.;2.;2.;2.;4.;3.;5.;3.;4.;5.;3.;5.; |]
            let dataB = [|10.;7.;9.;6.;7.;7.;6.;7.;10.;7.;8.;8.;8.;6.;10.;9.;9.;6.;9.;8.; |]
            let dataC = [|6.;5.;6.;4.;4.;6.;1.;4.;6.;5.;4.;7.;4.;2.;1.;1.;3.;4.;5.;3.; |]
            let dataD = [|10.;5.;6.;5.;8.;5.;6.;9.;3.;10.;5.;9.;5.;5.;6.;10.;9.;6.;9.;10.; |]
            let dataE = [|14.;17.;14.;13.;18.;12.;17.;11.;12.;11.;12.;10.;17.;19.;18.;18.;15.;14.;18.;16.|]
            let data = [|dataA;dataB;dataC;dataD;dataE|]
            let contrastMatrix =
                [|
                //[|-1.;1.;0.;0.;0.;|] pvalue = zero
                [|-1.;0.;1.;0.;0.;|]
                [|-1.;0.;0.;1.;0.;|]
                //[|-1.;0.;0.;0.;1.;|] pvalue = zero
                [|0.;-1.;1.;0.;0.;|]
                [|0.;-1.;0.;1.;0.;|]
                //[|0.;-1.;0.;0.;1.;|] pvalue = zero
                [|0.;0.;-1.;1.;0.;|]
                //[|0.;0.;-1.;0.;1.;|] pvalue = zero
                //[|0.;0.;0.;-1.;1.;|] pvalue = zero
                |]
            let pValues =
                PostHoc.tukeyHSD contrastMatrix data
                |> Array.map (fun x -> x.Significance)
            //pvalues from R: TUKEY <- TukeyHSD(x=ANOVA, 'data$treatment', conf.level=0.95)
            let rpval = [0.9685630;0.0000045;0.0000003;0.7072882;0.0000618]
            Expect.floatClose Accuracy.low rpval.[0] pValues.[0] "p values should be equal."
            Expect.floatClose Accuracy.low rpval.[1] pValues.[1] "p values should be equal."
            Expect.floatClose Accuracy.low rpval.[2] pValues.[2] "p values should be equal."
            Expect.floatClose Accuracy.low rpval.[3] pValues.[3] "p values should be equal."
            Expect.floatClose Accuracy.low rpval.[4] pValues.[4] "p values should be equal."
        *)
        testCase "dunnett" <| fun () ->
            // Five sample groups; group sizes differ (last group has six observations).
            let data =
                [|
                    [|1.84;2.49;1.50;2.42;|]
                    [|2.43;1.85;2.42;2.73;|]
                    [|3.95;3.67;3.23;2.31;|]
                    [|3.21;3.20;2.32;3.30;|]
                    [|3.21;3.13;2.32;3.30;3.20;2.42;|]
                |]
            //first sample is control
            // Each contrast row compares one treatment group against the control (-1 weight).
            let contrastMatrix =
                [|
                    [|-1.;1.;0.;0.;0.|]
                    [|-1.;0.;1.;0.;0.|]
                    [|-1.;0.;0.;1.;0.|]
                    [|-1.;0.;0.;0.;1.|]
                |]
            let dunnettResult =
                PostHoc.dunnetts contrastMatrix data Tables.dunnettsTwoSided095
            //result from: SPSS Dunnett's test version 27
            // Reference p-values; significance is asserted as (p < 0.05) because the
            // implementation reports a boolean Significance, not a p-value.
            let pval = [0.811;0.010;0.050;0.049]
            // Reference mean differences (treatment mean minus control mean).
            let dmean = [0.295;1.2275;0.945;0.8675]
            Expect.equal dunnettResult.[0].Significance (pval.[0]<0.05) "Significance should be equal."
            Expect.equal dunnettResult.[1].Significance (pval.[1]<0.05) "Significance should be equal."
            Expect.equal dunnettResult.[2].Significance (pval.[2]<0.05) "Significance should be equal."
            Expect.equal dunnettResult.[3].Significance (pval.[3]<0.05) "Significance should be equal."
            Expect.floatClose Accuracy.high dunnettResult.[0].L dmean.[0] "Mean differences should be equal."
            Expect.floatClose Accuracy.high dunnettResult.[1].L dmean.[1] "Mean differences should be equal."
            Expect.floatClose Accuracy.high dunnettResult.[2].L dmean.[2] "Mean differences should be equal."
            Expect.floatClose Accuracy.high dunnettResult.[3].L dmean.[3] "Mean differences should be equal."
    ]
[<Tests>]
let hTestTests =
    // Kruskal-Wallis H test with ties, validated against the R implementation:
    // kruskal.test(weight ~ group, data = my_data)
    let groupA = [4.17; 5.18; 5.18; 6.11; 4.50; 4.61; 5.17; 4.53; 5.33; 5.18]
    let groupB = [4.81; 4.17; 4.41; 3.59; 5.87; 3.83; 6.03; 4.89; 4.32; 4.69]
    let groupC = [6.31; 5.12; 5.00; 5.00; 5.00; 5.29; 5.00; 6.15; 5.80; 5.26]
    let samples = [groupA; groupB; groupC]
    // calculation of the H test
    let hResult =
        HTest.createHTest samples
    testList "Testing.HTest" [
        testCase "createHTest" <| fun () ->
            // Expect.equal on the rounded value (instead of Expect.isTrue on a boolean
            // float comparison) reports actual vs expected when the assertion fails.
            Expect.equal (Math.Round(hResult.PValueRight, 5)) 0.03781 "pValue should be equal."
            Expect.equal (Math.Round(hResult.Statistic, 4)) 6.5502 "statistic should be equal."
    ]
[<Tests>]
let friedmanTestTests =
    // Friedman test checked against the SPSS example dataset from
    // https://www.methodenberatung.uzh.ch/de/datenanalyse_spss/unterschiede/zentral/friedman.html#3.2._Ergebnisse_des_Friedman-Tests
    // p-values obtained from distcalc and https://www.socscistatistics.com/pvalues/chidistribution.aspx
    // Each row holds the five repeated measurements of one subject (A..J).
    let uzhDataset =
        seq {
            [|275.;273.;288.;273.;244.|]    // A
            [|292.;283.;284.;285.;329.|]    // B
            [|281.;274.;298.;270.;252.|]    // C
            [|284.;275.;271.;272.;258.|]    // D
            [|285.;294.;307.;278.;275.|]    // E
            [|283.;279.;301.;276.;279.|]    // F
            [|290.;265.;298.;291.;295.|]    // G
            [|294.;277.;295.;290.;271.|]    // H
            [|300.;304.;293.;279.;271.|]    // I
            [|284.;297.;352.;292.;284.|]    // J
        }
    // Same dataset, modified (rows A and J) so that ties of three equal ranks occur.
    let uzhDatasetTripleTies =
        seq {
            [|275.;273.;288.;273.;273.|]    // A2
            [|292.;283.;284.;285.;329.|]    // B2
            [|281.;274.;298.;270.;252.|]    // C2
            [|284.;275.;271.;272.;258.|]    // D2
            [|285.;294.;307.;278.;275.|]    // E2
            [|283.;279.;301.;276.;279.|]    // F2
            [|290.;265.;298.;291.;295.|]    // G2
            [|294.;277.;295.;290.;271.|]    // H2
            [|300.;304.;293.;279.;271.|]    // I2
            [|284.;297.;284.;292.;284.|]    // J2
        }
    // run the Friedman test on both variants
    let resultTwoTies = FriedmanTest.createFriedmanTest uzhDataset
    let resultThreeTies = FriedmanTest.createFriedmanTest uzhDatasetTripleTies
    testList "Testing.FriedmanTest" [
        testCase "createFriedmanTest2equal" <| fun () ->
            Expect.floatClose Accuracy.low resultTwoTies.Statistic 13.259 "statistics should be equal."
            Expect.floatClose Accuracy.low resultTwoTies.PValueRight 0.010077 "pValue should be equal."
        testCase "createFriedmanTest3equal" <| fun () ->
            Expect.floatClose Accuracy.low resultThreeTies.Statistic 9.738 "statistics should be equal."
            Expect.floatClose Accuracy.low resultThreeTies.PValueRight 0.04508 "pValue should be equal."
    ]
[<Tests>]
let wilcoxonTestTests =
    // Wilcoxon signed-rank test, validated against SciPy version 1.7.1.
    let pre   = [78.;24.;64.;45.;64.;52.;30.;50.;64.;50.;78.;22.;84.;40.;90.;72.] |> List.toSeq
    let post  = [78.;24.;62.;48.;68.;56.;25.;44.;56.;40.;68.;36.;68.;20.;58.;32.] |> List.toSeq
    let diffs = [0.;0.;2.;-3.;-4.;-4.;5.;6.;8.;10.;10.;-14.;16.;20.;32.;40.] |> List.toSeq
    // paired-sample variants: with and without continuity correction
    let pairedCorrected   = WilcoxonTest.createWilcoxonTest pre post true
    let pairedUncorrected = WilcoxonTest.createWilcoxonTest pre post false
    // difference-based variants
    let diffCorrected   = WilcoxonTest.createWilcoxonTestFromDifferences diffs true
    let diffUncorrected = WilcoxonTest.createWilcoxonTestFromDifferences diffs false
    // ties of size 3 (exposes the rank-multiplication bug): differences [1,1,1,2,-3]
    // expected values computed with SciPy: wilcoxon([1,1,1,2,-3], mode='approx', correction=False/True)
    let tripleTieDiffs = [1.;1.;1.;2.;-3.] |> List.toSeq
    let tieUncorrected = WilcoxonTest.createWilcoxonTestFromDifferences tripleTieDiffs false
    let tieCorrected   = WilcoxonTest.createWilcoxonTestFromDifferences tripleTieDiffs true
    testList "Testing.WilcoxonTest" [
        testCase "wilcoxonWithCorrection" <| fun () ->
            Expect.floatClose Accuracy.low pairedCorrected.PValueTwoTailed 0.0382 "pValue should be equal."
        testCase "wilcoxonWithoutCorrection" <| fun () ->
            Expect.floatClose Accuracy.low pairedUncorrected.PValueTwoTailed 0.03537 "pValue should be equal."
        testCase "wilcoxonDifferencesWithCorrection" <| fun () ->
            Expect.floatClose Accuracy.low diffCorrected.PValueTwoTailed 0.0382 "pValue should be equal."
        testCase "wilcoxonDifferencesWithoutCorrection" <| fun () ->
            Expect.floatClose Accuracy.low diffUncorrected.PValueTwoTailed 0.03537 "pValue should be equal."
        testCase "wilcoxonOneSidedWithCorrection" <| fun () ->
            Expect.floatClose Accuracy.low pairedCorrected.PValueLeft 0.019102 "pValue should be equal"
        testCase "wilcoxonOneSidedWithoutCorrection" <| fun () ->
            Expect.floatClose Accuracy.low pairedUncorrected.PValueRight 0.9823 "pValue should be equal"
        testCase "wilcoxonTieSize3WithoutCorrection" <| fun () ->
            // SciPy: wilcoxon([1,1,1,2,-3], mode='approx', correction=False) => pvalue ≈ 0.492207
            Expect.floatClose Accuracy.low tieUncorrected.PValueTwoTailed 0.4922 "pValue should match SciPy with ties of size 3"
        testCase "wilcoxonTieSize3WithCorrection" <| fun () ->
            // SciPy: wilcoxon([1,1,1,2,-3], mode='approx', correction=True) => pvalue ≈ 0.582702
            Expect.floatClose Accuracy.low tieCorrected.PValueTwoTailed 0.5827 "pValue should match SciPy with ties of size 3 and correction"
    ]
[<Tests>]
let tTestTests =
    // Student's t tests, validated in SPSS version 27 unless noted otherwise.
    let groupA = [|-5.;-3.;-3.;-4.;-5.|]
    let groupB = [|-2.;-4.;-4.;-6.;-6.;-6.;-5.|]
    let groupD = [|1.;-1.;0.;2.;2.|]
    let meanA = Seq.mean groupA
    let meanB = Seq.mean groupB
    let varA = Seq.var groupA
    let varB = Seq.var groupB
    let nA = float (Seq.length groupA)
    let nB = float (Seq.length groupB)
    // two-sample tests (equal and unequal variance) and a one-sample test
    let tTest1 = TTest.twoSample true groupA groupB
    let tTest2 = TTest.twoSampleFromMeanAndVar true (meanA,varA,nA) (meanB,varB,nB)
    let tTest3 = TTest.twoSample false groupA groupB
    let tTest4 = TTest.oneSample groupD 0.5
    testList "Testing.TTest" [
        testCase "twoSample" <| fun () ->
            Expect.floatClose Accuracy.low tTest1.PValue 0.377 "pValue should be equal."
            Expect.floatClose Accuracy.low tTest1.DegreesOfFreedom 10. "df should be equal."
            Expect.floatClose Accuracy.low tTest1.Statistic 0.924 "t statistic should be equal."
            Expect.floatClose Accuracy.low tTest3.PValue 0.345 "pValue should be equal."
            Expect.floatClose Accuracy.low tTest3.DegreesOfFreedom 9.990 "df should be equal."
        testCase "twoSampleFromMeanAndVar" <| fun () ->
            // summary-statistics variant must agree with the raw-data variant
            Expect.equal tTest1 tTest2 "results should be equal."
            // tested with R function (t.test(batcha, batchb, var.equal=TRUE))
            let sample1 = [1.; 2.; 3.]
            let sample2 = [1.; 3.; 5.; 7.]
            let mean1 = Seq.mean sample1
            let mean2 = Seq.mean sample2
            let var1 = Seq.var sample1
            let var2 = Seq.var sample2
            let ttestTwoSample = Testing.TTest.twoSampleFromMeanAndVar true (mean1,var1,3.) (mean2,var2,4.)
            let expectedPval = 0.26716219523142071
            let expectedStatistic = -1.24837556786471859
            Expect.floatClose Accuracy.high ttestTwoSample.PValue expectedPval "pValue should be equal."
            Expect.floatClose Accuracy.high ttestTwoSample.Statistic expectedStatistic "t statistic should be equal."
            // tested with R: t.test(c(-1,3,-5,7), c(1,2,3), var.equal=TRUE)
            // NOTE(review): the original test derived mean3/var3 from sample2 (silently
            // re-running the first comparison) and asserted against expectedPval/
            // expectedStatistic instead of expectedPval2/expectedStatistic2. The R
            // reference values below correspond to sample3 (first) vs sample1 (second):
            // t = (1-2)/sqrt(16.4*(1/4+1/3)) = -0.32331, p = 0.75954.
            let sample3 = [-1.; 3.; -5.; 7.]
            let mean3 = Seq.mean sample3
            let var3 = Seq.var sample3
            let ttestTwoSample2 = Testing.TTest.twoSampleFromMeanAndVar true (mean3,var3,4.) (mean1,var1,3.)
            let expectedPval2 = 0.75954440793496059
            let expectedStatistic2 = -0.323310403056781825
            Expect.floatClose Accuracy.high ttestTwoSample2.PValue expectedPval2 "pValue should be equal."
            Expect.floatClose Accuracy.high ttestTwoSample2.Statistic expectedStatistic2 "t statistic should be equal."
            // a NaN mean must propagate to NaN outputs
            let ttestTwoSample3 = Testing.TTest.twoSampleFromMeanAndVar true (nan,var2,3.) (mean2,var2,4.)
            Expect.isTrue (nan.Equals(ttestTwoSample3.PValue)) "pValue should be nan."
            Expect.isTrue (nan.Equals(ttestTwoSample3.Statistic)) "t statistic should be nan."
        testCase "oneSample" <| fun () ->
            Expect.floatClose Accuracy.low tTest4.PValue 0.634 "pValue should be equal."
            Expect.equal tTest4.DegreesOfFreedom 4. "df should be equal."
            Expect.floatClose Accuracy.low tTest4.Statistic 0.514 "t statistic should be equal."
        testCase "oneSampleFromMeanandStDev" <| fun () ->
            // tested with R: t.test(c(1,2,3), mu = -3, alternative = "two.sided")
            let sample = [1.; 2.; 3.]
            let mean = Seq.mean sample
            let stdev = Seq.stDev sample
            let ttest = Testing.TTest.oneSampleFromMeanAndStDev(mean,stdev,3) -3.
            let expectedPval = 0.013072457560346513
            let expectedStatistic = 8.6602540378443873
            Expect.floatClose Accuracy.high ttest.PValue expectedPval "pValue should be equal."
            Expect.floatClose Accuracy.high ttest.Statistic expectedStatistic "t statistic should be equal."
            // tested with R: t.test(c(-1,-2,-3), mu = 0, alternative = "two.sided")
            let sampleNeg = [-1.; -2.; -3.]
            let meanNeg = Seq.mean sampleNeg
            let stdevNeg = Seq.stDev sampleNeg
            let ttest2 = Testing.TTest.oneSampleFromMeanAndStDev(meanNeg,stdevNeg,3) 0.
            let expectedPval1 = 0.074179900227448525
            let expectedStatistic2 = -3.46410161513775483
            Expect.floatClose Accuracy.high ttest2.PValue expectedPval1 "pValue should be equal."
            Expect.floatClose Accuracy.high ttest2.Statistic expectedStatistic2 "t statistic should be equal."
            // a NaN mean must propagate to NaN outputs
            let ttest3 = Testing.TTest.oneSampleFromMeanAndStDev(nan,stdev,3) 0.
            Expect.isTrue (nan.Equals(ttest3.PValue)) "pValue should be nan."
            Expect.isTrue (nan.Equals(ttest3.Statistic)) "t statistic should be nan."
        testCase "twoSamplePaired" <| fun () ->
            // tested with R function t.test(x, y, paired = TRUE, alternative = "two.sided")
            let vectorX = [|1.;2.;4.;8.|]
            let vectorY = [|10.;23.;11.;9.|]
            let expectedPval = 0.10836944173355316
            let expectedStatistic = 2.26554660552391818
            let twoSamplePaired = Testing.TTest.twoSamplePaired vectorX vectorY
            Expect.floatClose Accuracy.high twoSamplePaired.PValue expectedPval "pValue should be equal."
            Expect.floatClose Accuracy.high twoSamplePaired.Statistic expectedStatistic "t statistic should be equal."
            Expect.equal twoSamplePaired.DegreesOfFreedom 3. "df should be equal."
            let vectorZ = [|-5.;-9.;0.;-8.|]
            let twoSamplePaired2 = Testing.TTest.twoSamplePaired vectorX vectorZ
            let expectedPval1 = 0.041226646225439562
            let expectedStatistic1 = -3.44031028692427698
            Expect.floatClose Accuracy.high twoSamplePaired2.PValue expectedPval1 "pValue should be equal."
            Expect.floatClose Accuracy.high twoSamplePaired2.Statistic expectedStatistic1 "t statistic should be equal."
            // NaN entries must propagate to NaN outputs
            let vectorNan = [|nan;10.;23.;11.|]
            let twoSamplePaired3 = Testing.TTest.twoSamplePaired vectorX vectorNan
            Expect.isTrue (nan.Equals(twoSamplePaired3.PValue)) "pValue should be nan."
            Expect.isTrue (nan.Equals(twoSamplePaired3.Statistic)) "t statistic should be nan."
            // vectors of different length must raise
            let vectorBefore = [|10.;4.;15.;12.;6.|]
            let twoSamplePaired4() = Testing.TTest.twoSamplePaired vectorBefore vectorX |> ignore
            Expect.throws twoSamplePaired4 "Vectors of different length"
            // infinite inputs yield NaN results
            let vectorWithInfinity = [|infinity;4.;15.;12.|]
            let twoSamplePairedInfinity = Testing.TTest.twoSamplePaired vectorWithInfinity vectorX
            Expect.isTrue (nan.Equals(twoSamplePairedInfinity.PValue)) "pValue should be nan."
            Expect.isTrue (nan.Equals(twoSamplePairedInfinity.Statistic)) "t statistic should be nan."
            // fixed: the original used +infinity here as well, so the negative case was never exercised
            let vectorWithNegativeInfinity = [|-infinity;4.;15.;12.|]
            let twoSampleNegativeInfinity = Testing.TTest.twoSamplePaired vectorWithNegativeInfinity vectorX
            Expect.isTrue (nan.Equals(twoSampleNegativeInfinity.PValue)) "pValue should be nan."
            Expect.isTrue (nan.Equals(twoSampleNegativeInfinity.Statistic)) "t statistic should be nan."
            // comparison against an all-zero baseline
            let vectorNull = [|0.;0.;0.;0.|]
            let twoSamplePairedWithNullVector = Testing.TTest.twoSamplePaired vectorNull vectorY
            let expectedPval2 = 0.0272
            let expectedStatistic2 = 4.0451
            Expect.floatClose Accuracy.low twoSamplePairedWithNullVector.PValue expectedPval2 "pValue should be equal."
            Expect.floatClose Accuracy.low twoSamplePairedWithNullVector.Statistic expectedStatistic2 "t statistic should be equal."
    ]
[<Tests>]
let fTestTests =
    // F test for equality of variances, validated against R:
    // res.ftest <- var.test(samplea, sampleb, alternative = "two.sided")
    // RStudio 2022.02.3+492 "Prairie Trillium" Release (1db809b8323ba0a87c148d16eb84efe39a8e7785, 2022-05-20) for Windows
    let sampleA = [|5.0; 6.0; 5.8; 5.7|]
    let sampleB = [|3.5; 3.7; 4.0; 3.3; 3.6|]
    // degenerate inputs used for the failure-mode cases
    let withNaN = [|5.0; 6.0; 5.8; nan|]
    let withPosInf = [|5.0; 6.0; 5.8; infinity|]
    let withNegInf = [|5.0; 6.0; 5.8; -infinity|]
    let withTies = [|5.0; 5.0; 5.8; 5.3|]
    // run the F test on the regular inputs
    let fResult = FTest.testVariances sampleA sampleB
    let fResultTies = FTest.testVariances sampleA withTies
    testList "Testing.FTest" [
        testCase "createFTest" <| fun () ->
            Expect.floatClose Accuracy.low fResult.Statistic 2.82338 "statistics should be equal."
            Expect.floatClose Accuracy.low fResult.PValueTwoTailed 0.34172 "pValue should be equal."
        testCase "FTest NaN" <| fun () ->
            Expect.throws (fun () -> (FTest.testVariances withNaN sampleB).Statistic |> printf "%A") "FTest works with NaN"
        testCase "FTest infinities" <| fun () ->
            Expect.throws (fun () -> (FTest.testVariances withPosInf sampleB).Statistic |> printf "%A") "FTest works with divisions by infitity -> nan"
            Expect.throws (fun () -> (FTest.testVariances withNegInf sampleB).Statistic |> printf "%A") "FTest works with divisions by -infitity -> nan"
        testCase "FTest 2 ties" <| fun () ->
            Expect.floatClose Accuracy.low fResultTies.Statistic 1.32748538 "statistics should be equal."
            Expect.floatClose Accuracy.low fResultTies.PValueTwoTailed 0.8214 "pValue should be equal."
    ]
[<Tests>]
let chiSquaredTests =
    // Chi-squared goodness-of-fit test, cross-checked with
    // https://www.graphpad.com/quickcalcs/chisquared2/
    // example from R
    // obs <- c(315, 101, 108, 32)
    // exp <- c(0.5625, 0.1875, 0.1875, 0.0625)
    // chisq.test(obs, p = exp)
    let testCase1 =
        let expected = [312.75;104.25;104.25;34.75]
        let observed = [315.;101.;108.;32.]
        let df = expected.Length - 1
        ChiSquareTest.compute df expected observed
    //obs <- c(315, 101, 80, 32, 50)
    //exp <- c(0.5625, 0.1875, 0.0875, 0.0625,0.1)
    //chisq.test(obs, p = exp)
    let testCase2 =
        let expected = [325.125;108.375;50.575;36.125;57.8]
        let observed = [315.;101.;80.;32.;50.]
        let df = expected.Length - 1
        ChiSquareTest.compute df expected observed
    testList "Testing.ChiSquaredTest" [
        testCase "compute" <| fun () ->
            // Expect.equal on the rounded values (instead of Expect.isTrue on a
            // boolean float comparison) reports actual vs expected on failure.
            Expect.equal (Math.Round(testCase1.PValueRight, 4)) 0.9254 "pValue should be equal."
            Expect.equal (Math.Round(testCase1.Statistic, 4)) 0.4700 "statistic should be equal."
            Expect.equal (Math.Round(testCase2.PValueRight, 6)) 0.000638 "pValue should be equal."
            Expect.equal (Math.Round(testCase2.Statistic, 3)) 19.461 "statistic should be equal."
    ]
[<Tests>]
let pearsonTests =
    // Pearson correlation test; reference values from R: cor.test(x, y)
    let testCase1 =
        let seq1 = [44.4; 45.9; 41.9; 53.3; 44.7; 44.1; 50.7; 45.2; 60.1]
        let seq2 = [ 2.6;  3.1;  2.5;  5.0;  3.6;  4.0;  5.2;  2.8;  3.8]
        Correlation.testPearson seq1 seq2
    let testCase2 =
        let seq1 = [312.7; 104.2; 104.; 34.7]
        let seq2 = [315.5; 101.3; 108.; 32.2]
        Correlation.testPearson seq1 seq2
    testList "Testing.Correlation" [
        testCase "testPearson" <| fun () ->
            // Expect.equal on the rounded value (instead of Expect.isTrue on a
            // boolean float comparison) reports actual vs expected on failure.
            Expect.equal (Math.Round(testCase1.PValue, 9)) 0.108173054 "pValue should be equal"
            Expect.equal (Math.Round(testCase2.PValue, 9)) 0.000294627 "pValue should be equal"
    ]
[<Tests>]
let holmTests =
    // Holm FWER adjustment, validated against "The R Stats Package" v4.5.1
    // using p.adjust with the method parameter set to "holm".
    // Loads the "pValues" column of a CSV; missing entries become nan.
    let loadPValues path =
        Frame.ReadCsv(location = path, hasHeaders = true, separators = ",").GetColumn<float>("pValues")
        |> Series.valuesAll
        |> Array.ofSeq
        |> Array.map (fun x -> if x.IsSome then x.Value else nan)
    let dropNaN = Array.filter (fun x -> not (nan.Equals x))
    let inputWithNaN = loadPValues @"data/holmHochberg_Input_nan.csv"
    let expectedWithNaN = loadPValues @"data/fwer_holm_results.csv"
    let input = dropNaN inputWithNaN
    let expected = dropNaN expectedWithNaN
    testList "Testing.MultipleTesting.holmFWER" [
        testCase "testHolmLarge" (fun () ->
            Expect.sequenceEqual
                (input |> MultipleTesting.holmFWER |> Seq.map (fun x -> Math.Round(x, 9)))
                (expected |> Seq.map (fun x -> Math.Round(x, 9)))
                "adjusted pValues should be equal to the reference implementation."
        )
        testCase "testHolmLargeNaN" (fun () ->
            TestExtensions.sequenceEqualRoundedNaN 9
                (inputWithNaN |> MultipleTesting.holmFWER |> Seq.ofArray)
                (expectedWithNaN |> Seq.ofArray)
                "adjusted pValues should be equal to the reference implementation."
        )
    ]
[<Tests>]
let hochbergTests =
    // Hochberg FWER adjustment, validated against "The R Stats Package" v4.5.1
    // using p.adjust with the method parameter set to "hochberg".
    // Loads the "pValues" column of a CSV; missing entries become nan.
    let loadPValues path =
        Frame.ReadCsv(location = path, hasHeaders = true, separators = ",").GetColumn<float>("pValues")
        |> Series.valuesAll
        |> Array.ofSeq
        |> Array.map (fun x -> if x.IsSome then x.Value else nan)
    let dropNaN = Array.filter (fun x -> not (nan.Equals x))
    let inputWithNaN = loadPValues @"data/holmHochberg_Input_nan.csv"
    let expectedWithNaN = loadPValues @"data/fwer_hochberg_results.csv"
    let input = dropNaN inputWithNaN
    let expected = dropNaN expectedWithNaN
    testList "Testing.MultipleTesting.hochbergFWER" [
        testCase "testHochbergLarge" (fun () ->
            Expect.sequenceEqual
                (input |> MultipleTesting.hochbergFWER |> Seq.map (fun x -> Math.Round(x, 9)))
                (expected |> Seq.map (fun x -> Math.Round(x, 9)))
                "adjusted pValues should be equal to the reference implementation."
        )
        testCase "testHochbergLargeNaN" (fun () ->
            TestExtensions.sequenceEqualRoundedNaN 9
                (inputWithNaN |> MultipleTesting.hochbergFWER |> Seq.ofArray)
                (expectedWithNaN |> Seq.ofArray)
                "adjusted pValues should be equal to the reference implementation."
        )
    ]
[<Tests>]
let dunnSidakTests =
    // Dunn–Šidák FWER corrections for m = 5 p-values.
    // p = [0.01; 0.04; 0.03; 0.1; 0.5], m=5
    // expected values verified with Python: 1 - (1-p)^m
    let pValues = [| 0.01; 0.04; 0.03; 0.1; 0.5 |]
    let pNaN = [| 0.01; nan; 0.03; 0.1; 0.5 |]
    testList "Testing.MultipleTesting.DunnSidak" [
        testCase "singleStepBasic" (fun () ->
            let result = MultipleTesting.dunnSidakFWER pValues
            // 1 - (1-p)^5 for each p
            let expected = [| 0.049010; 0.184627; 0.141266; 0.40951; 0.96875 |]
            (result, expected)
            ||> Array.iter2 (fun r e ->
                Expect.floatClose Accuracy.low r e "Single-step Šidák adjusted p-values should match.")
        )
        testCase "singleStepNaN" (fun () ->
            let result = MultipleTesting.dunnSidakFWER pNaN
            // m=4 valid values; 1 - (1-p)^4
            Expect.floatClose Accuracy.low result.[0] 0.039404 "p[0] with NaN should match."
            Expect.isTrue (Double.IsNaN result.[1]) "NaN position should remain NaN."
            Expect.floatClose Accuracy.low result.[2] 0.114707 "p[2] with NaN should match."
            Expect.floatClose Accuracy.low result.[3] 0.34390 "p[3] with NaN should match."
            Expect.floatClose Accuracy.low result.[4] 0.93750 "p[4] with NaN should match."
        )
        testCase "holmSidakBasic" (fun () ->
            let result = MultipleTesting.holmSidakFWER pValues
            // sorted p: [0.01, 0.03, 0.04, 0.1, 0.5]
            // raw: [1-0.99^5, 1-0.97^4, 1-0.96^3, 1-0.9^2, 1-0.5^1]
            // running max (already monotone): [0.04901, 0.11471, 0.11526, 0.19, 0.5]
            // back to original order: [0.04901, 0.11526, 0.11471, 0.19, 0.5]
            let expected = [| 0.049010; 0.115264; 0.114707; 0.19; 0.5 |]
            (result, expected)
            ||> Array.iter2 (fun r e ->
                Expect.floatClose Accuracy.low r e "Holm–Šidák adjusted p-values should match.")
        )
        testCase "holmSidakNaN" (fun () ->
            let result = MultipleTesting.holmSidakFWER pNaN
            // m=4 valid values; sorted: [0.01, 0.03, 0.1, 0.5]
            // raw: [1-0.99^4, 1-0.97^3, 1-0.9^2, 1-0.5^1] = [0.039404, 0.087327, 0.19, 0.5]
            Expect.floatClose Accuracy.low result.[0] 0.039404 "p[0] Holm-Šidák NaN"
            Expect.isTrue (Double.IsNaN result.[1]) "NaN position should remain NaN."
            Expect.floatClose Accuracy.low result.[2] 0.087327 "p[2] Holm-Šidák NaN"
            Expect.floatClose Accuracy.low result.[3] 0.19000 "p[3] Holm-Šidák NaN"
            Expect.floatClose Accuracy.low result.[4] 0.50000 "p[4] Holm-Šidák NaN"
        )
    ]
[<Tests>]
let benjaminiHochbergTests =
    // Benjamini-Hochberg FDR adjustment, compared against precomputed reference values
    // (file name indicates the expected values were adjusted with R — TODO confirm exact call).
    // Input p-values keyed by an id; nan entries mark missing values.
    let largeSetWithIdsnan = readCsv @"benjaminiHochberg_Input_nan.csv"
    let largeSetnan =
        largeSetWithIdsnan |> Array.map snd
    // same data with the nan entries removed (with and without keys)
    let largeSetWithIds =
        largeSetWithIdsnan |> Array.filter (fun (_,x) -> not (nan.Equals x))
    let largeSet =
        largeSetnan |> Array.filter (fun x -> not (nan.Equals x))
    let largeSetWithIds_Expectednan = readCsv @"benjaminiHochberg_AdjustedWithR_nan.csv"
    let largeSet_Expectednan = largeSetWithIds_Expectednan |> Array.map snd
    let largeSetWithIds_Expected =
        largeSetWithIds_Expectednan
        |> Array.filter (fun (_,x) -> not (nan.Equals x))
    let largeSet_Expected =
        largeSet_Expectednan
        |> Array.filter (fun x -> not (nan.Equals x))
    testList "Testing.MultipleTesting.BenjaminiHochberg" [
        testCase "testBHLarge" (fun () ->
            // nan-free input; compare rounded to 9 decimal places
            Expect.sequenceEqual
                (largeSet |> MultipleTesting.benjaminiHochbergFDR |> Seq.map (fun x -> Math.Round(x,9)))
                (largeSet_Expected |> Seq.map (fun x -> Math.Round(x,9)))
                "adjusted pValues should be equal to the reference implementation."
        )
        testCase "testBHLargeNaN" (fun () ->
            // nan entries must be preserved in place by the adjustment
            TestExtensions.sequenceEqualRoundedNaN 9
                (largeSetnan |> MultipleTesting.benjaminiHochbergFDR)
                (largeSet_Expectednan |> Seq.ofArray)
                "adjusted pValues should be equal to the reference implementation."
        )
        testCase "testBHLargeBy" (fun () ->
            // keyed variant: both sides sorted by key before comparison
            Expect.sequenceEqual
                (
                    largeSetWithIds
                    |> MultipleTesting.benjaminiHochbergFDRBy id
                    |> Seq.sortBy fst
                    |> Seq.map (fun (x,y) -> x, Math.Round(y,9))
                )
                (
                    largeSetWithIds_Expected
                    |> Seq.sortBy fst
                    |> Seq.map (fun (x,y) -> x, Math.Round(y,9))
                )
                "adjusted pValues with keys should be equal to the reference implementation."
        )
        testCase "testBHLargeNaNBy" (fun () ->
            Expect.sequenceEqual
                (
                    // Prepend two nan sentinel pairs keyed "A0"; assumes "A0" sorts before
                    // every real id (TODO confirm), so Seq.skip 2 drops exactly these
                    // sentinels and the remainder must match the nan-free expectation.
                    [("A0",nan); ("A0",nan); yield! largeSetWithIds]
                    |> MultipleTesting.benjaminiHochbergFDRBy id
                    |> Seq.sortBy fst
                    |> Seq.skip 2
                    |> Seq.map (fun (x,y) -> x, Math.Round(y,9))
                )
                (
                    largeSetWithIds_Expected
                    |> Seq.sortBy fst
                    |> Seq.map (fun (x,y) -> x, Math.Round(y,9))
                )
                "adjusted pValues with keys should be equal to the reference implementation, ignoring nan."
        )
    ]
//[<Tests>] suspended because of long codecov run time
let qValuesTest =
    // q-value computation, tested against the R qvalue package 2.26.0.
    // pi0 estimation is in closed form in the R package and therefore cannot be tested;
    // the reference values were produced with a fixed pi0:
    // qvalue::qvalue(pvals, pi0 = 0.48345) and qvalue::qvalue(pvals, pi0 = 0.48345, pfdr = TRUE)
    let pi0 = 0.48345
    let dropNaN = Array.filter (fun x -> not (nan.Equals x))
    let pValsNaN = readCsv @"benjaminiHochberg_Input_nan.csv" |> Array.map snd
    let expectedNaN = readCsv @"qvaluesWithR_nan.csv" |> Array.map snd
    let expectedRobustNaN = readCsv @"qvaluesRobustWithR_nan.csv" |> Array.map snd
    let pVals = dropNaN pValsNaN
    let expected = dropNaN expectedNaN
    let expectedRobust = dropNaN expectedRobustNaN
    testList "Testing.MultipleTesting.Qvalues" [
        testCase "ofPValues" (fun () ->
            Expect.sequenceEqual
                (pVals |> MultipleTesting.Qvalues.ofPValues pi0 |> Seq.map (fun x -> Math.Round(x,9)))
                (expected |> Seq.map (fun x -> Math.Round(x,9)))
                "qValues should be equal to the reference implementation."
        )
        testCase "ofPValues_nan" (fun () ->
            TestExtensions.sequenceEqualRoundedNaN 9
                (pValsNaN |> MultipleTesting.Qvalues.ofPValues pi0 |> Seq.ofArray)
                (expectedNaN |> Seq.ofArray)
                "qValues should be equal to the reference implementation."
        )
        testCase "ofPValuesRobust" (fun () ->
            Expect.sequenceEqual
                (pVals |> MultipleTesting.Qvalues.ofPValuesRobust pi0 |> Seq.map (fun x -> Math.Round(x,9)))
                (expectedRobust |> Seq.map (fun x -> Math.Round(x,9)))
                "qValues Robust should be equal to the reference implementation."
        )
        testCase "ofPValuesRobust_nan" (fun () ->
            TestExtensions.sequenceEqualRoundedNaN 9
                (pValsNaN |> MultipleTesting.Qvalues.ofPValuesRobust pi0 |> Seq.ofArray)
                (expectedRobustNaN |> Seq.ofArray)
                "qValues Robust should be equal to the reference implementation."
        )
    ]
/// Builds an Expecto test case named [metricName] that asserts an exact match
/// between the computed metric value and the expected one.
let createMetricTestInt metricName actual expected =
    testCase metricName (fun () ->
        Expect.equal actual expected (sprintf "Metric %s was calculated incorrectly." metricName))
/// Builds an Expecto test case asserting that a float metric is within the given accuracy of its expected value.
let createMetricTestFloat accuracy metricName actual expected =
    testCase metricName (fun () ->
        let message = sprintf "Metric %s was calculated incorrectly." metricName
        Expect.floatClose accuracy actual expected message
    )
[<Tests>]
let binaryConfusionMatrixTests =
    // Binary classification fixture:
    // | Predicted |
    // | P | N |
    // | Actual | P | 3 | 1 |
    // | N | 1 | 2 |
    // Reference matrix that every constructor variant must reproduce.
    let expectedCM = {
        TP = 3
        TN = 2
        FP = 1
        FN = 1
    }
    // The same matrix built via the three supported entry points.
    let binaryCM = BinaryConfusionMatrix.create(3, 2, 1, 1)
    let fromLabelledPredictions = BinaryConfusionMatrix.ofPredictions(1, [1;1;1;1;0;0;0], [1;1;1;0;1;0;0])
    let fromBoolPredictions = BinaryConfusionMatrix.ofPredictions([true;true;true;true;false;false;false], [true;true;true;false;true;false;false])
    testList "Testing.BinaryConfusionMatrix" [
        testCase "create" (fun () -> Expect.equal binaryCM expectedCM "binary confusion matrix incorrectly created")
        testCase "ofPredictions1" (fun () -> Expect.equal fromLabelledPredictions expectedCM "binary confusion matrix created incorrectly from observations with positive label")
        testCase "ofPredictions2" (fun () -> Expect.equal fromBoolPredictions expectedCM "binary confusion matrix created incorrectly from boolean observations")
        createMetricTestInt "TruePositives" binaryCM.TP 3
        createMetricTestInt "TrueNegatives" binaryCM.TN 2
        createMetricTestInt "FalsePositives" binaryCM.FP 1
        createMetricTestInt "FalseNegatives" binaryCM.FN 1
        testCase "thresholdMap implicit thresholds 1" (fun () ->
            let observed =
                BinaryConfusionMatrix.thresholdMap(
                    [true;true;true;true;false;false;false],
                    [0.9 ;0.6 ;0.7 ; 0.2 ; 0.7; 0.3 ; 0.1]
                )
            // One confusion matrix per distinct prediction score, descending.
            let reference = [
                1.9, BinaryConfusionMatrix.create(0,3,0,4)
                0.9, BinaryConfusionMatrix.create(1,3,0,3)
                0.7, BinaryConfusionMatrix.create(2,2,1,2)
                0.6, BinaryConfusionMatrix.create(3,2,1,1)
                0.3, BinaryConfusionMatrix.create(3,1,2,1)
                0.2, BinaryConfusionMatrix.create(4,1,2,0)
                0.1, BinaryConfusionMatrix.create(4,0,3,0)
            ]
            Expect.sequenceEqual observed reference "binary threshold map not correctly created from binary predictions"
        )
        testCase "thresholdMap explicit thresholds 1" (fun () ->
            let observed =
                BinaryConfusionMatrix.thresholdMap(
                    [true;true;true;true;false;false;false],
                    [0.9 ;0.6 ;0.7 ; 0.2 ; 0.7; 0.3 ; 0.1],
                    [1.; 0.9; 0.8; 0.7; 0.6; 0.5; 0.4; 0.3; 0.2; 0.1; 0.]
                )
            // One confusion matrix per caller-supplied threshold, descending.
            let reference = [|
                1.9, BinaryConfusionMatrix.create(0,3,0,4)
                1.0, BinaryConfusionMatrix.create(0,3,0,4)
                0.9, BinaryConfusionMatrix.create(1,3,0,3)
                0.8, BinaryConfusionMatrix.create(1,3,0,3)
                0.7, BinaryConfusionMatrix.create(2,2,1,2)
                0.6, BinaryConfusionMatrix.create(3,2,1,1)
                0.5, BinaryConfusionMatrix.create(3,2,1,1)
                0.4, BinaryConfusionMatrix.create(3,2,1,1)
                0.3, BinaryConfusionMatrix.create(3,1,2,1)
                0.2, BinaryConfusionMatrix.create(4,1,2,0)
                0.1, BinaryConfusionMatrix.create(4,0,3,0)
                0. , BinaryConfusionMatrix.create(4,0,3,0)
            |]
            Expect.sequenceEqual observed reference "binary threshold map not correctly created from binary predictions"
        )
        testCase "thresholdMap: floating point error affects custom thresholds" (fun () ->
            let observed =
                BinaryConfusionMatrix.thresholdMap(
                    [true;true;true;true;false;false;false],
                    [0.9 ;0.6 ;0.7 ; 0.2 ; 0.7; 0.3 ; 0.1],
                    // these values are not exact due to floating point errors in addition. For example, the 0.7 is actually 0.70000000000000006661338147750939 which is > 0.7 and therefore produces an unexpected result
                    [0. .. 0.1 .. 1.] |> List.rev
                )
            let reference = [|
                1.9, BinaryConfusionMatrix.create(0,3,0,4)
                1.0, BinaryConfusionMatrix.create(0,3,0,4)
                0.9, BinaryConfusionMatrix.create(1,3,0,3)
                0.8, BinaryConfusionMatrix.create(1,3,0,3)
                0.7, BinaryConfusionMatrix.create(2,2,1,2)
                0.6, BinaryConfusionMatrix.create(3,2,1,1)
                0.5, BinaryConfusionMatrix.create(3,2,1,1)
                0.4, BinaryConfusionMatrix.create(3,2,1,1)
                0.3, BinaryConfusionMatrix.create(3,1,2,1)
                0.2, BinaryConfusionMatrix.create(4,1,2,0)
                0.1, BinaryConfusionMatrix.create(4,0,3,0)
                0. , BinaryConfusionMatrix.create(4,0,3,0)
            |]
            // Intentionally asserts inequality: range-generated thresholds drift from the exact values.
            Expect.isFalse (observed = reference) "expected list comprehension threshold to produce slightly incorrent thresholds"
        )
    ]
[<Tests>]
let multiLabelConfusionMatrixTests =
    // Multi-label classification fixture:
    // | Predicted |
    // | A | B | C |
    // | Actual | A | 3 | 1 | 1 |
    // | B | 1 | 2 | 0 |
    // | C | 2 | 0 | 4 |
    let labels = [|"A"; "B"; "C"|]
    let confusion: Matrix<int> =
        array2D [
            [3; 1; 1]
            [1; 2; 0]
            [2; 0; 4]
        ]
        |> Matrix.ofArray2D
    // Reference record that both construction paths must reproduce.
    let expectedMLCM =
        {
            Labels = labels
            Confusion =
                array2D [
                    [3; 1; 1]
                    [1; 2; 0]
                    [2; 0; 4]
                ]
                |> Matrix.ofArray2D
        }
    let multiLabelCM = MultiLabelConfusionMatrix.create(labels, confusion)
    let ofPredictions =
        MultiLabelConfusionMatrix.ofPredictions(
            labels,
            [|"A"; "A"; "A"; "A"; "A"; "B"; "B"; "B"; "C"; "C"; "C"; "C"; "C"; "C"|],
            [|"A"; "A"; "A"; "B"; "C"; "B"; "B"; "A"; "C"; "C"; "C"; "C"; "A"; "A"|]
        )
    let allVsAll = multiLabelCM |> MultiLabelConfusionMatrix.allVsAll
    // Per-label one-vs-rest binarizations of the matrix above.
    let expectedAllVsAll =
        [
            "A", BinaryConfusionMatrix.create(3,6,3,2)
            "B", BinaryConfusionMatrix.create(2,10,1,1)
            "C", BinaryConfusionMatrix.create(4,7,1,2)
        ]
    testList "Testing.MultiLabelConfusionMatrix" [
        testCase "create" (fun () -> Expect.equal multiLabelCM expectedMLCM "multi label confusion matrix incorrectly created")
        testCase "ofPredictions" (fun () -> Expect.equal ofPredictions expectedMLCM "multi label confusion matrix created incorrectly from observations with positive label")
        testCase "oneVsAll1" (fun () -> Expect.equal (snd expectedAllVsAll[0]) (multiLabelCM |> MultiLabelConfusionMatrix.oneVsRest "A") "all-vs-all binary confusion matrices incorrectly created from multi label confusion matrix")
        testCase "oneVsAll2" (fun () -> Expect.equal (snd expectedAllVsAll[1]) (multiLabelCM |> MultiLabelConfusionMatrix.oneVsRest "B") "all-vs-all binary confusion matrices incorrectly created from multi label confusion matrix")
        testCase "oneVsAll3" (fun () -> Expect.equal (snd expectedAllVsAll[2]) (multiLabelCM |> MultiLabelConfusionMatrix.oneVsRest "C") "all-vs-all binary confusion matrices incorrectly created from multi label confusion matrix")
        testCase "allVsAll" (fun () -> Expect.sequenceEqual expectedAllVsAll allVsAll "all-vs-all binary confusion matrices incorrectly created from multi label confusion matrix")
    ]
[<Tests>]
let comparisonMetricsTests =
testList "Testing.ComparisonMetrics" [
// values calculated by formulas at https://en.wikipedia.org/wiki/Confusion_matrix
let sensitivity = 0.75
let specificity = 0.6666666667
let precision = 0.75
let negativePredictiveValue = 0.6666666667
let missrate = 0.25
let fallOut = 0.3333333333
let falseDiscoveryRate = 0.25
let falseOmissionRate = 0.3333333333
let positiveLikelihoodRatio = sensitivity / fallOut
let negativeLikelihoodRatio = missrate / specificity
let prevalenceThreshold = sqrt(fallOut) / (sqrt(sensitivity) + sqrt(fallOut))
let threatScore = 0.6
let prevalence = 0.5714285714
let accuracy = 0.7142857143
let balancedAccuracy = (sensitivity + specificity) / 2.
let f1 = 0.75
let phiCoefficient = 0.4166666667
let fowlkesMallowsIndex = 0.75
let informedness = 0.4166666667
let markedness = 0.4166666667
let diagnosticOddsRatio = positiveLikelihoodRatio / negativeLikelihoodRatio
let tp = 3.
let tn = 2.
let fp = 1.
let fn = 1.
let p = 4.
let n = 3.
let samplesize = 7.
let binaryCM = BinaryConfusionMatrix.create(3,2,1,1)
let cm = ComparisonMetrics.create(binaryCM)
testList "Metric calculation" [
createMetricTestFloat Accuracy.veryHigh "Calculate Sensitivity" (ComparisonMetrics.calculateSensitivity tp p) sensitivity
createMetricTestFloat Accuracy.veryHigh "Calculate Specificity" (ComparisonMetrics.calculateSpecificity tn n) specificity
createMetricTestFloat Accuracy.veryHigh "Calculate Precision" (ComparisonMetrics.calculatePrecision tp fp) precision
createMetricTestFloat Accuracy.veryHigh "Calculate NegativePredictiveValue" (ComparisonMetrics.calculateNegativePredictiveValue tn fn) negativePredictiveValue
createMetricTestFloat Accuracy.veryHigh "Calculate Missrate" (ComparisonMetrics.calculateMissrate fn p) missrate
createMetricTestFloat Accuracy.veryHigh "Calculate FallOut" (ComparisonMetrics.calculateFallOut fp n) fallOut
createMetricTestFloat Accuracy.veryHigh "Calculate FalseDiscoveryRate" (ComparisonMetrics.calculateFalseDiscoveryRate fp tp) falseDiscoveryRate
createMetricTestFloat Accuracy.veryHigh "Calculate FalseOmissionRate" (ComparisonMetrics.calculateFalseOmissionRate fn tn) falseOmissionRate
createMetricTestFloat Accuracy.veryHigh "Calculate PositiveLikelihoodRatio" (ComparisonMetrics.calculatePositiveLikelihoodRatio tp p fp n) positiveLikelihoodRatio
createMetricTestFloat Accuracy.veryHigh "Calculate NegativeLikelihoodRatio" (ComparisonMetrics.calculateNegativeLikelihoodRatio fn p tn n) negativeLikelihoodRatio
createMetricTestFloat Accuracy.veryHigh "Calculate PrevalenceThreshold" (ComparisonMetrics.calculatePrevalenceThreshold fp n tp p) prevalenceThreshold
createMetricTestFloat Accuracy.veryHigh "Calculate ThreatScore" (ComparisonMetrics.calculateThreatScore tp fn fp) threatScore
createMetricTestFloat Accuracy.veryHigh "Calculate Prevalence" (ComparisonMetrics.calculatePrevalence p samplesize) prevalence
createMetricTestFloat Accuracy.veryHigh "Calculate Accuracy" (ComparisonMetrics.calculateAccuracy tp tn samplesize) accuracy
createMetricTestFloat Accuracy.veryHigh "Calculate BalancedAccuracy" (ComparisonMetrics.calculateBalancedAccuracy tp p tn n) balancedAccuracy
createMetricTestFloat Accuracy.veryHigh "Calculate F1" (ComparisonMetrics.calculateF1 tp fp fn) f1
createMetricTestFloat Accuracy.veryHigh "Calculate PhiCoefficient" (ComparisonMetrics.calculatePhiCoefficient tp tn fp fn) phiCoefficient
createMetricTestFloat Accuracy.veryHigh "Calculate FowlkesMallowsIndex" (ComparisonMetrics.calculateFowlkesMallowsIndex tp fp p) fowlkesMallowsIndex
createMetricTestFloat Accuracy.veryHigh "Calculate Informedness" (ComparisonMetrics.calculateInformedness tp p tn n) informedness
createMetricTestFloat Accuracy.veryHigh "Calculate Markedness" (ComparisonMetrics.calculateMarkedness tp fp tn fn) markedness
createMetricTestFloat Accuracy.veryHigh "Calculate DiagnosticOddsRatio" (ComparisonMetrics.calculateDiagnosticOddsRatio tp tn fp fn p n) diagnosticOddsRatio
]
testList "Binary predictions" [
createMetricTestInt "TruePositives" cm.TP 3
createMetricTestInt "TrueNegatives" cm.TN 2
createMetricTestInt "FalsePositives" cm.FP 1
createMetricTestInt "FalseNegatives" cm.FN 1
createMetricTestInt "Positves" cm.P 4
createMetricTestInt "Negatives" cm.N 3
createMetricTestInt "Total" cm.SampleSize 7
createMetricTestFloat Accuracy.veryHigh "Sensitivity" cm.Sensitivity sensitivity
createMetricTestFloat Accuracy.veryHigh "Specificity" cm.Specificity specificity
createMetricTestFloat Accuracy.veryHigh "Precision" cm.Precision precision
createMetricTestFloat Accuracy.veryHigh "NegativePredictiveValue" cm.NegativePredictiveValue negativePredictiveValue
createMetricTestFloat Accuracy.veryHigh "Missrate" cm.Missrate missrate
createMetricTestFloat Accuracy.veryHigh "FallOut" cm.FallOut fallOut
createMetricTestFloat Accuracy.veryHigh "FalseDiscoveryRate" cm.FalseDiscoveryRate falseDiscoveryRate
createMetricTestFloat Accuracy.veryHigh "FalseOmissionRate" cm.FalseOmissionRate falseOmissionRate
createMetricTestFloat Accuracy.veryHigh "PositiveLikelihoodRatio" cm.PositiveLikelihoodRatio positiveLikelihoodRatio
createMetricTestFloat Accuracy.veryHigh "NegativeLikelihoodRatio" cm.NegativeLikelihoodRatio negativeLikelihoodRatio
createMetricTestFloat Accuracy.veryHigh "PrevalenceThreshold" cm.PrevalenceThreshold prevalenceThreshold
createMetricTestFloat Accuracy.veryHigh "ThreatScore" cm.ThreatScore threatScore
createMetricTestFloat Accuracy.veryHigh "Prevalence" cm.Prevalence prevalence
createMetricTestFloat Accuracy.veryHigh "Accuracy" cm.Accuracy accuracy
createMetricTestFloat Accuracy.veryHigh "BalancedAccuracy" cm.BalancedAccuracy balancedAccuracy
createMetricTestFloat Accuracy.veryHigh "F1" cm.F1 f1
createMetricTestFloat Accuracy.veryHigh "PhiCoefficient" cm.PhiCoefficient phiCoefficient
createMetricTestFloat Accuracy.veryHigh "FowlkesMallowsIndex" cm.FowlkesMallowsIndex fowlkesMallowsIndex
createMetricTestFloat Accuracy.veryHigh "Informedness" cm.Informedness informedness
createMetricTestFloat Accuracy.veryHigh "Markedness" cm.Markedness markedness
createMetricTestFloat Accuracy.veryHigh "DiagnosticOddsRatio" cm.DiagnosticOddsRatio diagnosticOddsRatio
]
testList "Multi-label predictions" [
let c: Matrix<int> =
[
[3; 1; 1]
[1; 2; 0]
[2; 0; 4]
]
|> array2D
|> Matrix.ofArray2D
let multiLabelCM = MultiLabelConfusionMatrix.create([|"A";"B";"C"|], c)
let expectedAvsRest = BinaryConfusionMatrix.create(3,6,3,2)
let expectedBvsRest = BinaryConfusionMatrix.create(2,10,1,1)
let expectedCvsRest = BinaryConfusionMatrix.create(4,7,1,2)
let expectedMicroAverage = ComparisonMetrics.create(BinaryConfusionMatrix.create(9,23,5,5))
let expectedMacroAverage =
[
expectedAvsRest
expectedBvsRest
expectedCvsRest
]
|> List.map (fun x -> ComparisonMetrics.create(x))
|> fun metrics ->
ComparisonMetrics.create(
((metrics[0].P + metrics[1].P + metrics[2].P ) / 3.),
((metrics[0].N + metrics[1].N + metrics[2].N ) / 3.),
((metrics[0].SampleSize + metrics[1].SampleSize + metrics[2].SampleSize ) / 3.),
((metrics[0].TP + metrics[1].TP + metrics[2].TP ) / 3.),
((metrics[0].TN + metrics[1].TN + metrics[2].TN ) / 3.),
((metrics[0].FP + metrics[1].FP + metrics[2].FP ) / 3.),
((metrics[0].FN + metrics[1].FN + metrics[2].FN ) / 3.),
((metrics[0].Sensitivity + metrics[1].Sensitivity + metrics[2].Sensitivity ) / 3.),
((metrics[0].Specificity + metrics[1].Specificity + metrics[2].Specificity ) / 3.),
((metrics[0].Precision + metrics[1].Precision + metrics[2].Precision ) / 3.),
((metrics[0].NegativePredictiveValue + metrics[1].NegativePredictiveValue+ metrics[2].NegativePredictiveValue) / 3.),
((metrics[0].Missrate + metrics[1].Missrate + metrics[2].Missrate ) / 3.),
((metrics[0].FallOut + metrics[1].FallOut + metrics[2].FallOut ) / 3.),
((metrics[0].FalseDiscoveryRate + metrics[1].FalseDiscoveryRate + metrics[2].FalseDiscoveryRate ) / 3.),
((metrics[0].FalseOmissionRate + metrics[1].FalseOmissionRate + metrics[2].FalseOmissionRate ) / 3.),
((metrics[0].PositiveLikelihoodRatio + metrics[1].PositiveLikelihoodRatio+ metrics[2].PositiveLikelihoodRatio) / 3.),
((metrics[0].NegativeLikelihoodRatio + metrics[1].NegativeLikelihoodRatio+ metrics[2].NegativeLikelihoodRatio) / 3.),
((metrics[0].PrevalenceThreshold + metrics[1].PrevalenceThreshold + metrics[2].PrevalenceThreshold ) / 3.),
((metrics[0].ThreatScore + metrics[1].ThreatScore + metrics[2].ThreatScore ) / 3.),
((metrics[0].Prevalence + metrics[1].Prevalence + metrics[2].Prevalence ) / 3.),
((metrics[0].Accuracy + metrics[1].Accuracy + metrics[2].Accuracy ) / 3.),
((metrics[0].BalancedAccuracy + metrics[1].BalancedAccuracy + metrics[2].BalancedAccuracy ) / 3.),