Source file owl_base_dense_ndarray_generic.ml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
# 1 "src/base/dense/owl_base_dense_ndarray_generic.ml"
open Bigarray
open Owl_types
type ('a, 'b) t = ('a, 'b, c_layout) Genarray.t
type ('a, 'b) kind = ('a, 'b) Bigarray.kind
module Scalar = Owl_base_maths
(* Pad [dims] on the left with 1s until it has [desired_len] entries;
   returns [dims] unchanged when it is already long enough. *)
let _prepend_dims dims desired_len =
  let n = Array.length dims in
  if n >= desired_len then dims
  else Array.append (Array.make (desired_len - n) 1) dims
(* Compute the shapes used to broadcast two ndarrays together.
   Returns (ext_dims_a, ext_dims_b, dims_c): the two input shapes
   left-padded with 1s to equal rank, and their element-wise maximum.
   Raises Invalid_argument when a dimension pair differs with neither
   side equal to 1 (numpy-style broadcasting rule).
   Fix: use structural [<>] instead of physical [!=] when comparing
   int values. *)
let _get_broadcasted_dims dims_a dims_b =
  let len_c = Pervasives.max (Array.length dims_a) (Array.length dims_b) in
  let ext_dims_a = _prepend_dims dims_a len_c in
  let ext_dims_b = _prepend_dims dims_b len_c in
  let dims_c = Array.make len_c 0 in
  for i = 0 to len_c - 1 do
    let val_a = ext_dims_a.(i) in
    let val_b = ext_dims_b.(i) in
    if val_a = val_b then dims_c.(i) <- val_a
    else if val_a <> 1 && val_b <> 1 then
      raise (Invalid_argument "The arrays cannot be broadcast into the same shape")
    else dims_c.(i) <- Pervasives.max val_a val_b
  done;
  (ext_dims_a, ext_dims_b, dims_c)
(* Advance the multi-dimensional index [ind] (mutated in place) to the
   next position in row-major order within bounds [dims].  Returns
   [true] on success, [false] when [ind] was the last index (every
   entry is then reset to 0). *)
let _next_index ind dims =
  let rec carry p =
    if p < 0 then false
    else if ind.(p) + 1 < dims.(p) then begin
      ind.(p) <- ind.(p) + 1;
      true
    end
    else begin
      ind.(p) <- 0;
      carry (p - 1)
    end
  in
  carry (Array.length ind - 1)
(* Map an index valid in the broadcasted result back to an index valid
   in an operand of shape [dims]: coordinates within range are kept,
   coordinates on broadcast (size-1) dimensions collapse to 0.
   Raises Invalid_argument for an out-of-range coordinate on a
   non-broadcast dimension. *)
let _get_broadcasted_index ind dims =
  Array.init (Array.length dims) (fun i ->
      if ind.(i) < dims.(i) then ind.(i)
      else if dims.(i) = 1 then 0
      else raise (Invalid_argument "not broadcasted correctly"))
(* Permute [arr]: element i of the result is [arr.(perm.(i))]. *)
let _apply_perm arr perm =
  let pick i = arr.(perm.(i)) in
  Array.init (Array.length arr) pick
(* Draw [count] integer samples from the range [0, range).
   When [replacement] is false the samples are distinct, and [count]
   must not exceed [range] (Invalid_argument otherwise).
   NOTE(review): a freshly self-initialised RNG state is created on
   every call, so results are not reproducible across calls. *)
let _draw_int_samples replacement range count =
  if not replacement && count > range
  then raise (Invalid_argument "cannot draw that many samples from the given range, without replacement")
  else (
    (* live population of candidate values; shrinks as values are drawn
       when sampling without replacement *)
    let pop_cnt = ref range in
    let pop = Array.init !pop_cnt (fun i -> i) in
    let rand_gen = Random.State.make_self_init() in
    let draw_fun = (fun _ ->
        let index = Random.State.int rand_gen !pop_cnt in
        let sample = pop.(index) in
        if replacement
        then sample
        else (
          (* remove the drawn value by swapping in the last live element *)
          pop_cnt := !pop_cnt - 1;
          pop.(index) <- pop.(!pop_cnt);
          sample
        )
      )
    in
    Array.init count draw_fun
  )
(* Expand one slice definition into the explicit array of indices it
   denotes.  [start] and [stop] may be negative (counted from the end
   of a dimension of size [dim]); [step] defaults to +1 or -1 depending
   on the direction.  The sign of [step] must agree with the direction
   (asserted). *)
let _enumerate_slice_def dim ?step start stop =
  let norm i = if i < 0 then dim + i else i in
  let start = norm start in
  let stop = norm stop in
  let step =
    match step with
    | Some s -> s
    | None -> if start <= stop then 1 else -1
  in
  assert ((start <= stop && step > 0) || (start > stop && step < 0));
  let width = abs (stop - start) in
  let len = (width + abs step) / abs step in
  Array.init len (fun i -> start + (i * step))
(* Expand a (possibly partial) slice definition into one array of
   explicit indices per dimension of [dims].  Each entry of
   [index_list] is [] (whole dimension), [i] (single index),
   [start; stop] or [start; stop; step]; dimensions beyond the length
   of [index_list] are taken whole. *)
let _expand_slice_indices index_list dims =
  let rank = Array.length dims in
  let sdef_len = List.length index_list in
  let _expand_slice_index = (
    fun i ind -> match ind with
      | [] -> Array.init dims.(i) (fun i -> i)
      | [start] -> _enumerate_slice_def dims.(i) start start
      | [start; stop] -> _enumerate_slice_def dims.(i) start stop
      | [start; stop; step] -> _enumerate_slice_def dims.(i) ~step:step start stop
      | _ -> failwith "incorrect slice definition"
  ) in
  (* trailing dimensions not mentioned in the definition are taken in full *)
  Array.append
    (Array.of_list (List.mapi _expand_slice_index index_list))
    (Array.init (rank - sdef_len)
       (fun p -> Array.init dims.(p + sdef_len) (fun i -> i)))
(* Allocate an uninitialised ndarray of the given kind/shape in C layout. *)
let empty kind dims = Genarray.create kind c_layout dims
(* Allocate an ndarray and fill every element with [value]. *)
let create kind dims value =
  let varr = empty kind dims in
  Genarray.fill varr value;
  varr
(* All-zeros / all-ones ndarrays, using the kind's own zero and one. *)
let zeros kind dims = create kind dims (Owl_const.zero kind)
let ones kind dims = create kind dims (Owl_const.one kind)
(* Shape, rank and total number of elements. *)
let shape varr = Genarray.dims varr
let num_dims varr = Array.length (shape varr)
let numel varr =
  let v_shape = shape varr in
  Array.fold_left ( * ) 1 v_shape
(* Bigarray kind of the elements. *)
let kind varr = Genarray.kind varr
(* Element access by full multi-dimensional index. *)
let get varr index = (Genarray.get varr index)
let set varr index value = (Genarray.set varr index value)
(* Extract the sub-array selected by [index_list] (see
   [_expand_slice_indices] for the accepted forms).  Returns a fresh
   ndarray; every index of the slice is enumerated with [_next_index]
   and the elements are copied one by one. *)
let get_slice index_list varr =
  let dims = shape varr in
  let rank = Array.length dims in
  let index_array = _expand_slice_indices index_list dims in
  let slice_dims = Array.map (fun a -> Array.length a) index_array in
  let slice_varr = empty (kind varr) slice_dims in
  let slice_ind = Array.make rank 0 in
  let original_ind = Array.make rank 0 in
  let should_stop = ref false in
  begin
    while not !should_stop do
      (* translate the position inside the slice to a position in varr *)
      for i = 0 to rank - 1 do
        original_ind.(i) <- (index_array.(i)).(slice_ind.(i))
      done;
      Genarray.set slice_varr slice_ind (Genarray.get varr original_ind);
      if not (_next_index slice_ind slice_dims) then
        should_stop := true
    done;
    slice_varr
  end
(* Write [slice_varr] into the positions of [varr] selected by
   [index_list].  NOTE: [reshape] here resolves to [Bigarray.reshape]
   (the file's own [reshape] with -1 support is defined further down,
   and this is not a recursive binding), which returns a data-sharing
   view and requires the element counts to match exactly. *)
let set_slice index_list varr slice_varr =
  let dims = shape varr in
  let rank = Array.length dims in
  let index_array = _expand_slice_indices index_list dims in
  let slice_dims = Array.map (fun a -> Array.length a) index_array in
  let slice_varr = reshape slice_varr slice_dims in
  let slice_ind = Array.make rank 0 in
  let original_ind = Array.make rank 0 in
  let should_stop = ref false in
  begin
    while not !should_stop do
      (* translate the position inside the slice to a position in varr *)
      for i = 0 to rank - 1 do
        original_ind.(i) <- (index_array.(i)).(slice_ind.(i))
      done;
      Genarray.set varr original_ind (Genarray.get slice_varr slice_ind);
      if not (_next_index slice_ind slice_dims) then
        should_stop := true
    done;
  end
(* Return a fresh ndarray with the same kind, shape and contents. *)
let copy varr =
  let varr_copy = empty (kind varr) (shape varr) in
  Genarray.blit varr varr_copy;
  varr_copy
(* Set every element of [varr] to the zero of its kind.  Using
   [Owl_const.zero] (as [zeros] does) instead of the float literal [0.]
   keeps [reset] consistent with [zeros] and working for non-float
   kinds. *)
let reset varr = Genarray.fill varr (Owl_const.zero (kind varr))
(* Reshape [x] to shape [d].  At most one entry of [d] may be -1; that
   entry is inferred so the total element count is preserved.
   NOTE: this binding is not recursive, so the inner [reshape] calls
   resolve to [Bigarray.reshape], which returns a view sharing data. *)
let reshape x d =
  let minus_one = Owl_utils.Array.count d (-1) in
  assert (minus_one <= 1);
  if minus_one = 0 then reshape x d
  else (
    let n = numel x in
    (* folding with initial -1 over entries that include the single -1
       flips the sign twice, yielding the product of the known dims *)
    let m = Array.fold_right ( * ) d (-1) in
    let e = Array.map (fun a -> if a = -1 then n / m else a) d in
    reshape x e
  )
(* View [varr] as a rank-1 array of all its elements. *)
let flatten varr = (reshape varr [|(numel varr)|])
(* Return a new ndarray holding the elements of [varr] in reverse
   row-major order; the shape is unchanged. *)
let reverse varr =
  let n = numel varr in
  let ret = empty (kind varr) (shape varr) in
  let dst = reshape ret [|n|] in
  let src = reshape varr [|n|] in
  let last = n - 1 in
  for i = 0 to last do
    set dst [|i|] (get src [|last - i|])
  done;
  ret
(* Overwrite every element of [varr] IN PLACE with [f] applied to it,
   working through a flattened rank-1 view of the same data. *)
let _apply_fun f varr =
  let varr_linear = flatten varr |> array1_of_genarray in
  let length = numel varr in
  begin
    for i = 0 to length - 1 do
      (Array1.unsafe_set varr_linear i (f (Array1.unsafe_get varr_linear i)))
    done
  end
(* Create an ndarray of the given kind/shape where the element at flat
   (row-major) position i is [f i]. *)
let init kind dims f =
  let varr = empty kind dims in
  let flat = flatten varr |> array1_of_genarray in
  let n = numel varr in
  for i = 0 to n - 1 do
    Array1.unsafe_set flat i (f i)
  done;
  varr
(* Element-wise map; the input array is left untouched. *)
let map f varr =
  let varr_copy = copy varr in
  (_apply_fun f varr_copy; varr_copy)
(* Like [map] but [f] also receives the flat (row-major) index. *)
let mapi f x =
  let y = copy x in
  let y' = flatten y |> array1_of_genarray in
  for i = 0 to (Array1.dim y') - 1 do
    let a = Array1.unsafe_get y' i in
    Array1.unsafe_set y' i (f i a)
  done;
  y
(* Row-major strides and slice sizes of the shape — delegated to
   Owl_utils (calc_stride / calc_slice; see that module for the exact
   convention). *)
let strides x = x |> shape |> Owl_utils.calc_stride
let slice_size x = x |> shape |> Owl_utils.calc_slice
(* Fold [f] over the elements of [x].  With [~axis] the fold runs along
   that axis, producing an ndarray of the reduced shape [s] (per
   Owl_utils.reduce_params, which yields m outer blocks of n elements
   reducing into o accumulator cells each); without it the whole array
   folds into a 1-element ndarray.  [f] receives a running element
   counter, the accumulator and the current element; [a] is the initial
   accumulator value. *)
let foldi ?axis f a x =
  let x' = flatten x |> array1_of_genarray in
  match axis with
  | Some axis -> (
      let m, n, o, s = Owl_utils.reduce_params axis x in
      let start_x = ref 0 in
      let start_y = ref 0 in
      let incy = ref 0 in
      let k = ref 0 in
      let y = create (kind x) s a in
      let y' = flatten y |> array1_of_genarray in
      for i = 0 to m - 1 do
        for j = 0 to n - 1 do
          let b = Array1.unsafe_get y' (!start_y + !incy) in
          let c = Array1.unsafe_get x' (!start_x + j) in
          Array1.unsafe_set y' (!start_y + !incy) (f !k b c);
          (* cycle through the o accumulator cells of the current block *)
          if !incy + 1 = o then incy := 0
          else incy := !incy + 1;
          k := !k + 1;
        done;
        start_x := !start_x + n;
        start_y := !start_y + o;
      done;
      y
    )
  | None -> (
      let b = ref a in
      for i = 0 to (numel x) - 1 do
        let c = Array1.unsafe_get x' i in
        b := f i !b c
      done;
      create (kind x) [|1|] !b
    )
(* [fold] is [foldi] with the element counter dropped. *)
let fold ?axis f a x = foldi ?axis (fun _ b c -> f b c) a x
(* Cumulative scan of [f] along [axis] (default: the last axis).
   Works on a copy of [x]: each slice along the axis is combined with
   its predecessor, so row k+1 becomes f(row k, row k+1) cumulatively.
   [f] receives a running element counter plus the two elements. *)
let scani ?axis f x =
  let d = num_dims x in
  let a = match axis with
    | Some a -> a
    | None -> d - 1
  in
  assert (0 <= a && a < d);
  let _stride = strides x in
  let _slicez = slice_size x in
  (* m independent blocks; within each, n element pairs offset by one
     stride along the scan axis *)
  let m = (numel x) / _slicez.(a) in
  let n = _slicez.(a) - _stride.(a) in
  let incx = _slicez.(a) in
  let incy = _slicez.(a) in
  let start_x = ref 0 in
  let start_y = ref _stride.(a) in
  let k = ref 0 in
  let y = copy x in
  let y' = flatten y |> array1_of_genarray in
  for i = 0 to m - 1 do
    for j = 0 to n - 1 do
      let b = Array1.unsafe_get y' (!start_x + j) in
      let c = Array1.unsafe_get y' (!start_y + j) in
      Array1.unsafe_set y' (!start_y + j) (f !k b c);
      k := !k + 1
    done;
    start_x := !start_x + incx;
    start_y := !start_y + incy;
  done;
  y
(* [scan] is [scani] with the element counter dropped. *)
let scan ?axis f x = scani ?axis (fun _ a b -> f a b) x
(* Apply [f] to every element together with its flat row-major index. *)
let iteri f x =
  let flat = flatten x |> array1_of_genarray in
  let n = Array1.dim flat in
  for i = 0 to n - 1 do
    f i (Array1.unsafe_get flat i)
  done
(* Apply [f] to every element, ignoring indices. *)
let iter f x =
  let flat = flatten x |> array1_of_genarray in
  let n = Array1.dim flat in
  for i = 0 to n - 1 do
    f (Array1.unsafe_get flat i)
  done
(* Return the flat indices of the elements satisfying predicate [f]
   (which also sees the index). *)
let filteri f x =
  let s = Owl_utils.Stack.make () in
  iteri (fun i y ->
      if f i y = true then
        Owl_utils.Stack.push s i
    ) x;
  Owl_utils.Stack.to_array s
(* Same, with the index hidden from the predicate. *)
let filter f x = filteri (fun _ y -> f y) x
(* Fill a fresh ndarray with the arithmetic sequence
   a, a+step, a+2*step, ... in row-major order. *)
let sequential kind ?(a=0.) ?(step=1.) dims =
  let varr = empty kind dims in
  let count = ref 0. in
  let next _ =
    let v = a +. !count *. step in
    count := !count +. 1.;
    v
  in
  _apply_fun next varr;
  varr
(* Build an ndarray of the given kind/shape from a flat OCaml array
   laid out in row-major order; [arr] must hold at least [numel]
   elements. *)
let of_array kind arr dims =
  let varr = empty kind dims in
  let flat = flatten varr |> array1_of_genarray in
  let n = numel varr in
  for i = 0 to n - 1 do
    Array1.unsafe_set flat i arr.(i)
  done;
  varr
(* Random ndarrays: every element drawn independently via
   Owl_base_stats. *)
(* Uniform samples on [a, b). *)
let uniform kind ?(a=0.) ?(b=1.) dims =
  let uniform_gen_fun = (fun _ -> Owl_base_stats.uniform_rvs ~a ~b) in
  let varr = empty kind dims in
  _apply_fun uniform_gen_fun varr;
  varr
(* Bernoulli samples with success probability [p]. *)
let bernoulli kind ?(p=0.5) dims =
  let bernoulli_gen_fun = (fun _ -> Owl_base_stats.bernoulli_rvs ~p) in
  let varr = empty kind dims in
  _apply_fun bernoulli_gen_fun varr;
  varr
(* Gaussian samples; [sigma] is presumably the standard deviation —
   see Owl_base_stats.gaussian_rvs for the exact convention. *)
let gaussian kind ?(mu=0.) ?(sigma=1.) dims =
  let gaussian_gen_fun = (fun _ -> Owl_base_stats.gaussian_rvs ~mu ~sigma) in
  let varr = empty kind dims in
  _apply_fun gaussian_gen_fun varr;
  varr
(* Pretty-print [varr] via Owl_pretty.  [max_row]/[max_col] default to
   showing all rows/columns (rows = numel / size of last dimension).
   Fix: the [?header] parameter was missing from the signature — the
   original read "?max_row ?max_col ? ?fmt", a syntax error, while the
   body forwards [?header] to Owl_pretty. *)
let print ?max_row ?max_col ?header ?fmt varr =
  let dims = shape varr in
  let rank = Array.length dims in
  let n = dims.(rank - 1) in
  let max_row = match max_row with
    | Some a -> Some a
    | None -> Some ((numel varr) / n)
  in
  let max_col = match max_col with
    | Some a -> Some a
    | None -> Some n
  in
  Owl_pretty.print_dsnda ?max_row ?max_col ?header ?elt_to_str_fun:fmt varr
(* Repeat [varr] along each dimension (numpy-style tile): result
   dimension i has size dims.(i) * reps.(i), after left-padding the
   shorter of shape/reps with 1s.  Elements are copied one at a time;
   the source index is the result index modulo the source shape. *)
let tile varr reps =
  let dims = shape varr in
  let result_rank = Pervasives.max (Array.length dims) (Array.length reps) in
  let dims = _prepend_dims dims result_rank in
  let reps = _prepend_dims reps result_rank in
  let varr = reshape varr dims in
  let result_dims = Array.map2 (fun a b -> a * b) dims reps in
  let result_varr = empty (kind varr) result_dims in
  let result_ind = Array.make result_rank 0 in
  let original_ind = Array.make result_rank 0 in
  let should_stop = ref false in
  begin
    while not !should_stop do
      (* wrap each coordinate back into the source array *)
      for i = 0 to result_rank - 1 do
        original_ind.(i) <- (Pervasives.(mod) result_ind.(i) dims.(i))
      done;
      Genarray.set result_varr result_ind (Genarray.get varr original_ind);
      if not (_next_index result_ind result_dims) then
        should_stop := true
    done;
    result_varr
  end
(* Split [varr] along [axis] into consecutive chunks whose sizes are
   given by [parts] (expected to sum to that dimension); each chunk is
   extracted with [get_slice] and is therefore a fresh copy. *)
let split ?(axis=0) parts varr =
  let dims = shape varr in
  let rank = Array.length dims in
  let pos = ref 0 in
  (* inclusive [start; stop] pair of each chunk along the split axis *)
  let axis_indices = Array.map (fun d -> (pos := !pos + d; [!pos - d; !pos - 1])) parts in
  let slices_defs =
    Array.map (fun ind ->
        Array.to_list (Array.init rank
                         (fun i -> if i = axis then ind else [])))
      axis_indices
  in
  (Array.map (fun def -> get_slice def varr) slices_defs)
(* Draw [count] sub-arrays of [varr] along [axis], returning the
   sampled ndarray together with the drawn indices.  Sampling is WITH
   replacement, matching Owl's documented [draw] semantics, so [count]
   may exceed the size of the axis dimension.  (The previous [false]
   argument sampled without replacement and raised whenever [count]
   exceeded that size.) *)
let draw ?(axis=0) varr count =
  let dims = shape varr in
  let rank = Array.length dims in
  let indices = _draw_int_samples true dims.(axis) count in
  (get_slice
    (List.init rank
      (fun i -> if i = axis then (Array.to_list indices) else []))
    varr,
  indices)
(* Concatenate the ndarrays in [varrs] along [axis].  All inputs must
   agree on every dimension except [axis]; the result kind is that of
   the first input.  Each array is viewed as a 2-D matrix of
   (product of dims before axis) x (axis dim * product of dims after),
   so each contribution is a contiguous block that can be blitted. *)
let concatenate ?(axis=0) varrs =
  let varrs_num = Array.length varrs in
  let all_dims = Array.map shape varrs in
  let prefix_dims = Array.sub all_dims.(0) 0 axis in
  let sum_axis_dims = Array.fold_left (fun x a -> x + a.(axis)) 0 all_dims in
  let suffix_dims = Array.sub all_dims.(0)
      (axis + 1) ((Array.length all_dims.(0)) - axis - 1)
  in
  let result_dims =
    Array.concat [prefix_dims; [|sum_axis_dims|]; suffix_dims]
  in
  let result_varr = empty (kind varrs.(0)) result_dims in
  let prefix_dims_product = Array.fold_left ( * ) 1 prefix_dims in
  let suffix_dims_product = Array.fold_left ( * ) 1 suffix_dims in
  (* 2-D view sharing data with the argument (Bigarray reshape) *)
  let reshaper_fun = (
    fun varr ->
      let old_shape = shape varr in
      let new_shape =
        [|prefix_dims_product; old_shape.(axis) * suffix_dims_product|]
      in
      reshape varr new_shape
  ) in
  let reshaped_result = reshaper_fun result_varr in
  let reshaped_varrs = Array.map reshaper_fun varrs in
  begin
    for i = 0 to prefix_dims_product - 1 do
      let start_index = ref 0 in
      let result_slice = Genarray.slice_left reshaped_result [|i|] in
      for j = 0 to varrs_num - 1 do
        (* copy row i of input j into its block of result row i *)
        let src_slice = Genarray.slice_left reshaped_varrs.(j) [|i|] in
        let block_len = all_dims.(j).(axis) * suffix_dims_product in
        let result_sub =
          Genarray.sub_left result_slice !start_index block_len in
        Genarray.blit src_slice result_sub;
        start_index := !start_index + block_len
      done
    done;
    result_varr
  end
(* Concatenate [reps] copies of the whole array along [axis].
   NOTE(review): this is tile-like behaviour along one axis, not an
   element-by-element repeat — confirm against the intended [repeat]
   semantics for this interface. *)
let repeat ?(axis=0) varr reps =
  let varrs = Array.make reps varr in
  (concatenate ~axis:axis varrs)
(* Element-wise unary maths, each delegating to Owl_base_maths through
   [map] (so each returns a fresh array).
   Fix: a duplicated [tan] binding (the same line appeared twice) has
   been removed. *)
let abs varr = (map Scalar.abs varr)
let neg varr = (map Scalar.neg varr)
let floor varr = (map Scalar.floor varr)
let ceil varr = (map Scalar.ceil varr)
let round varr = (map Scalar.round varr)
let sqr varr = (map Scalar.sqr varr)
let sqrt varr = (map Scalar.sqrt varr)
let log varr = (map Scalar.log varr)
let log2 varr = (map Scalar.log2 varr)
let log10 varr = (map Scalar.log10 varr)
let exp varr = (map Scalar.exp varr)
let sin varr = (map Scalar.sin varr)
let cos varr = (map Scalar.cos varr)
let tan varr = (map Scalar.tan varr)
let sinh varr = (map Scalar.sinh varr)
let cosh varr = (map Scalar.cosh varr)
let tanh varr = (map Scalar.tanh varr)
let asin varr = (map Scalar.asin varr)
let acos varr = (map Scalar.acos varr)
let atan varr = (map Scalar.atan varr)
let asinh varr = (map Scalar.asinh varr)
let acosh varr = (map Scalar.acosh varr)
let atanh varr = (map Scalar.atanh varr)
(* Sum over the leading dimensions up to and including [axis]; the
   result has shape dims.[axis+1 ..].  The input is viewed as a
   (num_rows x num_cols) matrix and each column is summed.
   NOTE: accumulates with float (+.), so this assumes a float kind.
   [result_varr] is a reshaped view sharing data with [result_vec]
   (Bigarray reshape), so writing into [result_vec] fills the result. *)
let sum_slices ?(axis=0) varr =
  let dims = shape varr in
  let rank = Array.length dims in
  let num_rows = Array.fold_left ( * ) 1 (Array.sub dims 0 (axis + 1)) in
  let num_cols = (numel varr) / num_rows in
  let varr_mat = reshape varr [|num_rows; num_cols|] in
  let result_vec = empty (kind varr) [|num_cols|] in
  let result_varr = reshape result_vec
      (Array.sub dims (axis + 1) (rank - axis - 1))
  in
  let row_sum = ref 0. in
  begin
    for j = 0 to num_cols - 1 do
      row_sum := 0.;
      for i = 0 to num_rows - 1 do
        row_sum := !row_sum +. (Genarray.get varr_mat [|i; j|])
      done;
      Genarray.set result_vec [|j|] !row_sum
    done;
    result_varr
  end
(* Element-wise sign, logistic sigmoid and rectified linear unit. *)
let signum varr = (map Scalar.signum varr)
let sigmoid varr = (map Scalar.sigmoid varr)
let relu varr = (map Scalar.relu varr)
(* Left-fold [f] over all elements of [varr] in row-major order,
   starting from [a]. *)
let _fold_left f a varr =
  let flat = flatten varr |> array1_of_genarray in
  let acc = ref a in
  let n = numel varr in
  for i = 0 to n - 1 do
    acc := f !acc (Array1.unsafe_get flat i)
  done;
  !acc
(* Minimum / maximum over all elements.  The folds are seeded with the
   float infinities so that, e.g., the maximum of an all-negative array
   is correct.  (The previous seed for [max'], Pervasives.min_float, is
   the smallest POSITIVE float — ~2.2e-308 — not the most negative
   value, so [max'] of an all-negative array wrongly returned it.) *)
let min' varr = (_fold_left (Pervasives.min) Pervasives.infinity varr)
let max' varr = (_fold_left (Pervasives.max) Pervasives.neg_infinity varr)
(* Sum over all elements, using the kind's own addition and zero. *)
let sum' varr =
  let _kind = kind varr in
  _fold_left (Owl_base_dense_common._add_elt _kind) (Owl_const.zero _kind) varr
(* Shared worker for axis reductions: fold [f] over [x] viewed as [m]
   blocks of [n] elements, each block reducing into [o] accumulator
   cells of a zero-initialised result of shape [ys] (m, n, o as
   produced by Owl_utils.reduce_params).  Note the reduction starts
   from zero, unlike [foldi] which starts from a caller-supplied seed. *)
let fold_along f m n o x ys =
  let x = flatten x in
  let y = zeros (kind x) ys |> flatten in
  let idx = ref 0 in
  let idy = ref 0 in
  let incy = ref 0 in
  for i = 0 to (m - 1) do
    for j = 0 to (n - 1) do
      let addon = Genarray.get x [|!idx + j|] in
      let orig = Genarray.get y [|!idy + !incy|] in
      Genarray.set y [|!idy + !incy|] (f orig addon);
      (* cycle through the o accumulator cells of the current block *)
      incy := if (!incy + 1 = o) then 0 else !incy + 1
    done;
    idx := !idx + n;
    idy := !idy + o;
  done;
  reshape y ys
(* Sum along one axis (keeping that axis with size 1, per
   Owl_utils.reduce_params), or over everything into a 1-element array
   when [axis] is absent. *)
let sum ?axis x =
  let _kind = kind x in
  match axis with
  | Some a -> (
      let m, n, o, s = Owl_utils.reduce_params a x in
      fold_along (Owl_base_dense_common._add_elt _kind) m n o x s
    )
  | None -> create (kind x) (Array.make 1 1) (sum' x)
(* Sum along several axes in turn ([axis] is an int array), or over all
   axes (result shape: all 1s) when [axis] is absent. *)
let sum_reduce ?axis x =
  let _kind = kind x in
  let _dims = num_dims x in
  match axis with
  | Some a -> (
      let y = ref x in
      Array.iter (fun i ->
          assert (i < _dims);
          let m, n, o, s = Owl_utils.reduce_params i !y in
          y := fold_along (Owl_base_dense_common._add_elt _kind) m n o !y s
        ) a;
      !y
    )
  | None -> create (kind x) (Array.make _dims 1) (sum' x)
(* Axis-wise min/max are not provided by this base implementation;
   both raise Failure "not implemented". *)
let min ?axis x = failwith "not implemented"
let max ?axis x = failwith "not implemented"
(* L1 norm: sum of absolute values over all elements. *)
let l1norm' varr =
  _fold_left (fun acc x -> acc +. Scalar.abs x) 0. varr
(* Squared L2 norm: sum of squares over all elements. *)
let l2norm_sqr' varr =
  _fold_left (fun acc x -> acc +. (x *. x)) 0. varr
(* Euclidean (L2) norm over all elements. *)
let l2norm' varr =
  Scalar.sqrt (l2norm_sqr' varr)
(* [a ** x] for every element x. *)
let scalar_pow a varr = map (fun x -> a ** x) varr
(* [x ** a] for every element x. *)
let pow_scalar varr a = map (fun x -> x ** a) varr
(* [atan2 a x] with the scalar as the first argument. *)
let scalar_atan2 a varr = map (fun x -> Scalar.atan2 a x) varr
(* [atan2 x a] with the scalar as the second argument. *)
let atan2_scalar varr a = map (fun x -> Scalar.atan2 x a) varr
(* Apply the binary [op_fun] element-wise to two ndarrays under
   numpy-style broadcasting.  The result kind is taken from the first
   operand.  Every index of the result is enumerated with [_next_index]
   and mapped back into each operand via [_get_broadcasted_index]. *)
let _broadcasted_op varr_a varr_b op_fun =
  let (dims_a, dims_b, dims_c) =
    _get_broadcasted_dims (shape varr_a) (shape varr_b)
  in
  let _kind = kind varr_a in
  let varr_a = reshape varr_a dims_a in
  let varr_b = reshape varr_b dims_b in
  let varr_c = empty _kind dims_c in
  let ind = Array.make (Array.length dims_c) 0 in
  let should_stop = ref false in
  begin
    while not !should_stop do
      let ind_a = _get_broadcasted_index ind dims_a in
      let ind_b = _get_broadcasted_index ind dims_b in
      Genarray.set varr_c ind
        (op_fun (Genarray.get varr_a ind_a) (Genarray.get varr_b ind_b));
      if not (_next_index ind dims_c) then
        should_stop := true
    done;
    varr_c
  end
(* Broadcasted element-wise arithmetic; the concrete operator is chosen
   from Owl_base_dense_common according to the kind of the FIRST
   operand. *)
let add varr_a varr_b =
  let _op = Owl_base_dense_common._add_elt (kind varr_a) in
  _broadcasted_op varr_a varr_b _op
let sub varr_a varr_b =
  let _op = Owl_base_dense_common._sub_elt (kind varr_a) in
  _broadcasted_op varr_a varr_b _op
let mul varr_a varr_b =
  let _op = Owl_base_dense_common._mul_elt (kind varr_a) in
  _broadcasted_op varr_a varr_b _op
let div varr_a varr_b =
  let _op = Owl_base_dense_common._div_elt (kind varr_a) in
  _broadcasted_op varr_a varr_b _op
(* Broadcasted atan2 and power (float ( ** )). *)
let atan2 varr_a varr_b = (_broadcasted_op varr_a varr_b (Scalar.atan2))
let pow varr_a varr_b = (_broadcasted_op varr_a varr_b ( ** ))
(* Element-wise [x op a] with the scalar on the right-hand side; the
   concrete operator comes from Owl_base_dense_common by kind. *)
let add_scalar varr a =
  let op = Owl_base_dense_common._add_elt (kind varr) in
  map (fun x -> op x a) varr
let sub_scalar varr a =
  let op = Owl_base_dense_common._sub_elt (kind varr) in
  map (fun x -> op x a) varr
let mul_scalar varr a =
  let op = Owl_base_dense_common._mul_elt (kind varr) in
  map (fun x -> op x a) varr
let div_scalar varr a =
  let op = Owl_base_dense_common._div_elt (kind varr) in
  map (fun x -> op x a) varr
(* Clamp every element into [amin, amax].  The defaults span the full
   extended float range so that omitting a bound disables it.
   Fix: the previous lower default, Pervasives.min_float, is the
   smallest POSITIVE float (~2.2e-308), so negative elements were
   silently clipped up to it; similarly max_float would clip +infinity.
   The infinities are the correct identity bounds. *)
let clip_by_value ?(amin=Pervasives.neg_infinity) ?(amax=Pervasives.infinity) varr =
  let clip_by_val_fun = (fun x -> Pervasives.min amax (Pervasives.max amin x)) in
  (map clip_by_val_fun varr)
let scalar_add a varr = (add_scalar varr a)
let scalar_sub a varr =
let _op = Owl_base_dense_common._sub_elt (kind varr) in
let scalar_sub_fun = (fun x -> _op a x) in
(map scalar_sub_fun varr)
let scalar_mul a varr = (mul_scalar varr a)
let scalar_div a varr =
let _op = Owl_base_dense_common._div_elt (kind varr) in
let scalar_div_fun = (fun x -> _op a x) in
(map scalar_div_fun varr)
let clip_by_l2norm clip_norm varr =
let l2norm_val = l2norm' varr in
if l2norm_val > clip_norm
then mul_scalar varr (clip_norm /. l2norm_val)
else varr
(** Return true if comp_fun x y holds for EVERY pair of corresponding
    elements of the two ndarrays, false otherwise.  Short-circuits on the
    first counterexample.  (NOT broadcasted: arrays with differing element
    counts compare as false.) *)
let _compare_util_shortcircuit varr_a varr_b comp_fun =
  let n = numel varr_a in
  let m = numel varr_b in
  (* Use structural inequality [<>]: [!=] is PHYSICAL inequality in OCaml
     and is only coincidentally correct here because ints are immediates. *)
  if n <> m
  then false
  else begin
    let varr_a = flatten varr_a |> array1_of_genarray in
    let varr_b = flatten varr_b |> array1_of_genarray in
    let all_ok = ref true in
    let i = ref 0 in
    (* indices stay within [0, n) by the loop guard, so unsafe_get is safe *)
    while !all_ok && (!i < n) do
      let x = Array1.unsafe_get varr_a !i in
      let y = Array1.unsafe_get varr_b !i in
      if not (comp_fun x y) then all_ok := false;
      i := !i + 1
    done;
    !all_ok
  end
(* Approximate whole-array equality: true iff every pair of corresponding
   elements differs by strictly less than [eps] (default: float32 machine
   epsilon). *)
let approx_equal ?eps varr_a varr_b =
  let eps = match eps with
    | Some e -> e
    | None -> Owl_utils.eps Float32
  in
  _compare_util_shortcircuit varr_a varr_b
    (fun x y -> Scalar.abs (Scalar.sub x y) < eps)

(* Whole-array comparisons: true iff the relation holds at EVERY position
   (short-circuiting on the first failure). *)
let equal varr_a varr_b =
  _compare_util_shortcircuit varr_a varr_b Pervasives.(=)

let not_equal varr_a varr_b =
  _compare_util_shortcircuit varr_a varr_b Pervasives.(<>)

let less varr_a varr_b =
  _compare_util_shortcircuit varr_a varr_b Pervasives.(<)

let greater varr_a varr_b =
  _compare_util_shortcircuit varr_a varr_b Pervasives.(>)

let less_equal varr_a varr_b =
  _compare_util_shortcircuit varr_a varr_b Pervasives.(<=)

let greater_equal varr_a varr_b =
  _compare_util_shortcircuit varr_a varr_b Pervasives.(>=)
(** Return true if comp_fun x b holds for every element x of [varr_a],
    false otherwise.  Short-circuits on the first counterexample.
    (NOT broadcasted.) *)
let _compare_util_shortcircuit_scalar varr_a b comp_fun =
  let n = numel varr_a in
  let flat = flatten varr_a |> array1_of_genarray in
  let ok = ref true in
  let i = ref 0 in
  while !ok && !i < n do
    if not (comp_fun (Array1.unsafe_get flat !i) b) then ok := false;
    i := !i + 1
  done;
  !ok
(* Array-vs-scalar comparisons: true iff the relation holds between EVERY
   element of [varr_a] and the scalar [b]. *)
let approx_equal_scalar ?eps varr_a b =
  let eps = match eps with
    | Some e -> e
    | None -> Owl_utils.eps Float32
  in
  _compare_util_shortcircuit_scalar varr_a b
    (fun x y -> Scalar.abs (Scalar.sub x y) < eps)

let equal_scalar varr_a b =
  _compare_util_shortcircuit_scalar varr_a b Pervasives.(=)

let not_equal_scalar varr_a b =
  _compare_util_shortcircuit_scalar varr_a b Pervasives.(<>)

let less_scalar varr_a b =
  _compare_util_shortcircuit_scalar varr_a b Pervasives.(<)

let greater_scalar varr_a b =
  _compare_util_shortcircuit_scalar varr_a b Pervasives.(>)

let less_equal_scalar varr_a b =
  _compare_util_shortcircuit_scalar varr_a b Pervasives.(<=)

let greater_equal_scalar varr_a b =
  _compare_util_shortcircuit_scalar varr_a b Pervasives.(>=)
(* Element-wise comparison (broadcasted): build an ndarray holding 1 where
   [one_fun x y] holds and 0 elsewhere, using the kind of [varr_a]. *)
let _elt_compare_util varr_a varr_b one_fun =
  let k = kind varr_a in
  let zero = Owl_const.zero k in
  let one = Owl_const.one k in
  _broadcasted_op varr_a varr_b (fun x y -> if one_fun x y then one else zero)

let elt_equal varr_a varr_b = _elt_compare_util varr_a varr_b Pervasives.(=)

(* 1 where |x - y| < eps (default: float32 machine epsilon), 0 elsewhere. *)
let approx_elt_equal ?eps varr_a varr_b =
  let eps = match eps with
    | Some e -> e
    | None -> Owl_utils.eps Float32
  in
  _elt_compare_util varr_a varr_b
    (fun x y -> Scalar.abs (Scalar.sub x y) < eps)

let elt_not_equal varr_a varr_b = _elt_compare_util varr_a varr_b Pervasives.(<>)

let elt_less varr_a varr_b = _elt_compare_util varr_a varr_b Pervasives.(<)

let elt_greater varr_a varr_b = _elt_compare_util varr_a varr_b Pervasives.(>)

let elt_less_equal varr_a varr_b = _elt_compare_util varr_a varr_b Pervasives.(<=)

let elt_greater_equal varr_a varr_b = _elt_compare_util varr_a varr_b Pervasives.(>=)
(* Element-wise comparison against a scalar: returns an ndarray of the
   same kind holding 1 where [one_fun x] holds and 0 elsewhere. *)
let _elt_compare_scalar_util varr_a one_fun =
  let k = kind varr_a in
  let zero = Owl_const.zero k in
  let one = Owl_const.one k in
  map (fun x -> if one_fun x then one else zero) varr_a

let elt_equal_scalar varr_a b =
  _elt_compare_scalar_util varr_a (fun x -> x = b)

(* 1 where |x - b| < eps (default: float32 machine epsilon), 0 elsewhere. *)
let approx_elt_equal_scalar ?eps varr_a b =
  let eps = match eps with
    | Some e -> e
    | None -> Owl_utils.eps Float32
  in
  _elt_compare_scalar_util varr_a
    (fun x -> Scalar.abs (Scalar.sub x b) < eps)

let elt_not_equal_scalar varr_a b =
  _elt_compare_scalar_util varr_a (fun x -> x <> b)

let elt_less_scalar varr_a b =
  _elt_compare_scalar_util varr_a (fun x -> x < b)

let elt_greater_scalar varr_a b =
  _elt_compare_scalar_util varr_a (fun x -> x > b)

let elt_less_equal_scalar varr_a b =
  _elt_compare_scalar_util varr_a (fun x -> x <= b)
(* Element-wise [>=] against a scalar: 1 where x >= b, 0 elsewhere.
   Fixed: the predicate previously used [>], which wrongly mapped
   elements equal to [b] to 0. *)
let elt_greater_equal_scalar varr_a b =
  let greater_equal_scalar_fun = (fun x -> x >= b) in
  (_elt_compare_scalar_util varr_a greater_equal_scalar_fun)
(* True iff predicate [f] holds for at least one element; scanning stops
   as soon as a witness is found. *)
let exists f varr =
  let n = numel varr in
  let flat = flatten varr |> array1_of_genarray in
  let found = ref false in
  let i = ref 0 in
  while not !found && !i < n do
    if f (Array1.unsafe_get flat !i) then found := true;
    i := !i + 1
  done;
  !found

(* True iff [f] holds for no element. *)
let not_exists f varr = not (exists f varr)

(* True iff [f] holds for every element. *)
let for_all f varr = not_exists (fun x -> not (f x)) varr
(* Global sign predicates, each phrased as the absence of a counterexample;
   the zero constant comes from the ndarray's own kind. *)
let is_zero varr =
  let c0 = Owl_const.zero (kind varr) in
  not_exists (fun x -> x <> c0) varr

let is_positive varr =
  let c0 = Owl_const.zero (kind varr) in
  not_exists (fun x -> x <= c0) varr

let is_negative varr =
  let c0 = Owl_const.zero (kind varr) in
  not_exists (fun x -> x >= c0) varr

let is_nonpositive varr =
  let c0 = Owl_const.zero (kind varr) in
  not_exists (fun x -> x > c0) varr

let is_nonnegative varr =
  let c0 = Owl_const.zero (kind varr) in
  not_exists (fun x -> x < c0) varr
(* True iff no element is subnormal, infinite or nan. *)
let is_normal varr =
  not_exists
    (fun x ->
       match Pervasives.classify_float x with
       | FP_subnormal | FP_infinite | FP_nan -> true
       | _ -> false)
    varr

(* True iff no element is nan. *)
let not_nan varr =
  not_exists (fun x -> Pervasives.classify_float x = FP_nan) varr

(* True iff no element is infinite. *)
let not_inf varr =
  not_exists (fun x -> Pervasives.classify_float x = FP_infinite) varr
(* 2D convolution, naive direct implementation.
   input  : [|batches; input_cols; input_rows; in_channel|]
   kernel : [|kernel_cols; kernel_rows; in_channel; out_channel|]
   stride : [|col_stride; row_stride|]
   Window positions falling outside the input contribute 0, i.e. implicit
   zero padding; the amount of padding is determined by [padding]
   (SAME by default). *)
let conv2d ?(padding=SAME) input kernel stride =
  assert (num_dims input = 4);
  assert (num_dims kernel = 4);
  assert (Array.length stride = 2);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let input_cols = input_shp.(1) in
  let input_rows = input_shp.(2) in
  let in_channel = input_shp.(3) in
  let kernel_shp = shape kernel in
  let kernel_cols = kernel_shp.(0) in
  let kernel_rows = kernel_shp.(1) in
  let out_channel = kernel_shp.(3) in
  assert (in_channel = kernel_shp.(2));
  let col_stride = stride.(0) in
  let row_stride = stride.(1) in
  let (output_cols, output_rows) =
    Owl_utils_conv.calc_conv2d_output_shape padding input_cols input_rows
      kernel_cols kernel_rows row_stride col_stride
  in
  let _kind = kind input in
  let output = empty _kind [|batches; output_cols; output_rows; out_channel|] in
  (* only the top/left pad amounts are needed to map output coordinates
     back onto input coordinates *)
  let (pad_top, pad_left, _, _) = Owl_utils_conv.calc_conv2d_padding
      input_cols input_rows kernel_cols kernel_rows output_cols output_rows
      row_stride col_stride
  in
  let sum = ref 0. in
  begin
    (* for each batch, output position and output channel, accumulate
       input * kernel over all kernel taps and input channels *)
    for b = 0 to batches - 1 do
      for i = 0 to output_cols - 1 do
        for j = 0 to output_rows - 1 do
          for k = 0 to out_channel - 1 do
            sum := 0.;
            for di = 0 to kernel_cols - 1 do
              for dj = 0 to kernel_rows - 1 do
                for q = 0 to in_channel - 1 do
                  let in_col = i * col_stride + di - pad_left in
                  let in_row = j * row_stride + dj - pad_top in
                  (* zero padding: out-of-bounds positions read as 0. *)
                  let in_val = (
                    if ((0 <= in_col) && (in_col < input_cols) &&
                        (0 <= in_row) && (in_row < input_rows))
                    then (get input [|b; in_col; in_row; q|])
                    else 0.
                  ) in
                  sum := !sum +. in_val *. (get kernel [|di; dj; q; k|])
                done;
              done;
            done;
            (set output [|b; i; j; k|] !sum)
          done;
        done;
      done;
    done;
    output
  end
(* 1D convolution, expressed as a 2D convolution with a unit-size
   leading spatial dimension.
   input  : [|batches; cols; in_channel|]
   kernel : [|kernel_cols; in_channel; out_channel|]
   stride : [|col_stride|] *)
let conv1d ?(padding=SAME) input kernel stride =
  assert (num_dims input = 3);
  assert (num_dims kernel = 3);
  assert (Array.length stride = 1);
  let in_shp = shape input in
  let batches = in_shp.(0) in
  let cols = in_shp.(1) in
  let in_channel = in_shp.(2) in
  let input4 = reshape input [|batches; 1; cols; in_channel|] in
  let k_shp = shape kernel in
  let kernel_cols = k_shp.(0) in
  let out_channel = k_shp.(2) in
  assert (in_channel = k_shp.(1));
  let kernel4 = reshape kernel [|1; kernel_cols; in_channel; out_channel|] in
  let output4 = conv2d ~padding input4 kernel4 [|1; stride.(0)|] in
  let out_cols = (shape output4).(2) in
  reshape output4 [|batches; out_cols; out_channel|]
(* 3D convolution, naive direct implementation.
   input  : [|batches; input_cols; input_rows; input_dpts; in_channel|]
   kernel : [|kernel_cols; kernel_rows; kernel_dpts; in_channel; out_channel|]
   stride : [|col_stride; row_stride; dpt_stride|]
   Out-of-bounds window positions contribute 0 (implicit zero padding,
   amount chosen per [padding], SAME by default). *)
let conv3d ?(padding=SAME) input kernel stride =
  assert (num_dims input = 5);
  assert (num_dims kernel = 5);
  assert (Array.length stride = 3);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let input_cols = input_shp.(1) in
  let input_rows = input_shp.(2) in
  let input_dpts = input_shp.(3) in
  let in_channel = input_shp.(4) in
  let kernel_shp = shape kernel in
  let kernel_cols = kernel_shp.(0) in
  let kernel_rows = kernel_shp.(1) in
  let kernel_dpts = kernel_shp.(2) in
  let out_channel = kernel_shp.(4) in
  assert (in_channel = kernel_shp.(3));
  let col_stride = stride.(0) in
  let row_stride = stride.(1) in
  let dpt_stride = stride.(2) in
  let output_cols, output_rows, output_dpts =
    Owl_utils_conv.calc_conv3d_output_shape padding
      input_cols input_rows input_dpts
      kernel_cols kernel_rows kernel_dpts
      row_stride col_stride dpt_stride
  in
  let _kind = kind input in
  let output =
    empty _kind [|batches; output_cols; output_rows; output_dpts; out_channel|] in
  (* only the leading pad amounts (top/left/shallow) are needed to map
     output coordinates back onto input coordinates *)
  let (pad_top, pad_left, pad_shallow, _, _, _) =
    Owl_utils_conv.calc_conv3d_padding
      input_cols input_rows input_dpts
      kernel_cols kernel_rows kernel_dpts
      output_cols output_rows output_dpts
      row_stride col_stride dpt_stride
  in
  let sum = ref 0. in
  begin
    (* accumulate input * kernel over every kernel tap and input channel,
       for each batch, output voxel and output channel *)
    for b = 0 to batches - 1 do
      for i = 0 to output_cols - 1 do
        for j = 0 to output_rows - 1 do
          for dpt = 0 to output_dpts - 1 do
            for k = 0 to out_channel - 1 do
              sum := 0.;
              for di = 0 to kernel_cols - 1 do
                for dj = 0 to kernel_rows - 1 do
                  for d_dpt = 0 to kernel_dpts -1 do
                    for q = 0 to in_channel - 1 do
                      let in_col = i * col_stride + di - pad_left in
                      let in_row = j * row_stride + dj - pad_top in
                      let in_dpt = dpt * dpt_stride + d_dpt - pad_shallow in
                      (* zero padding: out-of-bounds positions read as 0. *)
                      let in_val = (
                        if ((0 <= in_col) && (in_col < input_cols) &&
                            (0 <= in_row) && (in_row < input_rows) &&
                            (0 <= in_dpt) && (in_dpt < input_dpts))
                        then (get input [|b; in_col; in_row; in_dpt; q|])
                        else 0.
                      ) in
                      sum := !sum +. in_val *. (get kernel [|di; dj; d_dpt; q; k|])
                    done;
                  done;
                done;
              done;
              (set output [|b; i; j; dpt; k|] !sum)
            done;
          done;
        done;
      done;
    done;
    output
  end
(* Generic 2D pooling driver shared by max/avg pooling.
   For each output position the caller-supplied callbacks are invoked:
   [init_pool_fun ()] resets the accumulator, [add_val_pool_fun v] is
   called once per IN-BOUNDS input element under the kernel window, and
   [end_pool_fun ()] produces the pooled value.  Out-of-bounds window
   positions are skipped entirely (never fed to the accumulator).
   input : [|batches; input_cols; input_rows; in_channel|];
   kernel = [|kernel_cols; kernel_rows|]; stride = [|col_stride; row_stride|].
   Pooling is per-channel, so the channel count is preserved. *)
let _pool2d ?(padding=SAME) input kernel stride
    init_pool_fun add_val_pool_fun end_pool_fun =
  assert (num_dims input = 4);
  assert (Array.length kernel = 2);
  assert (Array.length stride = 2);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let input_cols = input_shp.(1) in
  let input_rows = input_shp.(2) in
  let in_channel = input_shp.(3) in
  let kernel_cols = kernel.(0) in
  let kernel_rows = kernel.(1) in
  let col_stride = stride.(0) in
  let row_stride = stride.(1) in
  let (output_cols, output_rows) =
    Owl_utils_conv.calc_conv2d_output_shape padding
      input_cols input_rows
      kernel_cols kernel_rows
      row_stride col_stride
  in
  let _kind = kind input in
  let output = empty _kind [|batches; output_cols; output_rows; in_channel|] in
  (* only the top/left pad amounts are needed to map output coordinates
     back onto input coordinates *)
  let (pad_top, pad_left, _, _) = Owl_utils_conv.calc_conv2d_padding
      input_cols input_rows kernel_cols kernel_rows output_cols output_rows
      row_stride col_stride
  in
  begin
    for b = 0 to batches - 1 do
      for i = 0 to output_cols - 1 do
        for j = 0 to output_rows - 1 do
          for k = 0 to in_channel - 1 do
            init_pool_fun ();
            for di = 0 to kernel_cols - 1 do
              for dj = 0 to kernel_rows - 1 do
                let in_col = i * col_stride + di - pad_left in
                let in_row = j * row_stride + dj - pad_top in
                if ((0 <= in_col) && (in_col < input_cols) &&
                    (0 <= in_row) && (in_row < input_rows))
                then add_val_pool_fun (get input [|b; in_col; in_row; k|])
              done;
            done;
            (set output [|b; i; j; k|] (end_pool_fun ()))
          done;
        done;
      done;
    done;
    output
  end
(* Generic 3D pooling driver shared by max/avg pooling; same callback
   protocol as [_pool2d] ([init_pool_fun] / [add_val_pool_fun] /
   [end_pool_fun] per output voxel, out-of-bounds positions skipped).
   input : [|batches; input_cols; input_rows; input_dpts; in_channel|];
   kernel = [|cols; rows; dpts|]; stride = [|col; row; dpt|]. *)
let _pool3d ?(padding=SAME) input kernel stride
    init_pool_fun add_val_pool_fun end_pool_fun =
  assert (num_dims input = 5);
  assert (Array.length kernel = 3);
  assert (Array.length stride = 3);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let input_cols = input_shp.(1) in
  let input_rows = input_shp.(2) in
  let input_dpts = input_shp.(3) in
  let in_channel = input_shp.(4) in
  let kernel_cols = kernel.(0) in
  let kernel_rows = kernel.(1) in
  let kernel_dpts = kernel.(2) in
  let col_stride = stride.(0) in
  let row_stride = stride.(1) in
  let dpt_stride = stride.(2) in
  let output_cols, output_rows, output_dpts =
    Owl_utils_conv.calc_conv3d_output_shape padding
      input_cols input_rows input_dpts
      kernel_cols kernel_rows kernel_dpts
      row_stride col_stride dpt_stride
  in
  let _kind = kind input in
  let output = empty _kind [|batches; output_cols; output_rows; output_dpts; in_channel|] in
  (* only the leading pad amounts are needed to map output coordinates
     back onto input coordinates *)
  let (pad_top, pad_left, pad_shallow, _, _, _) =
    Owl_utils_conv.calc_conv3d_padding
      input_cols input_rows input_dpts
      kernel_cols kernel_rows kernel_dpts
      output_cols output_rows output_dpts
      row_stride col_stride dpt_stride
  in
  begin
    for b = 0 to batches - 1 do
      for i = 0 to output_cols - 1 do
        for j = 0 to output_rows - 1 do
          for dpt = 0 to output_dpts - 1 do
            for k = 0 to in_channel - 1 do
              init_pool_fun ();
              for di = 0 to kernel_cols - 1 do
                for dj = 0 to kernel_rows - 1 do
                  for d_dpt = 0 to kernel_dpts - 1 do
                    let in_col = i * col_stride + di - pad_left in
                    let in_row = j * row_stride + dj - pad_top in
                    let in_dpt = dpt * dpt_stride + d_dpt - pad_shallow in
                    if ((0 <= in_col) && (in_col < input_cols) &&
                        (0 <= in_row) && (in_row < input_rows) &&
                        (0 <= in_dpt) && (in_dpt < input_dpts))
                    then add_val_pool_fun
                        (get input [|b; in_col; in_row; in_dpt; k|])
                  done;
                done;
              done;
              (set output [|b; i; j; dpt; k|] (end_pool_fun ()))
            done;
          done;
        done;
      done;
    done;
    output
  end
(* 2D max pooling.  kernel = [|cols; rows|], stride = [|col; row|].
   Fixed: the accumulator is reset to [neg_infinity] rather than
   [Pervasives.min_float] — min_float is the smallest POSITIVE normal
   float, so the old initialisation silently replaced the maximum of any
   all-negative window with ~2.2e-308. *)
let max_pool2d ?(padding=SAME) input kernel stride =
  let max_pool = ref 0. in
  let init_pool_fun = (fun () -> max_pool := Pervasives.neg_infinity) in
  let add_val_pool_fun =
    (fun v -> max_pool := Pervasives.max !max_pool v)
  in
  let end_pool_fun = (fun () -> !max_pool) in
  (_pool2d ~padding:padding input kernel stride
     init_pool_fun add_val_pool_fun end_pool_fun)
(* 1D max pooling, implemented by lifting the 3-D input to 4-D and
   delegating to [max_pool2d]. *)
let max_pool1d ?(padding=SAME) input kernel stride =
  assert (num_dims input = 3);
  assert (Array.length kernel = 1);
  assert (Array.length stride = 1);
  let shp = shape input in
  let batches = shp.(0) in
  let cols = shp.(1) in
  let channels = shp.(2) in
  let input4 = reshape input [|batches; 1; cols; channels|] in
  let output4 = max_pool2d ~padding input4 [|1; kernel.(0)|] [|1; stride.(0)|] in
  let out_cols = (shape output4).(2) in
  reshape output4 [|batches; out_cols; channels|]
(* 3D max pooling.  kernel = [|cols; rows; dpts|], stride likewise.
   Fixed: the accumulator is reset to [neg_infinity] rather than
   [Pervasives.min_float] — min_float is the smallest POSITIVE normal
   float, so the old initialisation gave wrong maxima for all-negative
   windows. *)
let max_pool3d ?(padding=SAME) input kernel stride =
  let max_pool = ref 0. in
  let init_pool_fun = (fun () -> max_pool := Pervasives.neg_infinity) in
  let add_val_pool_fun =
    (fun v -> max_pool := Pervasives.max !max_pool v)
  in
  let end_pool_fun = (fun () -> !max_pool) in
  (_pool3d ~padding:padding input kernel stride
     init_pool_fun add_val_pool_fun end_pool_fun)
(* 2D average pooling: mean of the IN-BOUNDS values under each window
   (padding positions are excluded from the count, not averaged as 0). *)
let avg_pool2d ?(padding=SAME) input kernel stride =
  let total = ref 0. in
  let count = ref 0. in
  let init_pool_fun () = total := 0.; count := 0. in
  let add_val_pool_fun v =
    total := !total +. v;
    count := !count +. 1.
  in
  let end_pool_fun () = !total /. !count in
  _pool2d ~padding input kernel stride
    init_pool_fun add_val_pool_fun end_pool_fun
(* 1D average pooling, implemented by lifting the 3-D input to 4-D and
   delegating to [avg_pool2d]. *)
let avg_pool1d ?(padding=SAME) input kernel stride =
  assert (num_dims input = 3);
  assert (Array.length kernel = 1);
  assert (Array.length stride = 1);
  let shp = shape input in
  let batches = shp.(0) in
  let cols = shp.(1) in
  let channels = shp.(2) in
  let input4 = reshape input [|batches; 1; cols; channels|] in
  let output4 = avg_pool2d ~padding input4 [|1; kernel.(0)|] [|1; stride.(0)|] in
  let out_cols = (shape output4).(2) in
  reshape output4 [|batches; out_cols; channels|]
(* 3D average pooling: mean of the IN-BOUNDS values under each window
   (padding positions are excluded from the count). *)
let avg_pool3d ?(padding=SAME) input kernel stride =
  let total = ref 0. in
  let count = ref 0. in
  let init_pool_fun () = total := 0.; count := 0. in
  let add_val_pool_fun v =
    total := !total +. v;
    count := !count +. 1.
  in
  let end_pool_fun () = !total /. !count in
  _pool3d ~padding input kernel stride
    init_pool_fun add_val_pool_fun end_pool_fun
(* Gradient of [conv2d] with respect to its input.
   [output'] is the upstream gradient, shaped like the forward output
   [|batches; output_cols; output_rows; out_channel|]; the result has the
   shape of [input].  For each input position the loop gathers, over all
   kernel taps, the output positions that touched it in the forward pass:
   the mod test selects only taps aligned with the stride grid, and the
   bounds test discards taps mapping outside the output. *)
let conv2d_backward_input input kernel stride output' =
  assert (num_dims input = 4);
  assert (num_dims kernel = 4);
  assert (num_dims output' = 4);
  assert (Array.length stride = 2);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let input_cols = input_shp.(1) in
  let input_rows = input_shp.(2) in
  let in_channel = input_shp.(3) in
  let kernel_shp = shape kernel in
  let kernel_cols = kernel_shp.(0) in
  let kernel_rows = kernel_shp.(1) in
  let out_channel = kernel_shp.(3) in
  assert (in_channel = kernel_shp.(2));
  let output_shp = shape output' in
  let output_cols = output_shp.(1) in
  let output_rows = output_shp.(2) in
  assert (batches = output_shp.(0));
  assert (out_channel = output_shp.(3));
  let col_stride = stride.(0) in
  let row_stride = stride.(1) in
  let input' = empty (kind input) (shape input) in
  let (pad_top, pad_left, _, _) = Owl_utils_conv.calc_conv2d_padding
      input_cols input_rows kernel_cols kernel_rows output_cols output_rows
      row_stride col_stride
  in
  begin
    for b = 0 to batches - 1 do
      for in_i = 0 to input_cols - 1 do
        for in_j = 0 to input_rows - 1 do
          for q = 0 to in_channel - 1 do
            let sum = ref 0. in
            for di = 0 to kernel_cols - 1 do
              for dj = 0 to kernel_rows - 1 do
                (* tap (di,dj) touched this input cell only if the offset
                   is an exact multiple of the stride *)
                if ( ((Pervasives.(mod) (in_i + pad_left - di) col_stride) = 0) &&
                     ((Pervasives.(mod) (in_j + pad_top - dj) row_stride) = 0) )
                then
                  begin
                    let out_col = (in_i + pad_left - di) / col_stride in
                    let out_row = (in_j + pad_top - dj) / row_stride in
                    if ((0 <= out_col) && (out_col < output_cols) &&
                        (0 <= out_row) && (out_row < output_rows))
                    then
                      (* accumulate over every output channel fed by this tap *)
                      for k = 0 to out_channel - 1 do
                        let out_grad = get output' [|b; out_col; out_row; k|] in
                        let kernel_val = get kernel [|di; dj; q; k|] in
                        sum := !sum +. out_grad *. kernel_val
                      done;
                  end
              done;
            done;
            (set input' [|b; in_i; in_j; q|] !sum)
          done;
        done;
      done;
    done;
    input'
  end
(* Gradient of [conv2d] with respect to the kernel.
   [output'] is the upstream gradient shaped like the forward output;
   the result has the shape of [kernel].  For each kernel tap and channel
   pair the loop accumulates output-gradient * input over every batch and
   output position where the tap lands inside the input (out-of-bounds
   taps correspond to zero padding and contribute nothing). *)
let conv2d_backward_kernel input kernel stride output' =
  assert (num_dims input = 4);
  assert (num_dims kernel = 4);
  assert (num_dims output' = 4);
  assert (Array.length stride = 2);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let input_cols = input_shp.(1) in
  let input_rows = input_shp.(2) in
  let in_channel = input_shp.(3) in
  let kernel_shp = shape kernel in
  let kernel_cols = kernel_shp.(0) in
  let kernel_rows = kernel_shp.(1) in
  let out_channel = kernel_shp.(3) in
  assert (in_channel = kernel_shp.(2));
  let output_shp = shape output' in
  let output_cols = output_shp.(1) in
  let output_rows = output_shp.(2) in
  assert (batches = output_shp.(0));
  assert (out_channel = output_shp.(3));
  let col_stride = stride.(0) in
  let row_stride = stride.(1) in
  let kernel' = empty (kind kernel) (shape kernel) in
  let (pad_top, pad_left, _, _) = Owl_utils_conv.calc_conv2d_padding
      input_cols input_rows kernel_cols kernel_rows output_cols output_rows
      row_stride col_stride
  in
  begin
    for di = 0 to kernel_cols - 1 do
      for dj = 0 to kernel_rows - 1 do
        for q = 0 to in_channel - 1 do
          for k = 0 to out_channel - 1 do
            let sum = ref 0. in
            for b = 0 to batches - 1 do
              for i = 0 to output_cols - 1 do
                for j = 0 to output_rows - 1 do
                  (* same coordinate mapping as the forward pass *)
                  let in_col = i * col_stride + di - pad_left in
                  let in_row = j * row_stride + dj - pad_top in
                  if ((0 <= in_col) && (in_col < input_cols) &&
                      (0 <= in_row) && (in_row < input_rows))
                  then
                    let out_grad = get output' [|b; i; j; k|] in
                    let input_val = get input [|b; in_col; in_row; q|] in
                    sum := !sum +. out_grad *. input_val
                done;
              done;
            done;
            set kernel' [|di; dj; q; k|] !sum
          done;
        done;
      done;
    done;
    kernel'
  end
(* Permute the dimensions of [varr] according to [axis]; the default
   permutation reverses the dimension order (a full transpose).
   Each source index [ind] is visited in row-major order via
   [_next_index], and its element is written to the permuted destination
   index [_apply_perm ind axis_perm] in a freshly allocated array. *)
let transpose ?axis varr =
  let dims = shape varr in
  let rank = Array.length dims in
  let axis_perm = match axis with
    | Some perm -> perm
    | None -> Array.init rank (fun i -> rank - i - 1)
  in
  let new_dims = _apply_perm dims axis_perm in
  let new_varr = empty (kind varr) new_dims in
  let ind = Array.make rank 0 in
  let should_stop = ref false in
  begin
    while not !should_stop do
      Genarray.set new_varr
        (_apply_perm ind axis_perm) (Genarray.get varr ind);
      (* _next_index advances [ind] in place; false means all indices done *)
      if not (_next_index ind dims) then
        should_stop := true
    done;
    new_varr
  end
(* Transposed (fractionally-strided) 2D convolution.  The output shape is
   computed for the requested [padding], and the operation itself is
   evaluated as the input-gradient of a regular [conv2d] whose kernel has
   its two channel axes swapped. *)
let transpose_conv2d ?(padding=SAME) input kernel stride =
  assert (num_dims input = 4);
  assert (num_dims kernel = 4);
  assert (Array.length stride = 2);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let input_cols = input_shp.(1) in
  let input_rows = input_shp.(2) in
  let in_channel = input_shp.(3) in
  let kernel_shp = shape kernel in
  let kernel_cols = kernel_shp.(0) in
  let kernel_rows = kernel_shp.(1) in
  let out_channel = kernel_shp.(3) in
  assert (in_channel = kernel_shp.(2));
  let output_cols, output_rows =
    Owl_utils.calc_transpose_conv2d_output_shape padding
      input_cols input_rows kernel_cols kernel_rows
      stride.(1) stride.(0)
  in
  (* placeholder with the target shape; conv2d_backward_input only reads
     its shape, the gradient it propagates is [input] *)
  let output' =
    empty (kind input) [|batches; output_cols; output_rows; out_channel|]
  in
  let kernel' = transpose ~axis:[|0;1;3;2|] kernel in
  conv2d_backward_input output' kernel' stride input
(* Gradient of [transpose_conv2d] with respect to its input.
   By duality this is a plain forward [conv2d] of [output'] with the
   channel-swapped kernel.  The forward call's padding mode is not stored
   anywhere, so it is inferred here: if [output'] matches the shape that
   SAME padding would have produced, SAME is assumed, otherwise VALID. *)
let transpose_conv2d_backward_input input kernel stride output' =
  assert (num_dims input = 4);
  assert (num_dims kernel = 4);
  assert (num_dims output' = 4);
  assert (Array.length stride = 2);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let input_cols = input_shp.(1) in
  let input_rows = input_shp.(2) in
  let in_channel = input_shp.(3) in
  let kernel_shp = shape kernel in
  let kernel_cols = kernel_shp.(0) in
  let kernel_rows = kernel_shp.(1) in
  let out_channel = kernel_shp.(3) in
  assert (in_channel = kernel_shp.(2));
  let output_shp = shape output' in
  let output_cols = output_shp.(1) in
  let output_rows = output_shp.(2) in
  assert (batches = output_shp.(0));
  assert (out_channel = output_shp.(3));
  let col_stride = stride.(0) in
  let row_stride = stride.(1) in
  (* compute the SAME-padding output shape for comparison *)
  let padding = SAME in
  let output_cols_same, output_rows_same =
    Owl_utils.calc_transpose_conv2d_output_shape
      padding input_cols input_rows kernel_cols kernel_rows
      row_stride col_stride
  in
  (* padding-mode inference: shapes equal => forward pass used SAME *)
  let p = if ((output_cols_same = output_cols)
              && (output_rows_same = output_rows) ) then SAME else VALID
  in
  let kernel = transpose ~axis:[|0;1;3;2|] kernel in
  conv2d ~padding:p output' kernel stride
(* Gradient of [transpose_conv2d] with respect to its kernel: by duality
   it is the kernel-gradient of a regular conv2d with the roles of
   [input] and [output'] exchanged. *)
let transpose_conv2d_backward_kernel input kernel stride output' =
  conv2d_backward_kernel output' kernel stride input
(* Transposed 1D convolution, lifted to the 2-D case with a unit-size
   leading spatial dimension and delegated to [transpose_conv2d]. *)
let transpose_conv1d ?(padding=SAME) input kernel stride =
  assert (num_dims input = 3);
  assert (num_dims kernel = 3);
  assert (Array.length stride = 1);
  let in_shp = shape input in
  let batches = in_shp.(0) in
  let cols = in_shp.(1) in
  let in_channel = in_shp.(2) in
  let input4 = reshape input [|batches; 1; cols; in_channel|] in
  let k_shp = shape kernel in
  let kernel_cols = k_shp.(0) in
  let out_channel = k_shp.(2) in
  assert (in_channel = k_shp.(1));
  let kernel4 = reshape kernel [|1; kernel_cols; in_channel; out_channel|] in
  let output4 = transpose_conv2d ~padding input4 kernel4 [|1; stride.(0)|] in
  let out_cols = (shape output4).(2) in
  reshape output4 [|batches; out_cols; out_channel|]
(* Gradient of [conv1d] w.r.t. its input: lift all operands to the 2-D
   case with a unit row dimension and reuse [conv2d_backward_input]. *)
let conv1d_backward_input input kernel stride output' =
  assert (num_dims input = 3);
  assert (num_dims kernel = 3);
  assert (num_dims output' = 3);
  assert (Array.length stride = 1);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let in_channel = input_shp.(2) in
  let input4 = reshape input [|batches; 1; input_shp.(1); in_channel|] in
  let kernel_shp = shape kernel in
  let out_channel = kernel_shp.(2) in
  assert (in_channel = kernel_shp.(1));
  let kernel4 = reshape kernel [|1; kernel_shp.(0); in_channel; out_channel|] in
  let output'_shp = shape output' in
  assert (batches = output'_shp.(0));
  assert (out_channel = output'_shp.(2));
  let output4 = reshape output' [|batches; 1; output'_shp.(1); out_channel|] in
  let input' = conv2d_backward_input input4 kernel4 [|1; stride.(0)|] output4 in
  (* restore the original 3-D input shape *)
  reshape input' input_shp
(* Gradient of [conv1d] w.r.t. its kernel: lift all operands to the 2-D
   case with a unit row dimension and reuse [conv2d_backward_kernel]. *)
let conv1d_backward_kernel input kernel stride output' =
  assert (num_dims input = 3);
  assert (num_dims kernel = 3);
  assert (num_dims output' = 3);
  assert (Array.length stride = 1);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let in_channel = input_shp.(2) in
  let input4 = reshape input [|batches; 1; input_shp.(1); in_channel|] in
  let kernel_shp = shape kernel in
  let out_channel = kernel_shp.(2) in
  assert (in_channel = kernel_shp.(1));
  let kernel4 = reshape kernel [|1; kernel_shp.(0); in_channel; out_channel|] in
  let output'_shp = shape output' in
  assert (batches = output'_shp.(0));
  assert (out_channel = output'_shp.(2));
  let output4 = reshape output' [|batches; 1; output'_shp.(1); out_channel|] in
  let kernel' = conv2d_backward_kernel input4 kernel4 [|1; stride.(0)|] output4 in
  (* restore the original 3-D kernel shape *)
  reshape kernel' kernel_shp
(* Gradient of [transpose_conv1d] w.r.t. its input: lift all operands to
   the 2-D case and reuse [transpose_conv2d_backward_input]. *)
let transpose_conv1d_backward_input input kernel stride output' =
  assert (num_dims input = 3);
  assert (num_dims kernel = 3);
  assert (num_dims output' = 3);
  assert (Array.length stride = 1);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let in_channel = input_shp.(2) in
  let input4 = reshape input [|batches; 1; input_shp.(1); in_channel|] in
  let kernel_shp = shape kernel in
  let out_channel = kernel_shp.(2) in
  assert (in_channel = kernel_shp.(1));
  let kernel4 = reshape kernel [|1; kernel_shp.(0); in_channel; out_channel|] in
  let output'_shp = shape output' in
  assert (batches = output'_shp.(0));
  assert (out_channel = output'_shp.(2));
  let output4 = reshape output' [|batches; 1; output'_shp.(1); out_channel|] in
  let input' =
    transpose_conv2d_backward_input input4 kernel4 [|1; stride.(0)|] output4
  in
  (* restore the original 3-D input shape *)
  reshape input' input_shp
(* Gradient of [transpose_conv1d] w.r.t. its kernel: lift all operands to
   the 2-D case and reuse [transpose_conv2d_backward_kernel]. *)
let transpose_conv1d_backward_kernel input kernel stride output' =
  assert (num_dims input = 3);
  assert (num_dims kernel = 3);
  assert (num_dims output' = 3);
  assert (Array.length stride = 1);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let in_channel = input_shp.(2) in
  let input4 = reshape input [|batches; 1; input_shp.(1); in_channel|] in
  let kernel_shp = shape kernel in
  let out_channel = kernel_shp.(2) in
  assert (in_channel = kernel_shp.(1));
  let kernel4 = reshape kernel [|1; kernel_shp.(0); in_channel; out_channel|] in
  let output'_shp = shape output' in
  assert (batches = output'_shp.(0));
  assert (out_channel = output'_shp.(2));
  let output4 = reshape output' [|batches; 1; output'_shp.(1); out_channel|] in
  let kernel' =
    transpose_conv2d_backward_kernel input4 kernel4 [|1; stride.(0)|] output4
  in
  (* restore the original 3-D kernel shape *)
  reshape kernel' kernel_shp
(* Gradient of [conv3d] with respect to its input.
   [output'] is the upstream gradient shaped like the forward output;
   the result has the shape of [input].  For each input voxel the loop
   gathers, over all kernel taps, the output voxels that touched it in
   the forward pass: the mod tests select only taps aligned with the
   stride grid, the bounds tests discard taps mapping outside the output. *)
let conv3d_backward_input input kernel stride output' =
  assert (num_dims input = 5);
  assert (num_dims kernel = 5);
  assert (num_dims output' = 5);
  assert (Array.length stride = 3);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let input_cols = input_shp.(1) in
  let input_rows = input_shp.(2) in
  let input_dpts = input_shp.(3) in
  let in_channel = input_shp.(4) in
  let kernel_shp = shape kernel in
  let kernel_cols = kernel_shp.(0) in
  let kernel_rows = kernel_shp.(1) in
  let kernel_dpts = kernel_shp.(2) in
  let out_channel = kernel_shp.(4) in
  assert (in_channel = kernel_shp.(3));
  let output_shp = shape output' in
  let output_cols = output_shp.(1) in
  let output_rows = output_shp.(2) in
  let output_dpts = output_shp.(3) in
  assert (batches = output_shp.(0));
  assert (out_channel = output_shp.(4));
  let col_stride = stride.(0) in
  let row_stride = stride.(1) in
  let dpt_stride = stride.(2) in
  let input' = empty (kind input) (shape input) in
  let (pad_top, pad_left, pad_shallow, _, _, _) =
    Owl_utils_conv.calc_conv3d_padding
      input_cols input_rows input_dpts
      kernel_cols kernel_rows kernel_dpts
      output_cols output_rows output_dpts
      row_stride col_stride dpt_stride
  in
  begin
    for b = 0 to batches - 1 do
      for in_i = 0 to input_cols - 1 do
        for in_j = 0 to input_rows - 1 do
          for in_dpt = 0 to input_dpts - 1 do
            for q = 0 to in_channel - 1 do
              let sum = ref 0. in
              for di = 0 to kernel_cols - 1 do
                for dj = 0 to kernel_rows - 1 do
                  for d_dpt = 0 to kernel_dpts - 1 do
                    (* tap (di,dj,d_dpt) touched this voxel only if each
                       offset is an exact multiple of its stride *)
                    if ( ((Pervasives.(mod) (in_i + pad_left - di) col_stride) = 0) &&
                         ((Pervasives.(mod) (in_j + pad_top - dj) row_stride) = 0) &&
                         ((Pervasives.(mod) (in_dpt + pad_shallow - d_dpt) dpt_stride) = 0))
                    then
                      begin
                        let out_col = (in_i + pad_left - di) / col_stride in
                        let out_row = (in_j + pad_top - dj) / row_stride in
                        let out_dpt = (in_dpt + pad_shallow - d_dpt) / dpt_stride in
                        if ((0 <= out_col) && (out_col < output_cols) &&
                            (0 <= out_row) && (out_row < output_rows) &&
                            (0 <= out_dpt) && (out_dpt < output_dpts))
                        then
                          (* accumulate over every output channel fed by this tap *)
                          for k = 0 to out_channel - 1 do
                            let out_grad = get output' [|b; out_col; out_row; out_dpt; k|] in
                            let kernel_val = get kernel [|di; dj; d_dpt; q; k|] in
                            sum := !sum +. out_grad *. kernel_val
                          done;
                      end
                  done;
                done;
              done;
              (set input' [|b; in_i; in_j; in_dpt; q|] !sum)
            done;
          done;
        done;
      done;
    done;
    input'
  end
(* Gradient of [conv3d] with respect to the kernel.
   [output'] is the upstream gradient shaped like the forward output;
   the result has the shape of [kernel].  For each kernel tap and channel
   pair the loop accumulates output-gradient * input over every batch and
   output voxel where the tap lands inside the input (out-of-bounds taps
   correspond to zero padding and contribute nothing). *)
let conv3d_backward_kernel input kernel stride output' =
  assert (num_dims input = 5);
  assert (num_dims kernel = 5);
  assert (num_dims output' = 5);
  assert (Array.length stride = 3);
  let input_shp = shape input in
  let batches = input_shp.(0) in
  let input_cols = input_shp.(1) in
  let input_rows = input_shp.(2) in
  let input_dpts = input_shp.(3) in
  let in_channel = input_shp.(4) in
  let kernel_shp = shape kernel in
  let kernel_cols = kernel_shp.(0) in
  let kernel_rows = kernel_shp.(1) in
  let kernel_dpts = kernel_shp.(2) in
  let out_channel = kernel_shp.(4) in
  assert (in_channel = kernel_shp.(3));
  let output_shp = shape output' in
  let output_cols = output_shp.(1) in
  let output_rows = output_shp.(2) in
  let output_dpts = output_shp.(3) in
  assert (batches = output_shp.(0));
  assert (out_channel = output_shp.(4));
  let col_stride = stride.(0) in
  let row_stride = stride.(1) in
  let dpt_stride = stride.(2) in
  let kernel' = empty (kind kernel) (shape kernel) in
  let (pad_top, pad_left, pad_shallow, _, _, _) =
    Owl_utils_conv.calc_conv3d_padding
      input_cols input_rows input_dpts
      kernel_cols kernel_rows kernel_dpts
      output_cols output_rows output_dpts
      row_stride col_stride dpt_stride
  in
  begin
    for di = 0 to kernel_cols - 1 do
      for dj = 0 to kernel_rows - 1 do
        for d_dpt = 0 to kernel_dpts - 1 do
          for q = 0 to in_channel - 1 do
            for k = 0 to out_channel - 1 do
              let sum = ref 0. in
              for b = 0 to batches - 1 do
                for i = 0 to output_cols - 1 do
                  for j = 0 to output_rows - 1 do
                    for dpt = 0 to output_dpts - 1 do
                      (* same coordinate mapping as the forward pass *)
                      let in_col = i * col_stride + di - pad_left in
                      let in_row = j * row_stride + dj - pad_top in
                      let in_dpt = dpt * dpt_stride + d_dpt - pad_shallow in
                      if ((0 <= in_col) && (in_col < input_cols) &&
                          (0 <= in_row) && (in_row < input_rows) &&
                          (0 <= in_dpt) && (in_dpt < input_dpts))
                      then
                        let out_grad = get output' [|b; i; j; dpt; k|] in
                        let input_val = get input [|b; in_col; in_row; in_dpt; q|] in
                        sum := !sum +. out_grad *. input_val
                    done;
                  done;
                done;
              done;
              set kernel' [|di; dj; d_dpt; q; k|] !sum
            done;
          done;
        done;
      done;
    done;
    kernel'
  end
(* Transposed (fractionally-strided) 3D convolution.  Implemented by
   allocating the target output and running the backward-input pass of a
   regular conv3d with the kernel's in/out channel axes swapped. *)
let transpose_conv3d ?(padding=SAME) input kernel stride =
  assert (num_dims input = 5);
  assert (num_dims kernel = 5);
  assert (Array.length stride = 3);
  let in_s = shape input and kn_s = shape kernel in
  let batches = in_s.(0)
  and in_cols = in_s.(1)
  and in_rows = in_s.(2)
  and in_dpts = in_s.(3)
  and in_chan = in_s.(4) in
  let k_cols = kn_s.(0)
  and k_rows = kn_s.(1)
  and k_dpts = kn_s.(2)
  and out_chan = kn_s.(4) in
  assert (in_chan = kn_s.(3));
  let s_col = stride.(0) and s_row = stride.(1) and s_dpt = stride.(2) in
  let out_cols, out_rows, out_dpts =
    Owl_utils.calc_transpose_conv3d_output_shape padding
      in_cols in_rows in_dpts k_cols k_rows k_dpts
      s_row s_col s_dpt
  in
  let output =
    empty (kind input) [|batches; out_cols; out_rows; out_dpts; out_chan|]
  in
  (* swap the last two kernel axes: (.., in_chan, out_chan) -> (.., out_chan, in_chan) *)
  let kernel = transpose ~axis:[|0;1;2;4;3|] kernel in
  conv3d_backward_input output kernel stride input
(* Gradient of transpose_conv3d w.r.t. its input.  Since transpose_conv3d
   is conv3d_backward_input with input/output roles swapped, the input
   gradient is a plain forward conv3d of [output'] with the
   channel-swapped kernel.  The forward padding mode is not passed in, so
   it is inferred: if the observed gradient shape matches what SAME
   padding would have produced, assume SAME, otherwise VALID. *)
let transpose_conv3d_backward_input input kernel stride output' =
assert (num_dims input = 5);
assert (num_dims kernel = 5);
assert (num_dims output' = 5);
assert (Array.length stride = 3);
let input_shp = shape input in
let batches = input_shp.(0) in
let input_cols = input_shp.(1) in
let input_rows = input_shp.(2) in
let input_dpts = input_shp.(3) in
let in_channel = input_shp.(4) in
let kernel_shp = shape kernel in
let kernel_cols = kernel_shp.(0) in
let kernel_rows = kernel_shp.(1) in
let kernel_dpts = kernel_shp.(2) in
let out_channel = kernel_shp.(4) in
assert (in_channel = kernel_shp.(3));
let output_shp = shape output' in
let output_cols = output_shp.(1) in
let output_rows = output_shp.(2) in
let output_dpts = output_shp.(3) in
assert (batches = output_shp.(0));
assert (out_channel = output_shp.(4));
let col_stride = stride.(0) in
let row_stride = stride.(1) in
let dpt_stride = stride.(2) in
(* compute the shape SAME padding would have produced ... *)
let padding = SAME in
let output_cols_same, output_rows_same, output_dpts_same =
Owl_utils.calc_transpose_conv3d_output_shape padding
input_cols input_rows input_dpts
kernel_cols kernel_rows kernel_dpts
row_stride col_stride dpt_stride
in
(* ... and infer the padding mode actually used in the forward pass *)
let p = if ((output_cols_same = output_cols)
&& (output_rows_same = output_rows)
&& (output_dpts_same = output_dpts)) then SAME else VALID
in
(* undo the channel swap performed by transpose_conv3d *)
let kernel = transpose ~axis:[|0;1;2;4;3|] kernel in
conv3d ~padding:p output' kernel stride
(* Gradient of transpose_conv3d w.r.t. the kernel.  Because
   transpose_conv3d swaps the roles of input and output relative to a
   regular conv3d, its kernel gradient is conv3d's kernel gradient with
   [input] and [output'] exchanged. *)
let transpose_conv3d_backward_kernel input kernel stride output' =
conv3d_backward_kernel output' kernel stride input
(* Generic backward pass shared by 2D max- and average-pooling.
   The pooling semantics are supplied by four closures:
   - [init_pool_fun ()]      : reset the per-window accumulator
   - [add_val_pool_fun v]    : feed one in-bounds input value into it
   - [end_pool_fun ()]       : close the window, returning the pooled value
   - [compute_grad_fun input_val input_grad output_val output_grad]
                             : new gradient for one input cell of the window
   Returns a fresh gradient ndarray shaped like [input].
   NOTE(review): the [padding] parameter is unused here -- the effective
   padding is reconstructed from the input/output shapes below. *)
let _pool2d_backward padding input kernel stride output'
init_pool_fun add_val_pool_fun end_pool_fun compute_grad_fun =
assert (num_dims input = 4);
assert (Array.length kernel = 2);
assert (Array.length stride = 2);
let input_shp = shape input in
let batches = input_shp.(0) in
let input_cols = input_shp.(1) in
let input_rows = input_shp.(2) in
let in_channel = input_shp.(3) in
let kernel_cols = kernel.(0) in
let kernel_rows = kernel.(1) in
let col_stride = stride.(0) in
let row_stride = stride.(1) in
let output_shp = shape output' in
let output_cols = output_shp.(1) in
let output_rows = output_shp.(2) in
assert (batches = output_shp.(0));
assert (in_channel = output_shp.(3));
let (pad_top, pad_left, _, _) = Owl_utils_conv.calc_conv2d_padding
input_cols input_rows kernel_cols kernel_rows output_cols output_rows
row_stride col_stride
in
let input' = zeros (kind input) (shape input) in
begin
for b = 0 to batches - 1 do
for i = 0 to output_cols - 1 do
for j = 0 to output_rows - 1 do
for k = 0 to in_channel - 1 do
init_pool_fun ();
(* first sweep: re-run the forward pooling for this window so the
   pooled value (and any closure state, e.g. a count) is available *)
for di = 0 to kernel_cols - 1 do
for dj = 0 to kernel_rows - 1 do
let in_col = i * col_stride + di - pad_left in
let in_row = j * row_stride + dj - pad_top in
if ((0 <= in_col) && (in_col < input_cols) &&
(0 <= in_row) && (in_row < input_rows))
then add_val_pool_fun (get input [|b; in_col; in_row; k|])
done;
done;
let output_val = end_pool_fun () in
let output_grad = get output' [|b; i; j; k|] in
(* second sweep: distribute the output gradient back to every
   in-bounds cell of the window (windows may overlap, so the cell's
   existing gradient is read and accumulated by compute_grad_fun) *)
for di = 0 to kernel_cols - 1 do
for dj = 0 to kernel_rows - 1 do
let in_col = i * col_stride + di - pad_left in
let in_row = j * row_stride + dj - pad_top in
if ((0 <= in_col) && (in_col < input_cols) &&
(0 <= in_row) && (in_row < input_rows))
then
let input_val = (get input [|b; in_col; in_row; k|]) in
let input_grad = (get input' [|b; in_col; in_row; k|]) in
set input' [|b; in_col; in_row; k|] (compute_grad_fun input_val input_grad output_val output_grad)
done;
done;
done;
done;
done;
done;
input'
end
(* Gradient of 2D max pooling: each window's output gradient is routed to
   the input cell(s) that attained the window's maximum.
   Fix: the running maximum is now initialised with [neg_infinity].
   [Pervasives.min_float] is the smallest POSITIVE normalised float, so a
   window containing only negative values never updated the maximum and
   its gradient was silently dropped. *)
let max_pool2d_backward padding input kernel stride output' =
  let max_pool = ref 0. in
  (* neg_infinity is the identity element for float max *)
  let init_pool_fun = (fun () -> max_pool := Pervasives.neg_infinity) in
  let add_val_pool_fun =
    (fun v -> max_pool := Pervasives.max !max_pool v)
  in
  let end_pool_fun = (fun () -> !max_pool) in
  (* tolerance absorbs float rounding when matching cells to the max *)
  let compute_grad_fun = (fun input_val input_grad output_val output_grad ->
    if ((Scalar.abs (input_val -. output_val)) < 1e-8)
    then input_grad +. output_grad
    else input_grad
  ) in
  (_pool2d_backward padding input kernel stride output'
     init_pool_fun add_val_pool_fun end_pool_fun compute_grad_fun)
(* Gradient of 2D average pooling: every in-bounds input cell of a window
   receives an equal share of that window's output gradient. *)
let avg_pool2d_backward padding input kernel stride output' =
  let acc = ref 0. and count = ref 0. in
  let init_pool_fun () =
    acc := 0.;
    count := 0.
  in
  let add_val_pool_fun v =
    acc := !acc +. v;
    count := !count +. 1.
  in
  let end_pool_fun () = !acc /. !count in
  (* [count] still holds the size of the current window when the gradient
     for that window is computed *)
  let compute_grad_fun _input_val input_grad _output_val output_grad =
    input_grad +. output_grad /. !count
  in
  _pool2d_backward padding input kernel stride output'
    init_pool_fun add_val_pool_fun end_pool_fun compute_grad_fun
(* Generic backward pass shared by 3D max- and average-pooling; the 3-D
   analogue of [_pool2d_backward].  The pooling semantics come from four
   closures (reset / feed value / close window / per-cell gradient).
   Returns a fresh gradient ndarray shaped like [input].
   NOTE(review): the [padding] parameter is unused -- the effective
   padding is reconstructed from the input/output shapes below. *)
let _pool3d_backward padding input kernel stride output'
init_pool_fun add_val_pool_fun end_pool_fun compute_grad_fun =
assert (num_dims input = 5);
assert (Array.length kernel = 3);
assert (Array.length stride = 3);
let input_shp = shape input in
let batches = input_shp.(0) in
let input_cols = input_shp.(1) in
let input_rows = input_shp.(2) in
let input_dpts = input_shp.(3) in
let in_channel = input_shp.(4) in
let kernel_cols = kernel.(0) in
let kernel_rows = kernel.(1) in
let kernel_dpts = kernel.(2) in
let col_stride = stride.(0) in
let row_stride = stride.(1) in
let dpt_stride = stride.(2) in
let output_shp = shape output' in
let output_cols = output_shp.(1) in
let output_rows = output_shp.(2) in
let output_dpts = output_shp.(3) in
assert (batches = output_shp.(0));
assert (in_channel = output_shp.(4));
let (pad_top, pad_left, pad_shallow, _, _, _) =
Owl_utils_conv.calc_conv3d_padding
input_cols input_rows input_dpts
kernel_cols kernel_rows kernel_dpts
output_cols output_rows output_dpts
row_stride col_stride dpt_stride
in
let input' = zeros (kind input) (shape input) in
begin
for b = 0 to batches - 1 do
for i = 0 to output_cols - 1 do
for j = 0 to output_rows - 1 do
for dpt = 0 to output_dpts - 1 do
for k = 0 to in_channel - 1 do
init_pool_fun ();
(* first sweep: re-run the forward pooling for this window *)
for di = 0 to kernel_cols - 1 do
for dj = 0 to kernel_rows - 1 do
for dk = 0 to kernel_dpts - 1 do
let in_col = i * col_stride + di - pad_left in
let in_row = j * row_stride + dj - pad_top in
let in_dpt = dpt * dpt_stride + dk - pad_shallow in
if ((0 <= in_col) && (in_col < input_cols) &&
(0 <= in_row) && (in_row < input_rows) &&
(0 <= in_dpt) && (in_dpt < input_dpts))
then add_val_pool_fun (get input [|b; in_col; in_row; in_dpt; k|])
done;
done;
done;
let output_val = end_pool_fun () in
let output_grad = get output' [|b; i; j; dpt; k|] in
(* second sweep: distribute the output gradient back to every
   in-bounds cell of the window, accumulating with the cell's
   existing gradient (windows may overlap) *)
for di = 0 to kernel_cols - 1 do
for dj = 0 to kernel_rows - 1 do
for dk = 0 to kernel_dpts - 1 do
let in_col = i * col_stride + di - pad_left in
let in_row = j * row_stride + dj - pad_top in
let in_dpt = dpt * dpt_stride + dk - pad_shallow in
if ((0 <= in_col) && (in_col < input_cols) &&
(0 <= in_row) && (in_row < input_rows) &&
(0 <= in_dpt) && (in_dpt < input_dpts))
then
let input_val = (get input [|b; in_col; in_row; in_dpt; k|]) in
let input_grad = (get input' [|b; in_col; in_row; in_dpt; k|]) in
set input' [|b; in_col; in_row; in_dpt; k|]
(compute_grad_fun input_val input_grad output_val output_grad)
done;
done;
done;
done;
done;
done;
done;
done;
input'
end
(* Gradient of 3D max pooling: each window's output gradient is routed to
   the input cell(s) that attained the window's maximum.
   Fix: the running maximum is now initialised with [neg_infinity].
   [Pervasives.min_float] is the smallest POSITIVE normalised float, so a
   window containing only negative values never updated the maximum and
   its gradient was silently dropped. *)
let max_pool3d_backward padding input kernel stride output' =
  let max_pool = ref 0. in
  (* neg_infinity is the identity element for float max *)
  let init_pool_fun = (fun () -> max_pool := Pervasives.neg_infinity) in
  let add_val_pool_fun =
    (fun v -> max_pool := Pervasives.max !max_pool v)
  in
  let end_pool_fun = (fun () -> !max_pool) in
  (* tolerance absorbs float rounding when matching cells to the max *)
  let compute_grad_fun = (fun input_val input_grad output_val output_grad ->
    if ((Scalar.abs (input_val -. output_val)) < 1e-8)
    then input_grad +. output_grad
    else input_grad
  ) in
  (_pool3d_backward padding input kernel stride output'
     init_pool_fun add_val_pool_fun end_pool_fun compute_grad_fun)
(* Gradient of 3D average pooling: every in-bounds input cell of a window
   receives an equal share of that window's output gradient. *)
let avg_pool3d_backward padding input kernel stride output' =
  let acc = ref 0. and count = ref 0. in
  let init_pool_fun () =
    acc := 0.;
    count := 0.
  in
  let add_val_pool_fun v =
    acc := !acc +. v;
    count := !count +. 1.
  in
  let end_pool_fun () = !acc /. !count in
  (* [count] still holds the size of the current window when the gradient
     for that window is computed *)
  let compute_grad_fun _input_val input_grad _output_val output_grad =
    input_grad +. output_grad /. !count
  in
  _pool3d_backward padding input kernel stride output'
    init_pool_fun add_val_pool_fun end_pool_fun compute_grad_fun
(* Gradient of 1D max pooling, computed by lifting to 2D: insert a unit
   "row" axis, run the 2D backward pass, and reshape the gradient back to
   the original 3-D layout. *)
let max_pool1d_backward padding input kernel stride output' =
  assert (num_dims input = 3);
  assert (Array.length kernel = 1);
  assert (Array.length stride = 1);
  let in_shp = shape input in
  let batches = in_shp.(0) and cols = in_shp.(1) and chans = in_shp.(2) in
  let input2d = reshape input [|batches; 1; cols; chans|] in
  let kernel2d = [|1; kernel.(0)|] in
  let stride2d = [|1; stride.(0)|] in
  let out_shp = shape output' in
  let output2d = reshape output' [|batches; 1; out_shp.(1); out_shp.(2)|] in
  let grad = max_pool2d_backward padding input2d kernel2d stride2d output2d in
  reshape grad in_shp
(* Gradient of 1D average pooling, computed by lifting to 2D: insert a
   unit "row" axis, run the 2D backward pass, and reshape the gradient
   back to the original 3-D layout. *)
let avg_pool1d_backward padding input kernel stride output' =
  assert (num_dims input = 3);
  assert (Array.length kernel = 1);
  assert (Array.length stride = 1);
  let in_shp = shape input in
  let batches = in_shp.(0) and cols = in_shp.(1) and chans = in_shp.(2) in
  let input2d = reshape input [|batches; 1; cols; chans|] in
  let kernel2d = [|1; kernel.(0)|] in
  let stride2d = [|1; stride.(0)|] in
  let out_shp = shape output' in
  let output2d = reshape output' [|batches; 1; out_shp.(1); out_shp.(2)|] in
  let grad = avg_pool2d_backward padding input2d kernel2d stride2d output2d in
  reshape grad in_shp
(* Drop every axis of length <= 1 from a shape; if nothing remains, keep a
   single unit axis so the result is never empty. *)
let _remove_unit_dims dims =
  match List.filter (fun d -> d > 1) (Array.to_list dims) with
  | [] -> [|1|]
  | kept -> Array.of_list kept
(* Raise [Invalid_argument] unless [dims] describes a 2-D array. *)
let _check_is_matrix dims =
  match Array.length dims with
  | 2 -> ()
  | _ -> raise (Invalid_argument "The given NDarray is not a matrix!")
(* Number of rows of a matrix; raises if [varr] is not 2-D. *)
let row_num varr =
  let dims = shape varr in
  _check_is_matrix dims;
  dims.(0)
(* Number of columns of a matrix; raises if [varr] is not 2-D. *)
let col_num varr =
  let dims = shape varr in
  _check_is_matrix dims;
  dims.(1)
(* Row [ind] of a matrix, as a sub-array sharing storage with [varr]
   (Genarray.slice_left does not copy). *)
let row varr ind =
  _check_is_matrix (shape varr);
  Genarray.slice_left varr [|ind|]
(* Gather the rows listed in [indices] into a freshly allocated matrix,
   in the order given (duplicates allowed). *)
let rows varr indices =
  let dims = shape varr in
  _check_is_matrix dims;
  let nrows = Array.length indices in
  let result = empty (kind varr) [|nrows; dims.(1)|] in
  Array.iteri
    (fun i idx ->
       Genarray.blit
         (Genarray.slice_left varr [|idx|])
         (Genarray.slice_left result [|i|]))
    indices;
  result
(* Overwrite row [ind] of matrix [varr] with the contents of [vec]. *)
let copy_row_to vec varr ind =
  _check_is_matrix (shape varr);
  Genarray.blit vec (Genarray.slice_left varr [|ind|])
(* Overwrite column [ind] of matrix [varr] with the contents of [vec].
   [vec] may have any shape as long as, ignoring unit axes, it is a
   vector whose length equals the number of rows of [varr]. *)
let copy_col_to vec varr ind =
  let dims = shape varr in
  _check_is_matrix dims;
  let vec_dims = _remove_unit_dims (shape vec) in
  if Array.length vec_dims <> 1
  then raise (Invalid_argument "Vector is not a column vector");
  let vec_len = vec_dims.(0) in
  let num_rows = dims.(0) in
  let vec_linear = flatten vec |> array1_of_genarray in
  if num_rows <> vec_len
  then raise (Invalid_argument "Column vector does not have the same length as the number of rows in the matrix");
  for i = 0 to num_rows - 1 do
    Genarray.set varr [|i; ind|] (Array1.unsafe_get vec_linear i)
  done
(* Dense matrix product: [dot a b] is the [m x n] product of an [m x k]
   matrix [a] and a [k x n] matrix [b].  Naive O(m*n*k) triple loop --
   this is the pure-OCaml base implementation, not a BLAS call.
   Raises [Invalid_argument] when either argument is not a matrix or the
   inner dimensions disagree.
   Fixes: structural [<>] instead of physical [!=] for the int dimension
   comparison, and the typo "multipled" in the error message. *)
let dot varr_a varr_b =
  let dims_a = shape varr_a in
  let dims_b = shape varr_b in
  _check_is_matrix dims_a;
  _check_is_matrix dims_b;
  let m = dims_a.(0) in
  let cdim = dims_a.(1) in
  let n = dims_b.(1) in
  if dims_b.(0) <> cdim
  then raise (Invalid_argument "Matrices cannot be multiplied");
  let varr_c = empty (kind varr_a) [|m; n|] in
  for i = 0 to m - 1 do
    for j = 0 to n - 1 do
      let sum = ref 0. in
      for k = 0 to cdim - 1 do
        sum := !sum +. (Genarray.get varr_a [|i; k|]) *. (Genarray.get varr_b [|k; j|])
      done;
      Genarray.set varr_c [|i; j|] !sum
    done
  done;
  varr_c
(* Sum of the main-diagonal elements of a square matrix.
   Raises [Invalid_argument] if the argument is not a square matrix.
   Fix: structural [<>] instead of physical [!=] for the int comparison
   (equivalent for immediates, but [<>] is the intended operator). *)
let trace varr =
  let dims = shape varr in
  _check_is_matrix dims;
  let n = dims.(0) in
  if dims.(1) <> n
  then raise (Invalid_argument "Argument is not a square matrix");
  let sum = ref 0. in
  for i = 0 to n - 1 do
    sum := !sum +. Genarray.get varr [|i; i|]
  done;
  !sum
(* Split a matrix into the array of its row slices.  Each slice shares
   storage with [varr] (Genarray.slice_left does not copy). *)
let to_rows varr =
  let dims = shape varr in
  _check_is_matrix dims;
  Array.init dims.(0) (fun i -> Genarray.slice_left varr [|i|])
(* Stack an array of equally-shaped ndarrays along a new leading axis.
   The kind and per-row shape are taken from the first element. *)
let of_rows rows =
  let m = Array.length rows in
  let dims = Array.append [|m|] (shape rows.(0)) in
  let varr = empty (kind rows.(0)) dims in
  Array.iteri
    (fun i r -> Genarray.blit r (Genarray.slice_left varr [|i|]))
    rows;
  varr
(* Build an [m x n] matrix of the given [kind] from an array of [m] float
   arrays, where [n] is the length of the first inner array.
   NOTE(review): inner elements are read with [Array.unsafe_get], so an
   inner array shorter than the first is undefined behaviour -- callers
   presumably guarantee rectangular input; confirm. *)
let of_arrays kind arrays =
  let m = Array.length arrays in
  let n = Array.length arrays.(0) in
  let varr = empty kind [|m; n|] in
  for i = 0 to m - 1 do
    let src = arrays.(i) in
    for j = 0 to n - 1 do
      Genarray.set varr [|i; j|] (Array.unsafe_get src j)
    done
  done;
  varr
(* Draw [count] rows from matrix [varr] uniformly at random; returns the
   extracted rows together with the sampled row indices.  With
   [replacement = false] the sampled indices are drawn without replacement.
   Fixes: the result of [rows] was bound to a garbled, syntactically
   invalid pattern ([let = ...]); and the sampling range was
   [Array.length dims] (always 2 for a matrix) instead of the actual
   number of rows [dims.(0)]. *)
let draw_rows ?(replacement=true) varr count =
  let dims = shape varr in
  (* sample row indices in [0, dims.(0)) *)
  let indices = _draw_int_samples replacement dims.(0) count in
  let extracted = rows varr indices in
  (extracted, indices)
(* Draw the same [count] random rows from [varr_a] and [varr_b]: the row
   indices are sampled once (via [draw_rows] on [varr_a]) and reused to
   extract the matching rows of [varr_b].
   Fix: the original bound the intermediate results to garbled empty
   patterns ([let , indices = ...] and [let = ...]), which do not parse. *)
let draw_rows2 ?(replacement=true) varr_a varr_b count =
  let extracted_a, indices = draw_rows ~replacement:replacement varr_a count in
  let extracted_b = rows varr_b indices in
  (extracted_a, extracted_b, indices)
(* Matrix inverse via in-place Gauss-Jordan elimination WITHOUT row
   pivoting: each diagonal element is used as the pivot in turn.
   Raises [Failure] if the matrix is not square or a zero appears on the
   diagonal during elimination.
   NOTE(review): because no row exchanges are performed, a zero pivot
   aborts even for some invertible matrices, and the exact float
   comparison [= 0.] makes near-singular inputs numerically fragile. *)
let inv varr =
let dims = shape varr in
let _ = _check_is_matrix dims in
let n = Array.unsafe_get dims 0 in
if (Array.unsafe_get dims 1) != n
then failwith "no inverse - the matrix is not square"
else
let pivot_row = Array.make n 0. in
let result_varr = copy varr in
begin
for p = 0 to n - 1 do
let pivot_elem = get result_varr [|p; p|] in
(* NOTE(review): re-reads the cell already held in pivot_elem *)
if get result_varr [|p; p|] = 0.
then failwith "the matrix does not have an inverse";
(* snapshot the pivot row, scaling off-pivot entries by 1/pivot *)
for j = 0 to n - 1 do
pivot_row.(j) <- get result_varr [|p; j|];
if j != p
then set result_varr [|p; j|] (pivot_row.(j) /. pivot_elem)
done;
(* scale the pivot column by -1/pivot *)
for i = 0 to n - 1 do
if i != p
then set result_varr [|i; p|]
((get result_varr [|i; p|]) /. (~-. pivot_elem))
done;
(* rank-one update of all remaining entries *)
for i = 0 to n - 1 do
let pivot_col_elem = get result_varr [|i; p|] in
for j = 0 to n - 1 do
if i != p && j != p
then
let pivot_row_elem = pivot_row.(j) in
let old_val = get result_varr [|i; j|] in
let new_val = old_val +. (pivot_row_elem *. pivot_col_elem) in
(set result_varr [|i; j|] new_val)
done;
done;
(* the pivot itself becomes its reciprocal *)
set result_varr [|p; p|] (1. /. pivot_elem)
done;
result_varr
end
(* Deserialise an ndarray from file [f] (written with OCaml marshalling).
   The first argument is ignored here -- NOTE(review): presumably a kind
   argument kept for interface parity with other backends; the marshalled
   data already determines the result.  Renamed to [_k] to silence the
   unused-variable warning without changing the (positional) interface. *)
let load _k f = Owl_io.marshal_from_file f
(* For each row of matrix [varr], return [(value, row_index, col_index)]
   of that row's maximum element.
   Fix: the running maximum is now initialised with [neg_infinity].
   [Pervasives.min_float] is the smallest POSITIVE normalised float, so a
   row containing only negative values previously reported min_float as
   its maximum and -1 as its column index. *)
let max_rows varr =
  let dims = shape varr in
  _check_is_matrix dims;
  let r = dims.(0) in
  let c = dims.(1) in
  let result = Array.make r (0., 0, 0) in
  for i = 0 to r - 1 do
    let best = ref Pervasives.neg_infinity in
    let best_pos = ref (-1) in
    for j = 0 to c - 1 do
      let x = get varr [|i; j|] in
      if x > !best then begin
        best := x;
        best_pos := j
      end
    done;
    result.(i) <- (!best, i, !best_pos)
  done;
  result