1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
(* Short aliases for the float-valued vector and matrix modules of [Linalg]. *)
module Vector = Linalg.Vec.Float
module Matrix = Linalg.Mat.Float
(* Vectors are indexed by a single [int]; matrices by a [(col, row)] pair
   (see the index convention used in [matrix_of_array_array] below). *)
type vector = int Vector.t
type matrix = (int * int) Matrix.t
(* Write-only view of a matrix, used as the destination of in-place ops. *)
type out_matrix = (int * int) Matrix.out
(* [vec_dim v] is the number of elements of [v]. *)
let vec_dim (v : vector) = Linalg.Tensor.Int.numel @@ Vector.idim v
(* [col_dim m] is the number of columns of [m]. *)
let col_dim (m : matrix) = Linalg.Tensor.Int.numel @@ Matrix.cols m
(* [row_dim m] is the number of rows of [m]. *)
let row_dim (m : matrix) = Linalg.Tensor.Int.numel @@ Matrix.rows m
(* [matrix_of_array_array m] wraps the row-major array-of-rows [m] as a
   read-only matrix. Rows are assumed to all have the length of the first
   row. Matrix indices follow the [(col, row)] convention, hence the
   generator reads [m.(r).(c)]. An empty [m] yields a 0x0 matrix instead
   of raising [Invalid_argument]. *)
let matrix_of_array_array (m : float array array) : matrix =
  let r = Array.length m in
  (* Guard: [m.(0)] would raise on an empty outer array. *)
  let c = if r = 0 then 0 else Array.length m.(0) in
  Matrix.make (Linalg.Tensor.Int.rank_two c r) @@ fun (c, r) -> m.(r).(c)
(* [out_matrix_of_array_array m] wraps [m] as a write-only matrix whose
   writer mutates [m] in place; same [(col, row)] index convention as
   [matrix_of_array_array]. An empty [m] yields a 0x0 out-matrix instead
   of raising [Invalid_argument]. *)
let out_matrix_of_array_array (m : float array array) : out_matrix =
  let r = Array.length m in
  (* Guard: [m.(0)] would raise on an empty outer array. *)
  let c = if r = 0 then 0 else Array.length m.(0) in
  Linalg.OVec (Linalg.Tensor.Int.rank_two c r, fun (c, r) x -> m.(r).(c) <- x)
(* [mm_ out m1 m2] writes the matrix product [m1 * m2] into [out].
   Indices follow the [(col, row)] convention of [Matrix]:
   [out (c, r) = sum_k m1 (k, r) * m2 (c, k)].
   Asserts that the inner dimensions agree and that [out] has the shape
   of the product. *)
let mm_ (out : out_matrix) (m1 : matrix) (m2 : matrix) =
  let inner = col_dim m1 in
  let m2_rows = row_dim m2 in
  assert (inner = m2_rows) ;
  let ncols = col_dim m2 in
  let nrows = row_dim m1 in
  let shape = Matrix.odim out in
  let shape_cols = Linalg.Tensor.Int.numel @@ Linalg.Tensor.Int.fst shape in
  let shape_rows = Linalg.Tensor.Int.numel @@ Linalg.Tensor.Int.snd shape in
  assert (shape_cols = ncols && shape_rows = nrows) ;
  let (Linalg.OVec (_, write)) = out in
  for col = 0 to ncols - 1 do
    for row = 0 to nrows - 1 do
      (* Dot product of row [row] of [m1] with column [col] of [m2]. *)
      let cell = ref 0.0 in
      for k = 0 to inner - 1 do
        cell :=
          !cell +. (Matrix.unsafe_get m2 (col, k) *. Matrix.unsafe_get m1 (k, row))
      done ;
      write (col, row) !cell
    done
  done
(* [vector_to_array v] copies the elements of [v] into a fresh array.
   Reuses the [vec_dim] helper rather than re-deriving the length from
   the index shape, for consistency with the rest of this module. *)
let vector_to_array (v : vector) : float array =
  Array.init (vec_dim v) (Vector.get v)
(* [vector_of_array array] wraps [array] as a vector; reads are delegated
   to [array], so later mutations of [array] are visible through the
   vector. Uses the [Vector] alias for consistency with the rest of this
   module. *)
let vector_of_array (array : float array) =
  Vector.make (Linalg.Tensor.Int.rank_one (Array.length array)) (Array.get array)
(* [vector_to_seq vec] is the (re-traversable) sequence of the elements of
   [vec], in index order. Uses the [Vector] alias and the [vec_dim] helper
   for consistency with the rest of this module. *)
let vector_to_seq vec =
  let n = vec_dim vec in
  let rec loop i () =
    if i = n then Seq.Nil else Seq.Cons (Vector.get vec i, loop (i + 1))
  in
  loop 0
(* [map_rows f m] is the vector whose [r]-th element is [f] applied to the
   [r]-th row of [m]. *)
let map_rows f matrix =
  Vector.make (Matrix.rows matrix) (fun r -> f (Matrix.row matrix r))
(* [mapi_rows f m] is like [map_rows] but also passes the row index to [f]. *)
let mapi_rows f matrix =
  Vector.make (Matrix.rows matrix) (fun r -> f r (Matrix.row matrix r))
(* [map_cols f m] is the vector whose [c]-th element is [f] applied to the
   [c]-th column of [m]. *)
let map_cols f matrix =
  Vector.make (Matrix.cols matrix) (fun c -> f (Matrix.col matrix c))
(* The 0x0 matrix. Its generator can never be invoked, since there are no
   valid indices. *)
let empty_matrix =
  Matrix.make (Linalg.Tensor.Int.rank_two 0 0) (fun _ -> assert false)
(* [l2_norm vec] is the Euclidean norm: the square root of the sum of the
   squared elements of [vec]. *)
let l2_norm vec =
  sqrt (Vector.reduce ( +. ) 0.0 (Vector.map (fun x -> x *. x) vec))
(* [mse vec] is the mean of the squared elements of [vec]. *)
let mse vec =
  let sum_of_squares =
    Vector.reduce ( +. ) 0.0 (Vector.map (fun x -> x *. x) vec)
  in
  sum_of_squares /. float_of_int (vec_dim vec)
(* [average vec] is the arithmetic mean of the elements of [vec]. *)
let average vec =
  let total = Vector.reduce ( +. ) 0.0 vec in
  total /. float_of_int (vec_dim vec)
(* [std vec] is the population standard deviation of the elements of
   [vec]: the square root of the mean squared deviation from the mean. *)
let std vec =
  let mean = average vec in
  let squared_deviations =
    Vector.map
      (fun x ->
        let d = x -. mean in
        d *. d)
      vec
    |> Vector.reduce ( +. ) 0.0
  in
  sqrt (squared_deviations /. float_of_int (vec_dim vec))
(* Codec for [vector], going through its float-array representation. *)
let vector_encoding : vector Data_encoding.t =
  Data_encoding.conv
    vector_to_array
    vector_of_array
    (Data_encoding.array Data_encoding.float)
(* [pp_vec fmtr vec] prints the elements of [vec] on [fmtr], separated by
   commas. *)
let pp_vec fmtr vec =
  let comma fmtr () = Format.pp_print_string fmtr "," in
  let elements = List.of_seq (vector_to_seq vec) in
  Format.pp_print_list ~pp_sep:comma Format.pp_print_float fmtr elements