Saga - Fast tokenization and text processing for ML in OCaml.
Saga is a comprehensive text processing library for machine learning applications, providing fast tokenization and modern text generation capabilities. It combines simplicity for common use cases with flexibility for advanced workflows.
Saga consists of three main components:
- tokenization: Fast tokenization with BPE, WordPiece, and custom methods
- io: Efficient file I/O utilities for large text corpora
- sampling: Advanced text generation with composable processors

All components work together seamlessly but can be used independently.
(* Create a model function (typically a neural network) *)
let model_fn _token_ids =
  (* Your neural network forward pass *)
  Array.make 50000 0.0 (* Example: uniform logits *)
(* Build a tokenizer *)
let tok = Tokenizer.chars ()
(* Create encoder and decoder functions *)
let tokenizer_fn text =
  Tokenizer.encode tok text |> Encoding.get_ids |> Array.to_list

let decoder_fn ids = Tokenizer.decode tok ids
(* Configure generation with custom processors *)
let config =
  Sampler.default
  |> Sampler.with_temperature 0.9
  |> Sampler.with_top_k 40
  |> Sampler.with_repetition_penalty 1.1
(* Generate with fine-grained control *)
let result =
  Sampler.generate_text ~model:model_fn ~tokenizer:tokenizer_fn
    ~decoder:decoder_fn ~prompt:"Hello" ~generation_config:config ()

Fast and flexible tokenization supporting multiple algorithms and custom patterns. Handles everything from simple word splitting to advanced subword tokenization.
Load a pretrained tokenizer:
let tokenizer = Tokenizer.from_file "tokenizer.json" |> Result.get_ok
let encoding = Tokenizer.encode tokenizer "Hello world!"
let ids = Encoding.get_ids encoding

Create a BPE tokenizer from scratch:
let tokenizer =
  Tokenizer.bpe
    ~vocab:[ ("hello", 0); ("world", 1); ("[PAD]", 2) ]
    ~merges:[] ()

let encoding = Tokenizer.encode tokenizer "hello world"
let text = Tokenizer.decode tokenizer [ 0; 1 ]

Train a new tokenizer:
let texts = [ "Hello world"; "How are you?"; "Hello again" ]

let tokenizer =
  Tokenizer.train_bpe (`Seq (List.to_seq texts)) ~vocab_size:1000 ()

let () = Tokenizer.save_pretrained tokenizer ~path:"./my_tokenizer"

Tokenization proceeds through stages:

1. Normalization (lowercase, NFD/NFC, accent stripping)
2. Pre-tokenization (whitespace splitting, punctuation handling)
3. Model (BPE, WordPiece, characters, etc.)
4. Post-processing (adding CLS/SEP, setting type IDs)
Each stage is optional and configurable via builder methods.
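For instance, a sketch of attaching stages via builders. The names with_normalizer and with_pre_tokenizer, and the `Lowercase / `Whitespace values, are illustrative assumptions rather than confirmed API; the point is only that each stage is configured independently.

(* Hypothetical builder names, for illustration only. *)
let tok =
  Tokenizer.bpe ~vocab:[ ("hello", 0) ] ~merges:[] ()
  |> Tokenizer.with_normalizer `Lowercase
  |> Tokenizer.with_pre_tokenizer `Whitespace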
Post-processing patterns are model-specific:
- BERT-style: CLS at start, SEP at end, type IDs distinguish sequences

Unicode utilities for normalization.
Text normalization (lowercase, NFD/NFC, accent stripping, etc.).
Pre-tokenization (whitespace splitting, punctuation handling, etc.).
Post-processing (adding CLS/SEP, setting type IDs, etc.).
Decoding token IDs back to text.
Encoding representation (output of tokenization).
Direction for padding or truncation: `Left (beginning) or `Right (end).
type special = {
  token : string;  (** The token text (e.g., "<pad>", "<unk>"). *)
  single_word : bool;  (** Whether this token must match whole words only. Default: false. *)
  lstrip : bool;  (** Whether to strip whitespace on the left. Default: false. *)
  rstrip : bool;  (** Whether to strip whitespace on the right. Default: false. *)
  normalized : bool;  (** Whether to apply normalization to this token. Default: true for regular tokens, false for special tokens. *)
}

Special token configuration.
Special tokens are not split during tokenization and can be skipped during decoding. Token IDs are assigned automatically when added to the vocabulary.
All special token types are uniform - the semantic meaning (pad, unk, bos, etc.) is contextual, not encoded in the type.
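For example, a minimal value of this type describing a padding token, with the field semantics documented above:

let pad_special : special =
  {
    token = "<pad>";
    single_word = false;
    lstrip = false;
    rstrip = false;
    normalized = false (* special tokens are typically not normalized *);
  }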
Padding length strategy.
- `Batch_longest: Pad to longest sequence in batch
- `Fixed n: Pad all sequences to fixed length n
- `To_multiple n: Pad to smallest multiple of n >= sequence length

type padding = {
  length : pad_length;
  direction : direction;
  pad_id : int option;
  pad_type_id : int option;
  pad_token : string option;
}

Padding configuration.
When optional fields are None, falls back to tokenizer's configured padding token. If the tokenizer has no padding token configured and these fields are None, padding operations will raise Invalid_argument.
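As a sketch, a batch-padding configuration that relies on the tokenizer's configured padding token by leaving all optional fields as None:

let pad_cfg : padding =
  {
    length = `Batch_longest;
    direction = `Right;
    pad_id = None;
    pad_type_id = None;
    pad_token = None;
  }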
Truncation configuration.
Limits sequences to max_length tokens, removing from specified direction.
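The truncation record itself is not shown here; assuming it carries the max_length and direction just described, a sketch might read:

(* Field names max_length and direction are assumptions; check the actual type. *)
let trunc_cfg = { max_length = 512; direction = `Left (* drop the oldest tokens *) }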
type data =
  [ `Files of string list
  | `Seq of string Seq.t
  | `Iterator of (unit -> string option) ]

Training data source.
- `Files paths: Read training text from files
- `Seq seq: Use a sequence of strings
- `Iterator f: Pull training data via an iterator (None signals end)
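A sketch constructing each variant (the file paths and strings are placeholders); any of these can be passed to Tokenizer.train_bpe as shown earlier:

let from_files = `Files [ "corpus/part1.txt"; "corpus/part2.txt" ]
let from_seq = `Seq (List.to_seq [ "Hello world"; "How are you?" ])

let from_iter =
  let remaining = ref [ "first line"; "second line" ] in
  `Iterator
    (fun () ->
      match !remaining with
      | [] -> None (* None signals end of data *)
      | line :: rest ->
          remaining := rest;
          Some line)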
Efficient file I/O utilities optimized for large text corpora and ML workflows.

read_lines ?buffer_size filename efficiently reads all lines from a file.
read_lines_lazy ?buffer_size filename returns a lazy sequence of lines.
Use this for very large files to avoid loading everything into memory. The file is automatically closed when the sequence is fully consumed or when an error occurs.
Note: If the sequence is only partially consumed and then abandoned (e.g., using Seq.take), the file descriptor may remain open until garbage collection. For guaranteed cleanup, consume the sequence fully or handle resources explicitly.
write_lines ?append filename lines writes lines to a file.
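A sketch tying these together. The enclosing Io module name and write_lines taking a string list are assumptions here; adapt to where these functions actually live:

(* Stream a large file lazily, keep non-empty lines, write them out.
   Fully consuming the sequence also guarantees the file is closed. *)
let () =
  let cleaned =
    Io.read_lines_lazy "corpus.txt"
    |> Seq.filter (fun line -> String.trim line <> "")
    |> List.of_seq
  in
  Io.write_lines "cleaned.txt" cleaned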
Modern text generation with composable processors and fine-grained control, designed for integration with neural language models.
open Saga
(* Character tokenization *)
let tok = Tokenizer.chars ()
let enc = Tokenizer.encode tok "Hello world!"
let ids = Encoding.get_ids enc
let text = Tokenizer.decode tok (Array.to_list ids)
(* BPE tokenization with batch processing *)
let tok = Tokenizer.from_file "tokenizer.json" |> Result.get_ok
let batch_enc = Tokenizer.encode_batch tok [ "Hello"; "World" ]

(* Wraps a neural model for Sampler integration. Example: illustrative
   pseudocode, adapt to your model API. *)
let setup_neural_generation neural_model =
  let tok = Tokenizer.from_file "tokenizer.json" |> Result.get_ok in
  (* Model function: token_ids -> logits *)
  let model_fn token_ids =
    (* Convert to your model's input format *)
    let input_tensor = your_tensor_creation_fn token_ids in
    let output_tensor = neural_model input_tensor in
    (* Convert output to float array *)
    your_tensor_to_array_fn output_tensor
  in
  (* Encoder and decoder functions, as in the quickstart above *)
  let tokenizer_fn text =
    Tokenizer.encode tok text |> Encoding.get_ids |> Array.to_list
  in
  let decoder_fn ids = Tokenizer.decode tok ids in
  (* Configure generation with custom processors *)
  let config =
    Sampler.creative_writing
    |> Sampler.with_max_new_tokens 200
    |> Sampler.with_repetition_penalty 1.15
  in
  (* Generate text *)
  Sampler.generate_text ~model:model_fn ~tokenizer:tokenizer_fn
    ~decoder:decoder_fn ~prompt:"Hello" ~generation_config:config ()