(*****************************************************************************)
(*                                                                           *)
(* Open Source License                                                       *)
(* Copyright (c) 2022 Nomadic Labs <contact@nomadic-labs.com>                *)
(*                                                                           *)
(* Permission is hereby granted, free of charge, to any person obtaining a   *)
(* copy of this software and associated documentation files (the "Software"),*)
(* to deal in the Software without restriction, including without limitation *)
(* the rights to use, copy, modify, merge, publish, distribute, sublicense,  *)
(* and/or sell copies of the Software, and to permit persons to whom the     *)
(* Software is furnished to do so, subject to the following conditions:      *)
(*                                                                           *)
(* The above copyright notice and this permission notice shall be included   *)
(* in all copies or substantial portions of the Software.                    *)
(*                                                                           *)
(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*)
(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,  *)
(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL   *)
(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*)
(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING   *)
(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER       *)
(* DEALINGS IN THE SOFTWARE.                                                 *)
(*                                                                           *)
(*****************************************************************************)

open Error_monad
include Cryptobox_intf
module Base58 = Tezos_crypto.Base58
module Srs_g1 = Octez_bls12_381_polynomial.Srs.Srs_g1
module Srs_g2 = Octez_bls12_381_polynomial.Srs.Srs_g2

type error += Failed_to_load_trusted_setup of string

let () =
  register_error_kind
    `Permanent
    ~id:"dal.node.trusted_setup_loading_failed"
    ~title:"Trusted setup loading failed"
    ~description:"Trusted setup failed to load"
    ~pp:(fun ppf msg -> Format.fprintf ppf "Trusted setup failed to load: %s" msg)
    Data_encoding.(obj1 (req "msg" string))
    (function
      | Failed_to_load_trusted_setup parameter -> Some parameter | _ -> None)
    (fun parameter -> Failed_to_load_trusted_setup parameter)
  [@@coverage off]

type initialisation_parameters = {srs_g1 : Srs_g1.t; srs_g2 : Srs_g2.t}

(* Initialisation parameters are supposed to be instantiated once. *)
let initialisation_parameters = ref None

type error += Dal_initialisation_twice

let () =
  register_error_kind
    `Permanent
    ~id:"dal.node.initialisation_twice"
    ~title:"Initialisation_twice"
    ~description:"DAL parameters were initialised twice"
    ~pp:(fun ppf () -> Format.fprintf ppf "DAL parameters were initialised twice")
    Data_encoding.empty
    (function Dal_initialisation_twice -> Some () | _ -> None)
    (function () -> Dal_initialisation_twice)
  [@@coverage off]

(* This function is expected to be called once. *)
let load_parameters parameters =
  let open Result_syntax in
  match !initialisation_parameters with
  | None ->
      initialisation_parameters := Some parameters ;
      return_unit
  | Some _ -> fail [Dal_initialisation_twice]
(* FIXME https://gitlab.com/tezos/tezos/-/issues/3400

   An integrity check is run to ensure the validity of the files. *)
let initialisation_parameters_from_files ~srs_g1_path ~srs_g2_path
    ~srs_size_log2 =
  let open Lwt_result_syntax in
  let len = 1 lsl srs_size_log2 in
  let to_bigstring ~path =
    let open Lwt_syntax in
    let* fd = Lwt_unix.openfile path [Unix.O_RDONLY] 0o440 in
    Lwt.finalize
      (fun () ->
        return
          (match
             Lwt_bytes.map_file
               ~fd:(Lwt_unix.unix_file_descr fd)
               ~shared:false
               ()
           with
          | exception Unix.Unix_error (error_code, function_name, _) ->
              Error
                [
                  Failed_to_load_trusted_setup
                    (Format.sprintf
                       "%s: Unix.Unix_error: %s"
                       function_name
                       (Unix.error_message error_code));
                ]
          | exception e ->
              Error [Failed_to_load_trusted_setup (Printexc.to_string e)]
          | res -> Ok res))
      (fun () -> Lwt_unix.close fd)
  in
  let* srs_g1_bigstring = to_bigstring ~path:srs_g1_path in
  let* srs_g2_bigstring = to_bigstring ~path:srs_g2_path in
  match
    let open Result_syntax in
    let* srs_g1 = Srs_g1.of_bigstring srs_g1_bigstring ~len in
    let* srs_g2 = Srs_g2.of_bigstring srs_g2_bigstring ~len in
    return (srs_g1, srs_g2)
  with
  | Error (`End_of_file s) ->
      tzfail (Failed_to_load_trusted_setup ("EOF: " ^ s))
  | Error (`Invalid_point p) ->
      tzfail
        (Failed_to_load_trusted_setup (Printf.sprintf "Invalid point %i" p))
  | Ok (srs_g1, srs_g2) -> return {srs_g1; srs_g2}
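(* Illustrative only: a minimal sketch of how the two functions above are
   intended to be combined, assuming the SRS files are available on disk.
   The paths and the [~srs_size_log2] value are placeholders, not the
   values used in production.

   let init_dal () =
     let open Lwt_result_syntax in
     let* parameters =
       initialisation_parameters_from_files
         ~srs_g1_path:"/path/to/srs_g1"
         ~srs_g2_path:"/path/to/srs_g2"
         ~srs_size_log2:21
     in
     let*? () = load_parameters parameters in
     return_unit
*)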
(* The srs is made of the initialisation_parameters and two
   well-chosen points. Building the srs from the initialisation
   parameters is almost cost-free. *)
type srs = {
  raw : initialisation_parameters;
  kate_amortized_srs_g2_shards : Bls12_381.G2.t;
  kate_amortized_srs_g2_pages : Bls12_381.G2.t;
}

module Inner = struct
  (* Scalars are elements of the prime field Fr from BLS. *)
  module Scalar = Bls12_381.Fr
  module Polynomials = Octez_bls12_381_polynomial.Polynomial
  module G1_array = Octez_bls12_381_polynomial.G1_carray

  (* Operations on vector of scalars *)
  module Evaluations = Octez_bls12_381_polynomial.Evaluations

  (* Domains for the Fast Fourier Transform (FFT). *)
  module Domains = Octez_bls12_381_polynomial.Domain

  type slot = bytes

  type scalar = Scalar.t

  type polynomial = Polynomials.t

  type commitment = Bls12_381.G1.t

  type shard_proof = Bls12_381.G1.t

  type commitment_proof = Bls12_381.G1.t

  type page_proof = Bls12_381.G1.t

  type page = bytes

  type share = Scalar.t array

  type shard = {index : int; share : share}

  type shards_proofs_precomputation = scalar array * shard_proof array array

  type ('a, 'b) error_container = {given : 'a; expected : 'b}

  module Encoding = struct
    open Data_encoding

    let fr_encoding =
      conv
        Bls12_381.Fr.to_bytes
        Bls12_381.Fr.of_bytes_exn
        (Fixed.bytes Bls12_381.Fr.size_in_bytes)

    (* FIXME https://gitlab.com/tezos/tezos/-/issues/3391
The commitment is not bounded. *)letg1_encoding=convBls12_381.G1.to_compressed_bytesBls12_381.G1.of_compressed_bytes_exnbytesletpage_proof_encoding=g1_encodingletshare_encoding=arrayfr_encodingletshard_proof_encoding=g1_encodingletshard_encoding=conv(fun{index;share}->(index,share))(fun(index,share)->{index;share})(tup2int31share_encoding)[@@coverageoff]letshards_proofs_precomputation_encoding=tup2(arrayfr_encoding)(array(arrayg1_encoding))leterror_container_encoding(given_encoding:'aencoding)(expected_encoding:'bencoding):('a,'b)error_containerencoding=conv(fun{given;expected}->(given,expected))(fun(given,expected)->{given;expected})(obj2(req"given"given_encoding)(req"expected"expected_encoding))endincludeEncodingmoduleCommitment=structtypet=commitmenttypeBase58.data+=Dataoftletzero=Bls12_381.G1.zeroletequal=Bls12_381.G1.eqletcommitment_to_bytes=Bls12_381.G1.to_compressed_bytesletcommitment_of_bytes_opt=Bls12_381.G1.of_compressed_bytes_opt[@@coverageoff]letcommitment_of_bytes_exnbytes=matchBls12_381.G1.of_compressed_bytes_optbyteswith|None->Format.kasprintfStdlib.failwith"Unexpected data (DAL commitment)"|Somecommitment->commitment[@@coverageoff]letcommitment_size=Bls12_381.G1.compressed_size_in_bytes[@@coverageoff]letto_stringcommitment=commitment_to_bytescommitment|>Bytes.to_string[@@coverageoff]letof_string_optstr=commitment_of_bytes_opt(String.to_bytesstr)[@@coverageoff]letb58check_encoding=Base58.register_encoding~prefix:Base58.Prefix.slot_header~length:commitment_size~to_raw:to_string~of_raw:of_string_opt~wrap:(funx->Datax)[@@coverageoff]letraw_encoding=letopenData_encodinginconvcommitment_to_bytescommitment_of_bytes_exn(Fixed.bytescommitment_size)[@@coverageoff](* TODO: https://gitlab.com/tezos/tezos/-/issues/5593
We could have a smarter compare eg. using lazy Data_encoding.compare that
encodes the two values to bytes lazily and stops as soon as the values
are detected to be not equal. *)letcompare_commitmentsab=(* We are obliged to compare commitments by casting to bytes because
{!Bls12_381.G1} doesn't provide a compare function. *)ifBls12_381.G1.eqabthen0elseBytes.compare(Bls12_381.G1.to_bytesa)(Bls12_381.G1.to_bytesb)letcompare=compare_commitmentsincludeTezos_crypto.Helpers.Make(structtypet=commitmentletname="DAL_commitment"lettitle="Commitment representation for the DAL"letb58check_encoding=b58check_encodingletraw_encoding=raw_encodingletcompare=compare_commitmentsletequal=Bls12_381.G1.eqlethash_=(* The commitment is not hashed. This is ensured by the
function exposed. We only need the Base58 encoding and the
rpc_arg. *)assertfalse[@@coverageoff]letseeded_hash__=(* Same argument. *)assertfalse[@@coverageoff]end)letof_b58check=of_b58checkendmoduleCommitment_proof=structletzero=Bls12_381.G1.zeroletto_bytes=Bls12_381.G1.to_compressed_bytesletof_bytes_exnbytes=matchBls12_381.G1.of_compressed_bytes_optbyteswith|None->Format.kasprintfStdlib.failwith"Unexpected data (DAL commitment proof)"|Someproof->proof[@@coverageoff]letsize=Bls12_381.G1.compressed_size_in_bytesletraw_encoding=letopenData_encodinginconvto_bytesof_bytes_exn(Fixed.bytessize)letencoding=raw_encodingendtypeerror+=Invalid_precomputation_hashof(string,string)error_containerlet()=register_error_kind`Permanent~id:"dal.node.invalid_precomputation_hash"~title:"Invalid_precomputation_hash"~description:"Unexpected precomputation hash"~pp:(funppf{given;expected}->Format.fprintfppf"Invalid precomputation hash: expected %s. Got %s"expectedgiven)(Encoding.error_container_encodingData_encoding.stringData_encoding.string)(functionInvalid_precomputation_hasherr->Someerr|_->None)(functionerr->Invalid_precomputation_hasherr)[@@coverageoff](* Number of bytes fitting in a Scalar.t. Since scalars are integer modulo
r~2^255, we restrict ourselves to 248-bit integers (31 bytes). *)letscalar_bytes_amount=Scalar.size_in_bytes-1(* Builds group of nth roots of unity, a valid domain for the FFT. *)letmake_domainn=Domains.buildntypet={redundancy_factor:int;slot_size:int;page_size:int;number_of_shards:int;(* Maximum length of the polynomial representation of a slot, also called [polynomial_length]
in the comments. *)max_polynomial_length:int;(* Length of the erasure-encoded polynomial representation of a slot,
also called [erasure_encoded_polynomial_length] in the comments. *)erasure_encoded_polynomial_length:int;domain_polynomial_length:Domains.t;(* Domain for the FFT on slots as polynomials to be erasure encoded. *)domain_2_times_polynomial_length:Domains.t;domain_erasure_encoded_polynomial_length:Domains.t;(* Domain for the FFT on erasure encoded slots (as polynomials). *)shard_length:int;(* Length of a shard in terms of scalar elements. *)pages_per_slot:int;(* Number of slot pages. *)page_length:int;page_length_domain:int;remaining_bytes:int;(* Log of the number of evaluations that constitute an erasure encoded
polynomial. *)srs:srs;}(* The input is expected to be a positive integer. *)letis_power_of_twon=assert(n>=0);n<>0&&nland(n-1)=0(* Return the powerset of {3,11,19}. *)letcombinations_factors=letrecpowerset=function|[]->[[]]|x::xs->letps=powersetxsinList.concat[ps;List.map(funss->x::ss)ps]inpowerset[3;11;19](* [select_fft_domain domain_size] selects a suitable domain for the FFT.
The domain size [domain_size] is expected to be strictly positive.
Return [(size, power_of_two, remainder)] such that:
* If [domain_size > 1], then [size] is the smallest integer greater than or
equal to [domain_size] and is of the form 2^a * 3^b * 11^c * 19^d,
where a ∈ ⟦0, 32⟧, b ∈ {0, 1}, c ∈ {0, 1}, d ∈ {0, 1}.
* If [domain_size = 1], then [size = 2].
* [size = power_of_two * remainder], [power_of_two] is a power of two,
and [remainder] is not divisible by 2.
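For instance, [select_fft_domain 85] returns [(88, 8, 11)]: 88 = 8 * 11 is
the smallest integer of this form that is at least 85.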
The function works as follows: for each subset of {3,11,19}, the
product of its elements is repeatedly multiplied by 2
until the result is at least [domain_size]. *)
let select_fft_domain (domain_size : int) : int * int * int =
  assert (domain_size > 0) ;
  (* {3,11,19} are small prime factors dividing [Scalar.order - 1],
the order of the multiplicative group Fr\{0}. *)letorder_multiplicative_group=Z.predScalar.orderinassert(List.for_all(funx->Z.(divisibleorder_multiplicative_group(of_intx)))[3;11;19]);(* This case is needed because the code in the else clause will return
(1, 1, 1) and 1 is not a valid domain size. *)ifdomain_size=1then(2,2,1)else(* [domain_from_factors] computes the power of two to be used in the
decomposition N = 2^k * factors >= domain_size where [factors] is an
element of [combinations_factors]. *)letdomain_from_factors(factors:intlist):int*intlist=letprod_factors=List.fold_left(*)1factorsinletrecget_next_power_of_twok=ifprod_factorslslk>=domain_sizethen1lslkelseget_next_power_of_two(k+1)inletnext_power_of_two=get_next_power_of_two0inletsize=prod_factors*next_power_of_twoin(size,next_power_of_two::factors)inletcandidate_domains=List.mapdomain_from_factorscombinations_factorsin(* The list contains at least an element: the next power of 2 of domain_size *)letdomain_length,prime_factor_decomposition=List.fold_leftmin(List.hdcandidate_domains)(List.tlcandidate_domains)inletpower_of_two=List.hdprime_factor_decompositioninletremainder_product=List.fold_left(*)1(List.tlprime_factor_decomposition)in(domain_length,power_of_two,remainder_product)letfft_aux~dft~fft~fft_pfasizecoefficients=(* domain_length = power_of_two * remainder_product *)let_domain_length,power_of_two,remainder_product=select_fft_domainsizeinifsize=power_of_two||size=remainder_productthenletdomain=Domains.buildsizein(ifis_power_of_twosizethenfftelsedft)domaincoefficientselseletdomain1=Domains.buildpower_of_twoinletdomain2=Domains.buildremainder_productinfft_pfa~domain1~domain2coefficientsletfft=fft_aux~dft:Evaluations.dft~fft:Evaluations.evaluation_fft~fft_pfa:Evaluations.evaluation_fft_prime_factor_algorithmletifft_inplace=fft_aux~dft:Evaluations.idft~fft:Evaluations.interpolation_fft~fft_pfa:Evaluations.interpolation_fft_prime_factor_algorithm(* The page size is a power of two and thus not a multiple of [scalar_bytes_amount],
hence the + 1 to account for the remainder of the division. *)letpage_length~page_size=Int.divpage_sizescalar_bytes_amount+1(* [slot_as_polynomial_length ~slot_size ~page_size] returns the length of the
polynomial of maximal degree representing a slot of size [slot_size] with
[slot_size / page_size] pages. The returned length thus depends on the number
of pages. *)letslot_as_polynomial_length~slot_size~page_size=letpage_length=page_length~page_sizeinletpage_length_domain,_,_=select_fft_domainpage_lengthinslot_size/page_size*page_length_domainletensure_validity~slot_size~page_size~erasure_encoded_polynomial_length~max_polynomial_length~redundancy_factor~number_of_shards~shard_length~srs_g1_length~srs_g2_length=letopenResult_syntaxinletassert_resultconditionerror_message=ifnotconditionthenfail(`Fail(error_message()))elsereturn_unitinlet*()=assert_result(is_power_of_twoslot_size)(* According to the specification the length of a slot are in MiB *)(fun()->Format.asprintf"Slot size is expected to be a power of 2. Given: %d"slot_size)inlet*()=assert_result(is_power_of_twopage_size)(* According to the specification the lengths of a page are in MiB *)(fun()->Format.asprintf"Page size is expected to be a power of 2. Given: %d"page_size)inlet*()=assert_result(is_power_of_tworedundancy_factor&&redundancy_factor>=2)(* The redundancy factor should be a power of 2 so that n is a power of 2
for proper FFT sizing. The variable [polynomial_length] is assumed to be a power of 2
     as the output of [slot_as_polynomial_length]. *)
      (fun () ->
        Format.asprintf
          "Redundancy factor is expected to be a power of 2 and at least 2. \
           Given: %d"
          redundancy_factor)
  in
  (* At this point [erasure_encoded_polynomial_length] is a power of 2, and
     [erasure_encoded_polynomial_length > max_polynomial_length]. *)
  let* () =
    assert_result
      (page_size >= 32 && page_size < slot_size)
      (* The size of a page must be greater than 31 bytes (32 > 31 is the next
power of two), the size in bytes of a scalar element, and strictly less
     than the slot size. *)
      (fun () ->
        Format.asprintf
          "Page size is expected to be at least '32' and strictly less than \
           the slot_size '%d'. Got: %d"
          slot_size
          page_size)
  in
  let max_two_adicity_log = 32 in
  let two_adicity_log =
    snd Z.(remove (of_int erasure_encoded_polynomial_length) (of_int 2))
  in
  let* () =
    assert_result
      (two_adicity_log <= max_two_adicity_log)
      (* The 2-adicity of [erasure_encoded_polynomial_length] must be at most 2^32,
the size of the biggest subgroup of 2^i roots of unity in the multiplicative group of Fr,
because the FFTs operate on such groups. *)(fun()->Format.asprintf"Slot size (%d) and/or redundancy factor (%d) is/are too high: \
expected 2-adicity of erasure_encoded_polynomial_length (%d) to \
be at most 2^%d, got: 2^%d"slot_sizeredundancy_factorerasure_encoded_polynomial_lengthmax_two_adicity_logtwo_adicity_log)inlet*()=assert_result(erasure_encoded_polynomial_lengthmodnumber_of_shards==0&&number_of_shards<erasure_encoded_polynomial_length)(* The number of shards must divide n, so [number_of_shards <= erasure_encoded_polynomial_length].
Moreover, the inequality is strict because if [number_of_shards = erasure_encoded_polynomial_length],
the domains for the FFT contain only one element and we cannot build
FFT domains with only one element. Given that [erasure_encoded_polynomial_length] is a power of two,
     it follows that the maximum number of shards is
     [erasure_encoded_polynomial_length/2]. *)
      (fun () ->
        Format.asprintf
          "The number of shards must divide %d, and not be equal to it. For \
           the given parameters, the maximum number of shards is %d. Got: %d."
          erasure_encoded_polynomial_length
          (erasure_encoded_polynomial_length / 2)
          number_of_shards)
  in
  let* () =
    assert_result
      (shard_length < max_polynomial_length)
      (* Since shard_length = n / number_of_shards, we obtain
(all quantities are positive integers):
shard_length < k
=> n / number_of_shards < k
=> n / k < number_of_shards
=> redundancy_factor < number_of_shards
since number_of_shards is a power of 2 the minimum value for
number_of_shards is 2 * redundancy_factor. *)(fun()->Format.asprintf"For the given parameters, the minimum number of shards is %d. Got \
%d."(redundancy_factor*2)number_of_shards)inlet*()=assert_result(max_polynomial_length<=srs_g1_length)(* The committed polynomials have degree t.max_polynomial_length - 1 at most,
so t.max_polynomial_length coefficients. *)(fun()->Format.asprintf"SRS on G1 size is too small. Expected more than %d. Got %d"max_polynomial_lengthsrs_g1_length)inassert_result(letshard_length=erasure_encoded_polynomial_length/number_of_shardsinletsrs_g2_expected_length=maxmax_polynomial_lengthshard_length+1insrs_g2_expected_length<=srs_g2_length)(fun()->Format.asprintf"SRS on G2 size is too small. Expected more than %d. Got %d"max_polynomial_lengthsrs_g2_length)typeparameters=Dal_config.parameters={redundancy_factor:int;page_size:int;slot_size:int;number_of_shards:int;}letparameters_encoding=Dal_config.parameters_encodingletpages_per_slot{slot_size;page_size;_}=slot_size/page_size(* Error cases of this functions are not encapsulated into
`tzresult` for modularity reasons. *)letmake({redundancy_factor;slot_size;page_size;number_of_shards}asparameters)=letopenResult_syntaxinletmax_polynomial_length=slot_as_polynomial_length~slot_size~page_sizeinleterasure_encoded_polynomial_length=redundancy_factor*max_polynomial_lengthinletshard_length=erasure_encoded_polynomial_length/number_of_shardsinlet*raw=match!initialisation_parameterswith|None->fail(`Fail"Dal_cryptobox.make: DAL was not initialised.")|Somesrs->returnsrsinlet*()=ensure_validity~slot_size~page_size~erasure_encoded_polynomial_length~max_polynomial_length~redundancy_factor~number_of_shards~shard_length~srs_g1_length:(Srs_g1.sizeraw.srs_g1)~srs_g2_length:(Srs_g2.sizeraw.srs_g2)inletpage_length=page_length~page_sizeinletpage_length_domain,_,_=select_fft_domainpage_lengthinletsrs={raw;kate_amortized_srs_g2_shards=Srs_g2.getraw.srs_g2shard_length;kate_amortized_srs_g2_pages=Srs_g2.getraw.srs_g2page_length_domain;}inreturn{redundancy_factor;slot_size;page_size;number_of_shards;max_polynomial_length;erasure_encoded_polynomial_length;domain_polynomial_length=make_domainmax_polynomial_length;domain_2_times_polynomial_length=make_domain(2*max_polynomial_length);domain_erasure_encoded_polynomial_length=make_domainerasure_encoded_polynomial_length;shard_length;pages_per_slot=pages_per_slotparameters;page_length;page_length_domain;remaining_bytes=page_sizemodscalar_bytes_amount;srs;}letparameters({redundancy_factor;slot_size;page_size;number_of_shards;_}:t)={redundancy_factor;slot_size;page_size;number_of_shards}[@@coverageoff]letpolynomial_degree=Polynomials.degreeletpolynomial_evaluate=Polynomials.evaluate(* [polynomials_multiplication d ps] computes the product of the
polynomials [ps]. The degree of the resulting product must
be strictly less than the size of the domain [d].
Runtime is [O(n log n)] where [n = Domains.length d]. *)letpolynomials_productdps=letevaluations=List.map(fftd)psinifft_inplaced(Evaluations.mul_c~evaluations())(* We encode by pages of [page_size] bytes each. The pages
are arranged in cosets to produce batched KZG proofs
[https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf]
using the technique described in https://eprint.iacr.org/2023/033. *)letpolynomial_from_slot(t:t)slot=(* Expects the length of the byte sequence to equal the slot size.
This can be achieved by adding some padding with null bytes to the
byte sequence if the length is strictly less than the slot size, or
truncate it if the length is strictly greater than the slot size. *)ifBytes.lengthslot<>t.slot_sizethenError(`Slot_wrong_size(Printf.sprintf"message must be %d bytes long"t.slot_size))elseletoffset=ref0inletcoefficients=Array.initt.max_polynomial_length(fun_->Scalar.(copyzero))in(* A slot is subdivided into contiguous segments, called pages.
The length of a page divides the slot size, that is:
[t.page_size * t.pages_per_slot = t.slot_size]
We parse the slot page by page. *)forpage=0tot.pages_per_slot-1do(* A permutation (so a bijection) is then applied to the elements
from a page. The code applies the permutation to a scalar
element just after it is read from the page, but it is easier to
picture it as two successive steps:
1. a serialization phase where pages are converted to sequences
of scalar elements, which is injective: if two outputs for two
slots are equal, then the slots are equal since we’re just
splitting a page into chunks of [scalar_bytes_amount] bytes and
a last one of [t.remaining_bytes] bytes.
Parse the byte sequence slot = page_0 ... page_{t.pages_per_slot-1}
page by page by chunks of [scalar_bytes_amount], or [t.remaining_bytes]
for the last page chunk.
to obtain the vector of length [polynomial_length = t.page_length * t.pages_per_slot]
(chunk p_i^j standing for i-th chunk of page j):
[ p_0^0 p_1^0 ... p_{page_length-1}^0
....
p_0^{t.pages_per_slot-1} ... p_{page_length-1}^{t.pages_per_slot-1}]
2. then apply the permutation, reindexing the page elements so
that their indices correspond to the appropriate coset.
[ p_0^0 p_0^1 ... p_0^{t.pages_per_slot-1}
...
p_{page_length-1}^0 p_{page_length-1}^1 ... p_{t.page_length-1}^{t.pages_per_slot-1}]
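(For instance, with [t.pages_per_slot = 4] and [t.page_length = 2], the
two scalars read from page j=1 end up at indices 1 and 5 of the
coefficient vector.)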
For a primitive k-th root of unity, the group generated by w, <w>,
can be split into cosets
<w> = Disjoint union_{j=0, ..., t.pages_per_slot - 1} w^j W
where W = { w^{t.pages_per_slot * i} }_{i=0, ..., t.page_length - 1}.
This way, the indices of the coefficients for each page
j=0, ..., t.pages_per_slot-1 coincide with the indices of the
elements of the cosets (w^j W). Indeed, the coefficients
of the elements of w^j W are
{i * t.pages_per_slot + j}_{i=0, ..., t.page_length-1}. *)forelt=0tot.page_length-2do(* [!offset >= t.slot_size] because we don't want to read past
the buffer [slot] bounds. *)if!offset>=t.slot_sizethen()elseletdst=Bytes.createscalar_bytes_amountinBytes.blitslot!offsetdst0scalar_bytes_amount;offset:=!offset+scalar_bytes_amount;(* Apply the permutation. *)coefficients.((elt*t.pages_per_slot)+page)<-Scalar.of_bytes_exndstdone;letdst=Bytes.createt.remaining_bytesinBytes.blitslot!offsetdst0t.remaining_bytes;offset:=!offset+t.remaining_bytes;(* Apply the permutation. *)coefficients.(((t.page_length-1)*t.pages_per_slot)+page)<-Scalar.of_bytes_exndstdone;(* The resulting vector is then interpolated. Polynomial
interpolation is a linear bijection (as a ring isomorphism)
between k-tuples of scalar elements and polynomials of degree < k
with coefficients in the scalar field.
Thus [polynomial_from_slot] is an injection from slots to
polynomials (as composition preserves injectivity). *)Ok(ifft_inplacet.max_polynomial_length(Evaluations.of_array(t.max_polynomial_length-1,coefficients)))(* [polynomial_to_slot] is the left-inverse function of
[polynomial_from_slot]. *)letpolynomial_to_slottp=(* The last operation of [polynomial_from_slot] is the interpolation,
so we undo it with an evaluation on the same domain [t.domain_polynomial_length]. *)letevaluations=fftt.max_polynomial_lengthpinletslot=Bytes.maket.slot_size'\x00'inletoffset=ref0in(* Reverse permutation from [polynomial_from_slot]. *)forpage=0tot.pages_per_slot-1doforelt=0tot.page_length-2doletidx=(elt*t.pages_per_slot)+pageinletcoeff=Scalar.to_bytes(Evaluations.getevaluationsidx)inBytes.blitcoeff0slot!offsetscalar_bytes_amount;offset:=!offset+scalar_bytes_amountdone;letidx=((t.page_length-1)*t.pages_per_slot)+pageinletcoeff=Scalar.to_bytes(Evaluations.getevaluationsidx)inBytes.blitcoeff0slot!offsett.remaining_bytes;offset:=!offset+t.remaining_bytesdone;slot(* Encoding a message P = (P_0, ... ,P_{k-1}) amounts to evaluate
its associated polynomial P(x)=sum_{i=0}^{k-1} P_i x^i at the
evaluation points [t.domain_erasure_encoded_polynomial_length].
This can be achieved with an n-points discrete Fourier transform
supported by the [Scalar] field in time O(n log n). *)letencodetp=Evaluations.to_array(fftt.erasure_encoded_polynomial_lengthp)(* The shards are arranged in cosets to produce batches of KZG proofs
for the shards efficiently.
The domain of evaluation <w> is split into cosets:
<w> = Disjoint union_{i in ⟦0, t.number_of_shards-1⟧} W_i,
where W_0 = {w^{t.number_of_shards * j}}_{j in ⟦0, t.shard_length-1⟧}
and W_i = w^i W_0 (|W_0|=t.shard_length). *)letshards_from_polynomialtp=letcodeword=encodetpinletrecloopindexseq=ifindex<0thenseqelse(* For each shard index [index], a [share] consists of the
elements from [codeword] of indices w^index W_0.
A shard consists of its [index] and associated [share]. *)letshare=Array.initt.shard_length(fun_->Scalar.(copyzero))inforj=0tot.shard_length-1doshare.(j)<-codeword.((t.number_of_shards*j)+index)done;loop(index-1)(Seq.cons{index;share}seq)inloop(t.number_of_shards-1)Seq.emptymoduleShardSet=Set.Make(structtypet=shardletcompareab=Int.comparea.indexb.indexend)letencoded_share_sizet=(* FIXME: https://gitlab.com/tezos/tezos/-/issues/4289
Improve shard size computation *)letshare_scalar_len=t.erasure_encoded_polynomial_length/t.number_of_shardsin(share_scalar_len*Scalar.size_in_bytes)+4(* Let w be a primitive [t.erasure_encoded_polynomial_length]-th root of unity, where
[t.max_polynomial_length] and [t.erasure_encoded_polynomial_length = t.redundancy_factor * t.max_polynomial_length] divide
[Scalar.order - 1]. Let F be a finite field, and in this
context we use the prime field [Scalar].
We can decode a codeword c from
RS(n, k, (w^i)_i) ≔ { (P(w^i))_{i in ⟦0, n-1⟧} | P in F[x] /\ deg P < k }
with at least k components of c. By decode we mean finding the
underlying message represented by the polynomial P such that
c=(P(w^i))_{i in ⟦0, n-1⟧} with at least k components of c.
Without loss of generality, let c̃ = (c_0, ..., c_{k-1})
be the received codeword with erasures, i.e., the first k components of
a codeword. We can retrieve the original message with any k
components of c thanks to the Lagrange interpolation polynomial P(x)
defined as follows, where the x_i=w^i are the evaluation points of c̃:
P(x) = sum_{i=0}^{k-1} c_i prod_{j=0, j != i}^{k-1} (x-x_j)/(x_i - x_j).
As detailed in https://arxiv.org/pdf/0907.1788v1.pdf, the idea is
to rewrite P(x) as a product of two polynomials A(x) and B(x) so
that the convolution theorem allows us to recover P(x) using FFTs
in O(n log n).
Asymptotic complexity is O(n log n). *)letpolynomial_from_shardstshards=letshards=(* We always consider the first k codeword vector components,
the ShardSet allows collecting distinct indices.
[Seq.take] doesn't raise any exceptions as t.max_polynomial_length / t.shard_length
is (strictly) positive.
[t.max_polynomial_length / t.shard_length] is strictly less than [t.number_of_shards].
Indeed, [t.shard_length = t.erasure_encoded_polynomial_length / t.number_of_shards] where
[t.erasure_encoded_polynomial_length = t.max_polynomial_length * t.redundancy_factor] and [t.redundancy_factor > 1].
Thus,
[t.max_polynomial_length / t.shard_length = t.number_of_shards / t.redundancy_factor < t.number_of_shards].
Here, all variables are positive integers, and [t.redundancy_factor]
divides [t.number_of_shards], [t.number_of_shards] divides [t.erasure_encoded_polynomial_length]. *)Seq.take(t.max_polynomial_length/t.shard_length)shards|>ShardSet.of_seqin(* There should be [t.max_polynomial_length / t.shard_length] distinct shard indices. *)ift.max_polynomial_length/t.shard_length>ShardSet.cardinalshardsthenError(`Not_enough_shards(Printf.sprintf"there must be at least %d shards to decode"(t.max_polynomial_length/t.shard_length)))elseifShardSet.exists(fun{share;_}->Array.lengthshare<>t.shard_length)shardsthenError(`Invalid_shard_length(Printf.sprintf"At least one shard of invalid length: expected length %d."t.shard_length))elseifShardSet.exists(fun{index;_}->index>=t.number_of_shards||index<0)shardsthenError(`Shard_index_out_of_range(Printf.sprintf"At least one shard index out of range: expected indices within \
the range [%d, %d]."0(t.number_of_shards-1)))else(* 1. Computing A(x) = prod_{i=0}^{k-1} (x - x_i).
Let A(x) ≔ prod_{i=0}^{k-1} (x-x_i) and
A_i(x) ≔ prod_{j=0, j != i}^{k-1} (x-x_j).
Let n_i ≔ c_i / A_i(x_i).
The interpolation polynomial becomes:
P(x) = A(x) sum_{i=0}^{k-1} c_i / ((x - x_i) A_i(x_i))
= A(x) sum_{i=0}^{k-1} n_i / (x - x_i).
Note that A_i(x_i) != 0 by definition, so it is invertible
in the [Scalar] field. *)(* The codewords are split into chunks called shards.
For this purpose, let [t.shard_length=t.n/t.number_of_shards]
be the length of a shard, and w a primitive n-th root
of unity.
The domain of evaluation <w> is then split into cosets:
<w> = Disjoint union_{i in ⟦0, t.number_of_shards-1⟧} W_i,
where W_0 = {w^{t.number_of_shards * j}}_{j in ⟦0, t.shard_length-1⟧}
and W_i = w^i W_0 (|W_0|=t.shard_length).
For a set of [t.max_polynomial_length / shard_length] shard indices
Z subseteq {0, t.number_of_shards-1}, we reorganize the product
A(x)=prod_{i=0}^{k-1} (x-x_i) into
A(x) = prod_{i in Z, |Z|=t.max_polynomial_length/t.shard_length} Z_i
where Z_i = prod_{w' in W_i} (x - w').
We notice that Z_0(x)=x^{|W_0|}-1 (as its roots
are the elements of a group of order dividing |W_0|)
entails Z_i(x)=x^{|W_0|}-w^{i*|W_0|}
(multiplying all terms by a constant w^i in an
integral domain), which is a sparse polynomial.
This special form for Z_i(x) allows us to compute the product of
the Z_i(x)s more efficiently with the [mul_xn] function, which
multiplies an arbitrary polynomial P with a polynomial of the
form Z_i(x) in time linear in (degree P + degree Z_i(x)).
Thus A(x) = prod_{i in Z, |Z|=k/l} x^{|W_0|}-w^{i*|W_0|}.
More formally: every element of W_i is of the form
w^i w^{t.number_of_shards j} for j in ⟦0, t.shard_length-1⟧. Thus
Z_i(w^i w^{s j}) = (w^i w^{s j})^{|W_0|}-w^{i*|W_0|}
= (w^i)^{|W_0|} (w^{s j})^{|W_0|} - w^{i*|W_0|}
= w^{i * |W_0|} * 1 - w^{i*|W_0|}=0.
So every element of W_i is a root of Z_i(x).
Moreover, Z_i(x) is a degree |W_0|=t.shard_length polynomial
so has at most [t.shard_length] roots:
Z_i(x)'s only roots are W_i
[mul acc i] computes the polynomial acc * Z_i. *)letmulacci=(* The complexity of [mul_xn] is linear in
[Polynomials.degree acc + t.shard_length]. *)Polynomials.mul_xnacct.shard_length(Scalar.negate(Domains.gett.domain_erasure_encoded_polynomial_length(i*t.shard_length)))in(* [partition_products seq] builds two polynomials whose
product is A(x) from the input shards [seq]. *)letpartition_productsseq=ShardSet.fold(fun{index;_}(l,r)->(mulrindex,l))seq(Polynomials.one,Polynomials.one)in(* The computation of [p1], [p2] has asymptotic complexity
[O((t.max_polynomial_length + t.shard_length) * (t.max_polynomial_length / t.shard_length))
= O(t.max_polynomial_length * t.number_of_shards / t.redundancy_factor)].
It is the most costly operation of this function. *)letp1,p2=partition_productsshardsin(* A(x) is the product of [p1] and [p2], and has degree [polynomial_length] *)assert(Polynomials.degreep1+Polynomials.degreep2=t.max_polynomial_length);leta_poly=polynomials_product(2*t.max_polynomial_length)[p1;p2]inassert(Polynomials.degreea_poly=t.max_polynomial_length);(* 2. Computing formal derivative of A(x). *)leta'=Polynomials.derivativea_polyin(* 3. Computing A'(w^i) = A_i(w^i).
In order to compute A_i(x) more efficiently, we use the
fact that the formal derivative A'(x) of A(x) satisfies
for all i in ⟦0, k-1⟧: A'(x_i) = A_i(x_i).
So we can compute (A_i(x_i))_i by evaluating A'(x) at the
points (w^i)_i with an FFT.
Indeed:
A'(x) = (prod_{i=0}^{k-1} (x-x_i))'
= sum_{i=0}^{k-1} (x-x_i)' prod_{j=0, j != i}^{k-1} (x-x_j)
= sum_{j=0}^{k-1} A_j(x).
So A'(x_i) = sum_{j=0}^{k-1} A_j(x_i) = A_i(x_i) as the other
polynomials A_j(x) have x_i as root. *)leteval_a'=fftt.erasure_encoded_polynomial_lengtha'in(* 4. Computing N(x) ≔ sum_{i=0}^{k-1} n_i / x_i x^i
where n_i ≔ c_i/A_i(x_i)
Writing the fraction
1 / (x_i-x) = sum_{j=0}^infty x^j / (x_i^{j+1}) as a formal power
series, we obtain
P(x) / A(x) = sum_{i=0}^{k-1} n_i / (x-x_i) mod x^k
= -sum_{i=0}^{k-1} [sum_{j=0}^{k-1} n_i / (x_i^{j+1}) x^j]
= -sum_{i=0}^{k-1} N(w^{-i}) x^i
where N(x) ≔ sum_{i=0}^{k-1} n_i / x_i x^i. *)letcompute_nteval_a'shards=letn_poly=Array.initt.erasure_encoded_polynomial_length(fun_->Scalar.(copyzero))inShardSet.iter(fun{index;share}->forj=0toArray.lengthshare-1doletc_i=share.(j)inleti=(t.number_of_shards*j)+indexinletx_i=Domains.gett.domain_erasure_encoded_polynomial_lengthiinlettmp=Evaluations.geteval_a'iinScalar.mul_inplacetmptmpx_i;(* The call below never fails, so we don't
catch exceptions.
Indeed, [tmp = A_i(x_i)] is a product of nonzero
elements and so is itself nonzero
(thus invertible). See point 1. *)Scalar.inverse_exn_inplacetmptmp;Scalar.mul_inplacetmptmpc_i;n_poly.(i)<-tmpdone)shards;Evaluations.of_array(t.erasure_encoded_polynomial_length-1,n_poly)inletn_poly=compute_nteval_a'shardsin(* 5. Computing B(x).
B(x) = P(x) / A(x) = -sum_{i=0}^{k-1} N(w^{-i}) x^i.
B(x) is thus given by the first k components
of -n * IFFT_n(N). *)letb=Polynomials.truncate~len:t.max_polynomial_length(ifft_inplacet.erasure_encoded_polynomial_lengthn_poly)inPolynomials.mul_by_scalar_inplacebScalar.(negate(of_intt.erasure_encoded_polynomial_length))b;(* 6. Computing Lagrange interpolation polynomial P(x).
The product is given by the convolution theorem:
P = A * B = IFFT_{2k}(FFT_{2k}(A) o FFT_{2k}(B))
where o is the pairwise product. *)letp=polynomials_product(2*t.max_polynomial_length)[a_poly;b]in(* P has degree [<= max_polynomial_length - 1] so [<= max_polynomial_length]
coefficients. *)Ok(Polynomials.truncate~len:t.max_polynomial_lengthp)letcommittp=letdegree=Polynomials.degreepinletsrs_g1_size=Srs_g1.sizet.srs.raw.srs_g1inifdegree>=srs_g1_sizethenError(`Invalid_degree_strictly_less_than_expected{given=degree;expected=srs_g1_size})elseOk(Srs_g1.pippengert.srs.raw.srs_g1p)letpp_commit_errorfmt(`Invalid_degree_strictly_less_than_expected{given;expected})=Format.fprintffmt"Invalid degree: expecting input polynomial to commit function to have a \
degree strictly less than %d. Got %d."expectedgivenletstring_of_commit_errorerr=Format.asprintf"%a"pp_commit_errorerr(* p(X) of degree n. Max degree that can be committed: d, which is also the
SRS's length - 1. We take d = t.max_polynomial_length - 1 since we don't want to commit
polynomials with degree greater than polynomials to be erasure-encoded.
We consider the bilinear groups (G_1, G_2, G_T) with G_1=<g> and G_2=<h>.
- Commit (p X^{d-n}) such that deg (p X^{d-n}) = d the max degree
that can be committed
- Verify: checks if e(commit(p), commit(X^{d-n})) = e(commit(p X^{d-n}), h)
using the commitments for p and p X^{d-n}, and computing the commitment for
X^{d-n} on G_2. *)(* Proves that degree(p) < t.max_polynomial_length *)(* FIXME https://gitlab.com/tezos/tezos/-/issues/4192
Generalize this function to pass the slot_size in parameter. *)letprove_commitment(t:t)p=letmax_allowed_committed_poly_degree=t.max_polynomial_length-1inletmax_committable_degree=Srs_g1.sizet.srs.raw.srs_g1-1inletoffset_monomial_degree=max_committable_degree-max_allowed_committed_poly_degreein(* Note: this reallocates a buffer of size (Srs_g1.size t.srs.raw.srs_g1)
(2^21 elements in practice), so roughly 100MB. We can get rid of the
allocation by giving an offset for the SRS in Pippenger. *)letp_with_offset=Polynomials.mul_xnpoffset_monomial_degreeScalar.(copyzero)in(* proof = commit(p X^offset_monomial_degree), with deg p < t.max_polynomial_length *)committp_with_offset(* Verifies that the degree of the committed polynomial is < t.max_polynomial_length *)letverify_commitment(t:t)cmproof=letmax_allowed_committed_poly_degree=t.max_polynomial_length-1inletmax_committable_degree=Srs_g1.sizet.srs.raw.srs_g1-1inletoffset_monomial_degree=max_committable_degree-max_allowed_committed_poly_degreeinletcommitted_offset_monomial=(* This [get] cannot raise since
[offset_monomial_degree <= t.max_polynomial_length <= Srs_g2.size t.srs.raw.srs_g2]. *)Srs_g2.gett.srs.raw.srs_g2offset_monomial_degreeinletopenBls12_381in(* checking that cm * committed_offset_monomial = proof *)Pairing.pairing_check[(cm,committed_offset_monomial);(proof,G2.(negate(copyone)))]letdiff_next_power_of_twox=(1lslZ.log2up(Z.of_intx))-x(* Notations
=========
[x]_1 (resp. [x]_2) is a shorthand for x.g where
- [x] is a scalar element of type [Scalar.t]
- [g] is a generator of the subgroup [Bls12_381.G1] (resp. [Bls12_381.G2])
- ( . ) is the elliptic curve scalar multiplication in the subgroup
[Bls12_381.G1] (resp. [Bls12_381.G2])
The SRS (Structure Reference String) is defined as:
([1]_1, [τ]_1, [τ^2]_1, [τ^3]_1, ..., [τ^{Srs_g1.length t.srs.raw.srs_g1 - 1}]_1)
where τ is secret.
e : [Bls12_381.G1] * [Bls12_381.G2] → [Bls12_381.GT] is a pairing
(bilinear, non-degenerate map such that e(g1, g2) = gT where
G1=<g1>, G2=<g2>, GT=<gT>).
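Recall that the KZG commitment to P(x) = sum_i p_i x^i is
[P(τ)]_1 = sum_i p_i [τ^i]_1, computed from the SRS with a single
multi-exponentiation ([commit] above does this via [Srs_g1.pippenger]).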
Multi-reveals
=============
This feature is described in the KZG extended paper under section 3.4 as
batch opening https://link.springer.com/chapter/10.1007/978-3-642-17373-8_11
on arbitrary points. The paper https://eprint.iacr.org/2023/033 shows how
to commit and verify quickly when the points form cosets of a group of
roots of unity.
For n dividing [Scalar.order - 1], let w be a primitive n-th root of unity.
For l dividing n, let z=w^{n/l} be a primitive l-th root of unity and Z=<z>.
For i=0, ..., n/l - 1, the proof of the evaluations of P(x) at the l points
w^i Z is the KZG commitment to the quotient of the euclidean division of
P(x) by the polynomial x^l - w^{i*l} whose only roots are w^i Z.
In other words, given the euclidean division
P(x)=(x^l-w^{i*l}) * q_i(x) + r_{i}(x), deg r(x) < l,
the proof is π_i = [q_i(τ)]_1.
Opening at one point corresponds to the case l=1 where r_{i}(x)=P(w^i).
To verify the proof, we gather the alleged evaluations of P(x) at the points
w^i Z. From these possibly correct evaluations, we can construct an alleged
remainder r_{i}(x) by computing the inverse DFT on the domain w^i Z, as
r_{i}(x)=P(x) on this domain, and as r_{i}(x) is determined by its
evaluations at l distinct points. We then check
e(c-[r_i(τ)]_1, g_2) ?= e(π, [τ^l]_2 - [w^{i*l}]_2).
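The check passes when c - [r_i(τ)]_1 = (τ^l - w^{i*l}) . π in G_1, i.e.
when P(x) - r_i(x) is divisible by x^l - w^{i*l}; this divisibility holds
precisely when the alleged evaluations agree with P on w^i Z.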
Multiple multi-reveals
======================
We now wish to reveal not on the domain W=<w>, but on several subdomains:
the n/l>1 cosets w^i W_0 of l elements each. The committed polynomial P(x)
has degree k-1 where k corresponds to the dimension of the Reed-Solomon code
(as a vector subspace of dimension k of F^n). We present the result from
https://eprint.iacr.org/2023/033, which assumes the size of the domains n
and of their cosets l to be powers of two for correct FFT sizes.
Computing the proofs for all such cosets would cost n/l euclidean divisions
and multi-exponentiations. Even though the euclidean division by
x^l-w^{i*l} is linear in the degree of the committed polynomial,
as well as the multi-exponentiation thanks to the Pippenger algorithm
(See https://cr.yp.to/papers/pippenger.pdf), computing all proofs leads to
a complexity O(n/l * k). It turns out the proofs for the cosets are related,
so all proofs can be computed in time O(n/l log (n/l)).
Again, for i=0, ..., n/l-1, given the euclidean division
P(x)=(x^l-w^{i*l}) q_i(x) + r_i(x), deg r_i(x) < l, the proofs
to be computed are π_i ≔ [q_i(τ)]_1.
We denote d=deg P, m the next power of 2 of (d + 1), and set
P_m, P_{m-1}, ..., P_{d+1}=0. For our purposes we further assume
l | m, l < m so that z ≔ w^l is a primitive n/l-th root of unity.
The floor designates here the truncated division, where
terms x^i for i<0 are dropped:
q_i(x) = (P(x) - r_i(x)) / (x^l-w^{i*l})
= floor((P(x)-r_i(x)) / (x^l-w^{i*l}))
= floor(P(x)/(x^l-w^{i*l})) since deg r_i < l
= floor(sum_{k=0}^infty P(x)/(x^{(k+1)*l}) w^{k*i*l} (formal power series of 1/(x^l+c))
= sum_{k=0}^{m/l-1} floor(P(x)/(x^{(k+1)*l})) z^{i*k}.
So:
q_i(x) = sum_{k=0}^{m/l-1} (P_m x^{m-(k+1)*l} + P_{m-1}x^{m-(k+1)*l-1}
+ ... + P_{(k+1)*l+1}x + P_{(k+1)*l}) z^{ik}.
If l <= d < 2l, then the powers of z are absent from the quotient:
q_i(x) = P_l + P_{l+1} x + ... + P_d x^{d-l}.
In this case, all proofs are equal since their value doesn't depend on [i].
Thus,
π_i = [q_i(x)]_1
= sum_{k=0}^{m/l-1} (P_m[τ^{m-(k+1)*l}] + P_{m-1}[τ^{m-(k+1)*l-1}]
+ ... + P_{(k+1)*l+1}[τ] + P_{(k+1)*l}) z^{i*k}.
Letting
- for 0 <= k <= m/l, h_{k} ≔ sum_{j=k*l}^{m} f_j[τ^{j-k*l}]
- for m/l < k <= n/l, h_k ≔ 0,
we obtain π_i = sum_{k=0}^{n/l-1} h_{k+1} z^{i*k}.
So by definition π=(π_0, ..., π_{n/l-1}) is the
EC-DFT_z of the vector (h_1, ..., h_{n/l}) in F^{n/l} ( * ).
Now, let's address the computation of the coefficients of interest
h_k for k=1, ..., n/l. To this end, the authors of
https://eprint.iacr.org/2023/033 observe that the computation of the h_k's
can be decomposed into the computation of the l "offset" sums:
forall j=0, ...,l-1,
h_{k,j} = P_{m-j}[τ^{m-k*l-j}] + P_{m-l-j}[τ^{m-(k+1)*l-j}]
+ ... + P_{(m-j) % l + kl}[τ^{(m-j) % l}].
So the desired coefficients can then be obtained with
h_k=sum_{j=0}^{l-1} h_{k,j}. This decomposition of the calculation
allows the l vectors (h_{1,j}, ..., h_{floor((m-j)/l), j})
for j=0, ..., l-1 to be computed with l Toeplitz matrix-vector multiplications:
(h_{1,j} h_{2,j} ... h_{floor((m-j)/l) - 1, j} h_{floor((m-j)/l), j})^T
=
|P_{m-j} P_{m-l-j} P_{m-2*l-j} ... P_{(m-j)%l+2*l} P_{(m-j)%l+l} |
|0 P_{m-j} P_{m-l-j} ... P_{(m-j)%l+3*l} P_{(m-j)%l+2*l} |
|0 0 P_{m-j} ... P_{(m-j)%l+4*l} P_{(m-j)%l+3*l} |
|. . . . . . |
|. . . . . . |
|. . . . . . |
|......................................................................|
|0 0 0 ... P_{m-j} P_{m-l-j} |
|0 0 0 ... 0 P_{m-j} |
*
(τ^{m-l-j} τ^{m-2*l-j} ... τ^{(m-j)%l+l} τ^{(m-j)%l})^T
a || b is the concatenation of a and b.
We can extend this Toeplitz matrix to form a circulant matrix whose columns
are shifted versions of the vector
c=P_{m-j} || 0^{floor((m-j)/l)-1} || P_{(m-j)%l+l} ... P_{m-j-l}.
We can then compute circulant matrix-vector multiplication with the FFT.
See this presentation from Kyle Kloster, student at Purdue University:
https://www.youtube.com/watch?v=w0peHpfFVpc.
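In short, a circulant matrix is diagonalised by the DFT, so the circulant
matrix-vector product above reduces to a forward transform of each operand,
a pointwise product, and an inverse transform; this is what steps 1 to 3
below compute (with EC-FFTs on the G_1 side).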
Given the euclidean divisions m-j = q*l+r, 0 <= r < l for j=0, ...,l-1:
1. Compute l EC-FFTs over G_1: forall j=0, ...,l-1,
s_j=EC-FFT_{2m/l}(srs_{m-j-l} srs_{m-j-2*l} srs_{m-j-3*l} ... srs_{m-j-q*l=r} || 0^{2m/l - floor((m-j)/l)}).
The above calculation can be done once per trusted setup and can thus be cached.
2. Compute l FFTs over the [Scalar] field: forall j=0, ..., l-1:
P'_j = FFT_{2m/l}(P_{m-j} || 0^{q+2*padding+1} || P_{r+l} P_{r+2l}
... P_{r+(q-1)l=m-j-l} || 0^{2m/l- (2*q+2*padding+1)})
where [q = floor ((m-j)/l) = quotient] and [padding] is the difference
between [quotient] and the next power of two of [quotient].
3. Then compute {h}=(h_k)_{k in ⟦1, n/l⟧} with circulant matrix-vector
multiplication via FFT:
h = sum_{j=0}^{l-1} (h_{1,j} ... h_{floor((m-j)/l), j} || 0^{2m/l- floor((m-j)/l)})
= sum_{j=0}^{l-1}EC-IFFT_{2m/l}(P'_j o_{G_1} s_j)
= EC-IFFT_{2m/l} (sum_{j=0}^{l-1} (P_j o_{G_1} s_j))
where o_{G_1} is the pairwise product for vectors with components in G_1.
4. The first n/l coefficients are the result of the multiplication by the
Toeplitz vector (with a bit of zero padding starting from the m/l-th coefficient):
let's call this vector h'. The n/l KZG proofs are given by
π=EC-FFT_{n/l}(h') following the observation ( * ).
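In the code below, step 1 corresponds to [preprocess_multiple_multi_reveals]
and steps 2 to 4 are implemented by [multiple_multi_reveals].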
Complexity of multiple multi-reveals
====================================
For the preprocessing part (step 1), we count l EC-FFTs on G_1, so the asymptotic complexity
of the step is O(l * (m/l) log (m/l))=O(m log(m/l)).
For the KZG proofs generation part (steps 2 to 4), we count l FFTs on the scalar field F,
two EC-FFTs on G_1, and l * 2m/l elliptic curve scalar multiplications in G_1:
the runtime complexity is O(l * T_{F}(m/l) + T_{G_1}(n/l) + m),
where T_{F} and T_{G_1} represent the runtime cost of the FFT and EC-FFT.
Both have the same complexity, even though the latter hides a bigger constant
(log of scalar size in bits, here log 256) due to the elliptic curve scalar multiplication.
Let's recall that l is in our application the length of a shard, n is the length of
the erasure code, α its redundancy factor and m ≈ k is the dimension of the erasure code.
Calling s the number of shards, we obtain l = n/s = α*k/s.
The runtime of the precomputation part can be rewritten as O(k * log (s/α)).
And the computation of the n/l KZG proofs becomes
O(k * log (s/α) + s * log s).
This explains why the algorithm is more efficient with bigger erasure code redundancies α,
especially the precomputation part as it performs EC-FFTs.
For our purposes the number of shards [s] is much smaller than [k], so the bottleneck is the pointwise
scalar multiplication in G_1. *)(* Step 1, returns the pair made of the vectors s_j and the [domain] of length
[2 * m / l = 2 * t.max_polynomial_length / t.shard_size] used for the computation of the s_j. *)letpreprocess_multiple_multi_revealst=(* The length of a coset [t.shard_length] divides the domain length [t.max_polynomial_length].
This is because [t.shard_length] divides [t.erasure_encoded_polynomial_length], [t.max_polynomial_length] divides [t.erasure_encoded_polynomial_length]
and [t.max_polynomial_length > t.shard_length] (see why [m > 2l] above, where [m = t.max_polynomial_length] and
[l = t.shard_length] here). *)assert(t.max_polynomial_lengthmodt.shard_length=0);letdomain_length=2*t.max_polynomial_length/t.shard_lengthinletdomain=Domains.builddomain_lengthinletsrs=t.srs.raw.srs_g1in(* Computes
points = srs_{m-j-l} srs_{m-j-2l} srs_{m-j-3l} ... srs_{m-j-ql=r}
|| 0^{2m/l - floor((m-j)/l)},
s_j = EC-FFT_{2m/l}(points). *)lets_jj=(* According to the documentation of [( / )], "x / y is the greatest
integer less than or equal to the real quotient of x by y". Thus it
equals [floor (x /. y)]. *)letquotient=(t.max_polynomial_length-j)/t.shard_lengthinletpoints=G1_array.initdomain_length(funi->ifi<quotientthenSrs_g1.getsrs(t.max_polynomial_length-j-((i+1)*t.shard_length))elseBls12_381.G1.(copyzero))inG1_array.evaluation_ecfft~domain~pointsin(domain,Array.initt.shard_lengths_j)(* [multiple_multi_reveals t preprocess coefficients] returns the proofs
for each of the [t.number_of_shards] shards.
Implements the "Multiple multi-reveals" section above. *)letmultiple_multi_revealst~preprocess:(domain,sj)~coefficients:shard_proofarray=(* [t.max_polynomial_length > l] where [l = t.shard_length]. *)assert(t.shard_length<t.max_polynomial_length);(* Step 2. *)letdomain_length=Domains.lengthdomaininleth_jj=letremainder=(t.max_polynomial_length-j)modt.shard_lengthinletquotient=(t.max_polynomial_length-j)/t.shard_lengthinletpadding=diff_next_power_of_twoquotientin(* points = P_{m-j} || 0^{q+2*padding+1} || P_{r+l} P_{r+2l}
... P_{r+(q-1)l=m-j-l} || 0^{2m/l- (2*q+2*padding+1)}
where [q = floor ((m-j)/l) = quotient]. *)letpoints=Polynomials.initdomain_length(funi->letidx=remainder+((i-(quotient+(2*padding)))*t.shard_length)inifi=0thenScalar.copycoefficients.(t.max_polynomial_length-j)elseifi<=quotient+(2*padding)||idx>t.max_polynomial_length(* The second inequality is here in the case
[t.max_polynomial_length = 2*t.shard_length]
thus [domain_length = 2*t.max_polynomial_length/t.shard_length=4]
and [padding=0].
In this case, either
[quotient = 2] thus [points = P_{m-j} 0 0 P_{r+l=m-j-l}],
or [quotient = 1] thus
[points] = P_{m-j} 0 P_{m-j} 0. *)thenScalar.(copyzero)elseScalar.copycoefficients.(idx))in(* FFT of step 2. *)Evaluations.evaluation_fftdomainpointsin(* Pairwise product of step 3. *)letevaluations=Array.initt.shard_lengthh_jinleth_j=G1_array.mul_arrays~evaluations~arrays:sjin(* Sum of step 3. *)letsum=h_j.(0)infori=1tot.shard_length-1doG1_array.add_arrays_inplacesumh_j.(i)done;(* Step 3. Toeplitz matrix-vector multiplication *)G1_array.interpolation_ecfft_inplace~domain~points:sum;(* Keep first n / l coefficients *)letlen=Domains.lengthdomain/2inletpoints=G1_array.subsum~off:0~lenin(* Step 4. *)letdomain=Domains.buildt.number_of_shardsinG1_array.(to_array(evaluation_ecfft~domain~points))(* [interpolation_poly root domain evaluations] returns the unique
polynomial P of degree [< Domains.length domain] verifying
P(root * domain[i]) = evaluations[i].
Requires:
- [(Array.length evaluations = Domains.length domain)] *)letinterpolation_poly~root~domain~evaluations=assert(Array.lengthevaluations=Domains.lengthdomain);letsize=Domains.lengthdomaininletevaluations=ifft_inplacesize(Evaluations.of_array(size-1,evaluations))in(* Computes root_inverse = 1/root. *)letroot_inverse=Scalar.inverse_exnrootin(* Computes evaluations[i] = evaluations[i] * root_inverse^i. *)snd(Polynomials.fold_left_map(funroot_pow_inversecoefficient->(Scalar.mulroot_pow_inverseroot_inverse,Scalar.mulcoefficientroot_pow_inverse))Scalar.(copyone)evaluations)(* [verify t commitment srs_point domain root evaluations proof]

  (* [verify t commitment srs_point domain root evaluations proof]
     verifies that P(root * domain.(i)) = evaluations.(i),
     where
     - [P = commit t s] for some slot [s]
     - [l := Array.length evaluations = Domains.length domain]
     - [srs_point = Srs_g2.get t.srs.raw.srs_g2 l]
     - [root = w^i] where [w] is a primitive
       [erasure_encoded_polynomial_length]-th root of unity, for [l] dividing
       [erasure_encoded_polynomial_length]
     - [domain = (1, z, z^2, ..., z^{l - 1})] where [z = w^{n/l}] is a
       primitive [l]-th root of unity

     Implements the "Multi-reveals" section above. *)
  let verify t ~commitment ~srs_point ~domain ~root ~evaluations ~proof =
    let open Bls12_381 in
    let open Result_syntax in
    (* Compute r_i(x). *)
    let remainder = interpolation_poly ~root ~domain ~evaluations in
    (* Compute [r_i(τ)]_1. *)
    let* commitment_remainder = commit t remainder in
    (* Compute [w^{i * l}]. *)
    let root_pow = Scalar.pow root (Z.of_int (Domains.length domain)) in
    (* Compute [τ^l]_2 - [w^{i * l}]_2. *)
    let commit_srs_point_minus_root_pow =
      G2.(add srs_point (negate (mul (copy one) root_pow)))
    in
    (* Compute [r_i(τ)]_1 - c. *)
    let diff_commits = G1.(add commitment_remainder (negate commitment)) in
    (* Checks e(c-[r_i(τ)]_1, g_2) ?= e(π, [τ^l]_2 - [w^{i * l}]_2)
       by checking
       [0]_1 ?= -e(c-[r_i(τ)]_1, g_2) + e(π, [τ^l]_2 - [w^{i * l}]_2)
       = e([r_i(τ)]_1-c, g_2) + e(π, [τ^l]_2 - [w^{i * l}]_2). *)
    Ok
      (Pairing.pairing_check
         [
           (diff_commits, G2.(copy one));
           (proof, commit_srs_point_minus_root_pow);
         ])
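
  (* The pairing check above is the standard KZG opening argument: when
     [P(root * domain.(i)) = evaluations.(i)] for all [i], the polynomial
     [P - r_i] is divisible by [x^l - w^{i*l}], the polynomial vanishing on
     the coset [root * domain], and [proof] is expected to be a commitment to
     that quotient. *)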

  let save_precompute_shards_proofs precomputation ~filename =
    protect (fun () ->
        Lwt_io.with_file ~mode:Output filename (fun chan ->
            let open Lwt_result_syntax in
            let str =
              Data_encoding.Binary.to_string_exn
                Encoding.shards_proofs_precomputation_encoding
                precomputation
            in
            let*! () = Lwt_io.write chan str in
            return_unit))

  let hash_precomputation precomputation =
    let encoding =
      Data_encoding.Binary.to_bytes_exn
        Encoding.shards_proofs_precomputation_encoding
        precomputation
    in
    Tezos_crypto.Blake2B.hash_bytes [encoding]

  let load_precompute_shards_proofs ~hash ~filename () =
    protect (fun () ->
        Lwt_io.with_file ~mode:Input filename (fun chan ->
            let open Lwt_result_syntax in
            let*! str = Lwt_io.read chan in
            let precomputation =
              Data_encoding.Binary.of_string_exn
                Encoding.shards_proofs_precomputation_encoding
                str
            in
            let* () =
              match hash with
              | Some given ->
                  let expected = hash_precomputation precomputation in
                  if Tezos_crypto.Blake2B.equal given expected then return_unit
                  else
                    tzfail
                      (Invalid_precomputation_hash
                         {
                           given = Tezos_crypto.Blake2B.to_string given;
                           expected = Tezos_crypto.Blake2B.to_string expected;
                         })
              | None -> return_unit
            in
            return precomputation))
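
  (* Note: when [~hash:(Some h)] is given, [load_precompute_shards_proofs]
     recomputes the Blake2B hash of the decoded precomputation and fails with
     [Invalid_precomputation_hash] if it differs from [h]; [~hash:None] skips
     this integrity check. *)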
range [%d, %d]."shard_index0(t.number_of_shards-1)))elseletexpected_shard_length=t.shard_lengthinletgot_shard_length=Array.lengthevaluationsinifexpected_shard_length<>got_shard_lengththenError`Shard_length_mismatchelseletroot=Domains.gett.domain_erasure_encoded_polynomial_lengthshard_indexinletdomain=Domains.buildt.shard_lengthinletsrs_point=t.srs.kate_amortized_srs_g2_shardsinmatchverifyt~commitment~srs_point~domain~root~evaluations~proofwith|Oktrue->Ok()|Okfalse->Error`Invalid_shard|Errore->Erroreletprove_pagetppage_index=ifpage_index<0||page_index>=t.pages_per_slotthenError`Page_index_out_of_rangeelseletwi=Domains.gett.domain_polynomial_lengthpage_indexinletquotient,_=Polynomials.division_xnpt.page_length_domainScalar.(negate(powwi(Z.of_intt.page_length_domain)))incommittquotient(* Parses the [slot_page] to get the evaluations that it contains. The

  let verify_shard (t : t) commitment {index = shard_index; share = evaluations}
      proof =
    if shard_index < 0 || shard_index >= t.number_of_shards then
      Error
        (`Shard_index_out_of_range
          (Printf.sprintf
             "Shard index out of range: got index %d, expected index within \
              range [%d, %d]."
             shard_index
             0
             (t.number_of_shards - 1)))
    else
      let expected_shard_length = t.shard_length in
      let got_shard_length = Array.length evaluations in
      if expected_shard_length <> got_shard_length then
        Error `Shard_length_mismatch
      else
        let root =
          Domains.get t.domain_erasure_encoded_polynomial_length shard_index
        in
        let domain = Domains.build t.shard_length in
        let srs_point = t.srs.kate_amortized_srs_g2_shards in
        match
          verify t ~commitment ~srs_point ~domain ~root ~evaluations ~proof
        with
        | Ok true -> Ok ()
        | Ok false -> Error `Invalid_shard
        | Error e -> Error e
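
  (* Note: as documented for [verify], [srs_point] is expected to equal
     [Srs_g2.get t.srs.raw.srs_g2 t.shard_length]; the precomputed
     [kate_amortized_srs_g2_shards] field is assumed to hold exactly that
     point. The [proof] argument is intended to be the entry of index
     [shard_index] in the array returned by [prove_shards]. *)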

  let prove_page t p page_index =
    if page_index < 0 || page_index >= t.pages_per_slot then
      Error `Page_index_out_of_range
    else
      let wi = Domains.get t.domain_polynomial_length page_index in
      let quotient, _ =
        Polynomials.division_xn
          p
          t.page_length_domain
          Scalar.(negate (pow wi (Z.of_int t.page_length_domain)))
      in
      commit t quotient
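
  (* Note (assuming [division_xn p n c] divides [p] by [x^n + c]): the divisor
     above is [x^{page_length_domain} - wi^{page_length_domain}], the
     polynomial vanishing on the coset [wi * domain] checked by [verify_page];
     the committed quotient is the opening witness for that coset. *)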

  (* Parses the [page] to get the evaluations that it contains. The evaluation
     points are given by the [page_index]. *)
  let verify_page t commitment ~page_index page proof =
    if page_index < 0 || page_index >= t.pages_per_slot then
      Error `Page_index_out_of_range
    else
      let expected_page_length = t.page_size in
      let got_page_length = Bytes.length page in
      if expected_page_length <> got_page_length then
        Error `Page_length_mismatch
      else
        let domain = Domains.build t.page_length_domain in
        let evaluations =
          Array.init t.page_length_domain (function
              | i when i < t.page_length - 1 ->
                  (* Parse the [page] by chunks of [scalar_bytes_amount] bytes.
                     These chunks are interpreted as [Scalar.t] elements and
                     stored in [evaluations]. *)
                  let dst = Bytes.create scalar_bytes_amount in
                  Bytes.blit
                    page
                    (i * scalar_bytes_amount)
                    dst
                    0
                    scalar_bytes_amount ;
                  Scalar.of_bytes_exn dst
              | i when i = t.page_length - 1 ->
                  (* Store the remaining bytes in the last nonzero coefficient
                     of evaluations. *)
                  let dst = Bytes.create t.remaining_bytes in
                  Bytes.blit
                    page
                    (i * scalar_bytes_amount)
                    dst
                    0
                    t.remaining_bytes ;
                  Scalar.of_bytes_exn dst
              | _ -> Scalar.(copy zero))
        in
        let root = Domains.get t.domain_polynomial_length page_index in
        match
          verify
            t
            ~commitment
            ~srs_point:t.srs.kate_amortized_srs_g2_pages
            ~domain
            ~root
            ~evaluations
            ~proof
        with
        | Ok true -> Ok ()
        | Ok false -> Error `Invalid_page
        | Error e -> Error e
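
  (* Illustrative sketch (not part of the API): verifying every page of a slot
     against its commitment, given [pages : bytes array] and
     [proofs : page_proof array] indexed by page.

     let verify_all_pages t commitment pages proofs =
       Array.for_all
         (fun i ->
           match verify_page t commitment ~page_index:i pages.(i) proofs.(i) with
           | Ok () -> true
           | Error _ -> false)
         (Array.init t.pages_per_slot Fun.id)
  *)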

end

include Inner

module Verifier = Inner

module Internal_for_tests = struct
  let parameters_initialisation
      {slot_size; page_size; number_of_shards; redundancy_factor; _} =
    let length = slot_as_polynomial_length ~slot_size ~page_size in
    let secret =
      Bls12_381.Fr.of_string
        "20812168509434597367146703229805575690060615791308155437936410982393987532344"
    in
    let srs_g1 = Srs_g1.generate_insecure length secret in
    (* The error is caught during the instantiation through [make]. *)
    let erasure_encoded_polynomial_length = redundancy_factor * length in
    let evaluations_per_proof =
      match erasure_encoded_polynomial_length / number_of_shards with
      | exception Invalid_argument _ -> 0
      | x -> x
    in
    (* The cryptobox will read at indices `length`, `evaluations_per_proof`
       and `page_length` so we take the max + 1. Since `page_length < length`,
       we can remove `page_length` from the max. *)
    let srs_g2 =
      Srs_g2.generate_insecure (max length evaluations_per_proof + 1) secret
    in
    {srs_g1; srs_g2}

  let load_parameters parameters = initialisation_parameters := Some parameters

  let make_dummy_shards (t : t) ~state =
    Random.set_state state ;
    let rec loop index seq =
      if index = t.number_of_shards then seq
      else
        let share =
          Array.init
            (t.shard_length + 1 + Random.int 100)
            (fun _ -> Scalar.(random ~state ()))
        in
        loop (index + 1) (Seq.cons {index; share} seq)
    in
    loop 0 Seq.empty

  let polynomials_equal = Polynomials.equal

  let page_proof_equal = Bls12_381.G1.eq

  let alter_proof proof = Bls12_381.G1.(add proof one)

  let alter_page_proof (proof : page_proof) = alter_proof proof

  let alter_shard_proof (proof : shard_proof) = alter_proof proof

  let alter_commitment_proof (proof : commitment_proof) = alter_proof proof

  let minimum_number_of_shards_to_reconstruct_slot (t : t) =
    t.number_of_shards / t.redundancy_factor

  let select_fft_domain = select_fft_domain

  let precomputation_equal ((d1, a1) : shards_proofs_precomputation)
      ((d2, a2) : shards_proofs_precomputation) =
    Array.for_all2 Scalar.eq d1 d2
    && Array.for_all2 (Array.for_all2 Bls12_381.G1.eq) a1 a2

  let reset_initialisation_parameters () = initialisation_parameters := None

  let dummy_commitment ~state () = Bls12_381.G1.random ~state ()

  let dummy_page_proof ~state () = Bls12_381.G1.random ~state ()

  let dummy_shard_proof ~state () = Bls12_381.G1.random ~state ()

  let make_dummy_shard ~state ~index ~length =
    {index; share = Array.init length (fun _ -> Scalar.(random ~state ()))}

  let number_of_pages t = t.pages_per_slot

  let shard_length t = t.shard_length

  let dummy_polynomial ~state ~degree =
    let rec nonzero () =
      let res = Bls12_381.Fr.random ~state () in
      if Bls12_381.Fr.is_zero res then nonzero () else res
    in
    Polynomials.init (degree + 1) (fun i ->
        if i = degree then nonzero () else Bls12_381.Fr.random ~state ())

  let srs_size_g1 t = Srs_g1.size t.srs.raw.srs_g1

  let encoded_share_size = encoded_share_size

  let ensure_validity
      {redundancy_factor; slot_size; page_size; number_of_shards} =
    let max_polynomial_length =
      slot_as_polynomial_length ~slot_size ~page_size
    in
    let erasure_encoded_polynomial_length =
      redundancy_factor * max_polynomial_length
    in
    let shard_length = erasure_encoded_polynomial_length / number_of_shards in
    let open Result_syntax in
    (let* raw =
       match !initialisation_parameters with
       | None -> fail (`Fail "Dal_cryptobox.make: DAL was not initialised.")
       | Some srs -> return srs
     in
     ensure_validity
       ~slot_size
       ~page_size
       ~erasure_encoded_polynomial_length
       ~max_polynomial_length
       ~redundancy_factor
       ~number_of_shards
       ~shard_length
       ~srs_g1_length:(Srs_g1.size raw.srs_g1)
       ~srs_g2_length:(Srs_g2.size raw.srs_g2))
    |> function
    | Ok _ -> true
    | _ -> false
end
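
(* [Config.init_dal] below loads the SRS into [initialisation_parameters]:
   from the mock parameters when [use_mock_srs_for_testing] is set (test
   setups only), and otherwise from the files returned by [find_srs_files],
   which is expected to return a [result] pairing the paths to the G1 and G2
   SRS files. *)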

module Config = struct
  type t = Dal_config.t = {
    activated : bool;
    use_mock_srs_for_testing : parameters option;
    bootstrap_peers : string list;
  }

  let encoding : t Data_encoding.t = Dal_config.encoding

  let default = Dal_config.default

  let init_dal ~find_srs_files ?(srs_size_log2 = 21) dal_config =
    let open Lwt_result_syntax in
    if dal_config.activated then
      let* initialisation_parameters =
        match dal_config.use_mock_srs_for_testing with
        | Some parameters ->
            return (Internal_for_tests.parameters_initialisation parameters)
        | None ->
            let*? srs_g1_path, srs_g2_path = find_srs_files () in
            initialisation_parameters_from_files
              ~srs_g1_path
              ~srs_g2_path
              ~srs_size_log2
      in
      Lwt.return (load_parameters initialisation_parameters)
    else return_unit
end