include module type of Torch_core.Wrapper.Tensor with type t := t
include Torch_core.Wrapper_generated_intf.S
with type t := t
and type 'a scalar := 'a Torch_core.Wrapper.Scalar.t
val _adaptive_avg_pool2d : t -> output_size:int list -> t

val _adaptive_avg_pool2d_backward : grad_output:t -> t -> t

val _add_batch_dim : t -> batch_dim:int -> level:int -> t

val _addmv_impl_ : t -> self2:t -> mat:t -> vec:t -> t

val _aminmax1 : t -> dim:int -> keepdim:bool -> t * t

val _amp_update_scale :
  growth_tracker:t ->
  current_scale:t ->
  found_inf:t ->
  scale_growth_factor:float ->
  scale_backoff_factor:float ->
  growth_interval:int ->
  t

val _baddbmm_mkl_ : t -> batch1:t -> batch2:t -> t

val _bmm : t -> mat2:t -> deterministic:bool -> t

val _bmm_out : out:t -> t -> mat2:t -> deterministic:bool -> t

val _cast_byte : t -> non_blocking:bool -> t

val _cast_char : t -> non_blocking:bool -> t

val _cast_double : t -> non_blocking:bool -> t

val _cast_float : t -> non_blocking:bool -> t

val _cast_half : t -> non_blocking:bool -> t

val _cast_int : t -> non_blocking:bool -> t

val _cast_long : t -> non_blocking:bool -> t

val _cast_short : t -> non_blocking:bool -> t

val _cat_out : out:t -> t list -> dim:int -> t

val _cdist_backward : grad:t -> x1:t -> x2:t -> p:float -> cdist:t -> t

val _cholesky_helper : t -> upper:bool -> t

val _cholesky_solve_helper : t -> a:t -> upper:bool -> t

val _coalesced_ : t -> coalesced:bool -> t

val _compute_linear_combination : t -> coefficients:t -> t

val _compute_linear_combination_out : out:t -> t -> coefficients:t -> t

val _convolution :
t ->
weight:t ->
bias:t option ->
stride:int list ->
padding:int list ->
dilation:int list ->
transposed:bool ->
output_padding:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
cudnn_enabled:bool ->
t

val _convolution1 :
t ->
weight:t ->
bias:t option ->
stride:int list ->
padding:int list ->
dilation:int list ->
transposed:bool ->
output_padding:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
cudnn_enabled:bool ->
allow_tf32:bool ->
t

val _convolution_nogroup :
t ->
weight:t ->
bias:t option ->
stride:int list ->
padding:int list ->
dilation:int list ->
transposed:bool ->
output_padding:int list ->
t

val _copy_from : t -> dst:t -> non_blocking:bool -> t

val _ctc_loss :
log_probs:t ->
targets:t ->
input_lengths:int list ->
target_lengths:int list ->
blank:int ->
zero_infinity:bool ->
t * t

val _ctc_loss_backward :
grad:t ->
log_probs:t ->
targets:t ->
input_lengths:int list ->
target_lengths:int list ->
neg_log_likelihood:t ->
log_alpha:t ->
blank:int ->
zero_infinity:bool ->
t

val _cudnn_ctc_loss :
log_probs:t ->
targets:t ->
input_lengths:int list ->
target_lengths:int list ->
blank:int ->
deterministic:bool ->
zero_infinity:bool ->
t * t

val _cudnn_rnn :
t ->
weight:t list ->
weight_stride0:int ->
weight_buf:t option ->
hx:t ->
cx:t option ->
mode:int ->
hidden_size:int ->
proj_size:int ->
num_layers:int ->
batch_first:bool ->
dropout:float ->
train:bool ->
bidirectional:bool ->
batch_sizes:int list ->
dropout_state:t option ->
t * t * t * t * t

val _cudnn_rnn_flatten_weight :
weight_arr:t list ->
weight_stride0:int ->
input_size:int ->
mode:int ->
hidden_size:int ->
proj_size:int ->
num_layers:int ->
batch_first:bool ->
bidirectional:bool ->
t

val _cumprod_out : out:t -> t -> dim:int -> t

val _cumsum_out : out:t -> t -> dim:int -> t

val _dim_arange : like:t -> dim:int -> t

val _dirichlet_grad : x:t -> alpha:t -> total:t -> t

val _embedding_bag :
weight:t ->
indices:t ->
offsets:t ->
scale_grad_by_freq:bool ->
mode:int ->
sparse:bool ->
per_sample_weights:t option ->
include_last_offset:bool ->
t * t * t * t

val _embedding_bag_backward :
grad:t ->
indices:t ->
offsets:t ->
offset2bag:t ->
bag_size:t ->
maximum_indices:t ->
num_weights:int ->
scale_grad_by_freq:bool ->
mode:int ->
sparse:bool ->
per_sample_weights:t option ->
t

val _embedding_bag_dense_backward :
grad:t ->
indices:t ->
offsets:t ->
offset2bag:t ->
bag_size:t ->
maximum_indices:t ->
num_weights:int ->
scale_grad_by_freq:bool ->
mode:int ->
per_sample_weights:t option ->
t

val _embedding_bag_forward_only :
weight:t ->
indices:t ->
offsets:t ->
scale_grad_by_freq:bool ->
mode:int ->
sparse:bool ->
per_sample_weights:t option ->
include_last_offset:bool ->
t * t * t * t

val _embedding_bag_per_sample_weights_backward :
grad:t ->
weight:t ->
indices:t ->
offsets:t ->
offset2bag:t ->
mode:int ->
t

val _embedding_bag_sparse_backward :
grad:t ->
indices:t ->
offsets:t ->
offset2bag:t ->
bag_size:t ->
num_weights:int ->
scale_grad_by_freq:bool ->
mode:int ->
per_sample_weights:t option ->
t

val _euclidean_dist : x1:t -> x2:t -> t

val _fake_quantize_learnable_per_channel_affine :
t ->
scale:t ->
zero_point:t ->
axis:int ->
quant_min:int ->
quant_max:int ->
grad_factor:float ->
t

val _fake_quantize_learnable_per_channel_affine_backward :
grad:t ->
t ->
scale:t ->
zero_point:t ->
axis:int ->
quant_min:int ->
quant_max:int ->
grad_factor:float ->
t * t * t

val _fake_quantize_learnable_per_tensor_affine :
t ->
scale:t ->
zero_point:t ->
quant_min:int ->
quant_max:int ->
grad_factor:float ->
t

val _fake_quantize_learnable_per_tensor_affine_backward :
grad:t ->
t ->
scale:t ->
zero_point:t ->
quant_min:int ->
quant_max:int ->
grad_factor:float ->
t * t * t

val _fft_c2c : t -> dim:int list -> normalization:int -> forward:bool -> t

val _fft_c2c_out :
out:t ->
t ->
dim:int list ->
normalization:int ->
forward:bool ->
t

val _fft_c2r : t -> dim:int list -> normalization:int -> last_dim_size:int -> t

val _fft_c2r_out :
out:t ->
t ->
dim:int list ->
normalization:int ->
last_dim_size:int ->
t

val _fft_r2c : t -> dim:int list -> normalization:int -> onesided:bool -> t

val _fft_r2c_out :
out:t ->
t ->
dim:int list ->
normalization:int ->
onesided:bool ->
t

val _fused_dropout : t -> p:float -> t * t

val _fw_primal : t -> level:int -> t

val _gather_sparse_backward : t -> dim:int -> index:t -> grad:t -> t

val _grid_sampler_2d_cpu_fallback :
t ->
grid:t ->
interpolation_mode:int ->
padding_mode:int ->
align_corners:bool ->
t

val _grid_sampler_2d_cpu_fallback_backward :
grad_output:t ->
t ->
grid:t ->
interpolation_mode:int ->
padding_mode:int ->
align_corners:bool ->
t * t

val _index_copy_ : t -> dim:int -> index:t -> source:t -> t

val _index_put_impl_ :
t ->
indices:t option list ->
values:t ->
accumulate:bool ->
unsafe:bool ->
t

val _linalg_inv_out_helper_ : t -> infos_lu:t -> infos_getri:t -> t

val _linalg_qr_helper : t -> mode:string -> t * t

val _linalg_solve_out_helper_ : t -> t -> infos:t -> t

val _log_softmax : t -> dim:int -> half_to_float:bool -> t

val _log_softmax_backward_data : grad_output:t -> output:t -> dim:int -> t -> t

val _logcumsumexp : t -> dim:int -> t

val _logcumsumexp_out : out:t -> t -> dim:int -> t

val _lu_solve_helper : t -> lu_data:t -> lu_pivots:t -> t

val _lu_with_info : t -> pivot:bool -> check_errors:bool -> t * t * t

val _make_dual : primal:t -> tangent:t -> level:int -> t

val _make_per_channel_quantized_tensor :
t ->
scale:t ->
zero_point:t ->
axis:int ->
t

val _make_per_tensor_quantized_tensor : t -> scale:float -> zero_point:int -> t

val _masked_scale : t -> mask:t -> scale:float -> t

val _mkldnn_reshape : t -> shape:int list -> t

val _mkldnn_transpose : t -> dim0:int -> dim1:int -> t

val _mkldnn_transpose_ : t -> dim0:int -> dim1:int -> t

val _mode : t -> dim:int -> keepdim:bool -> t * t

val _mode_out : values:t -> indices:t -> t -> dim:int -> keepdim:bool -> t * t

val _nnpack_spatial_convolution :
t ->
weight:t ->
bias:t option ->
padding:int list ->
stride:int list ->
t

val _nnpack_spatial_convolution_backward_weight :
t ->
weightsize:int list ->
grad_output:t ->
padding:int list ->
t

val _pack_padded_sequence : t -> lengths:t -> batch_first:bool -> t * t

val _pack_padded_sequence_backward :
grad:t ->
input_size:int list ->
batch_sizes:t ->
batch_first:bool ->
t

val _pdist_backward : grad:t -> t -> p:float -> pdist:t -> t

val _remove_batch_dim : t -> level:int -> batch_size:int -> out_dim:int -> t

val _reshape_from_tensor : t -> shape:t -> t

val _saturate_weight_to_fp16 : weight:t -> t

val _sobol_engine_ff_ :
t ->
n:int ->
sobolstate:t ->
dimension:int ->
num_generated:int ->
t

val _sobol_engine_initialize_state_ : t -> dimension:int -> t

val _sobol_engine_scramble_ : t -> ltm:t -> dimension:int -> t

val _softmax : t -> dim:int -> half_to_float:bool -> t

val _softmax_backward_data : grad_output:t -> output:t -> dim:int -> t -> t

val _sparse_addmm : t -> sparse:t -> dense:t -> t

val _sparse_log_softmax1 : t -> dim:int -> half_to_float:bool -> t

val _sparse_log_softmax_backward_data :
grad_output:t ->
output:t ->
dim:int ->
t ->
t

val _sparse_matrix_mask_helper : tr:t -> mask_indices:t -> t

val _sparse_mm : sparse:t -> dense:t -> t

val _sparse_softmax1 : t -> dim:int -> half_to_float:bool -> t

val _sparse_softmax_backward_data :
grad_output:t ->
output:t ->
dim:int ->
t ->
t

val _sparse_sparse_matmul : t -> t -> t

val _sparse_sum2 : t -> dim:int list -> t

val _sparse_sum_backward : grad:t -> t -> dim:int list -> t

val _stack : t list -> dim:int -> t

val _stack_out : out:t -> t list -> dim:int -> t

val _standard_gamma_grad : t -> output:t -> t

val _std : t -> unbiased:bool -> t

val _svd_helper : t -> some:bool -> compute_uv:bool -> t * t * t

val _syevd_helper : t -> compute_eigenvectors:bool -> uplo:string -> t * t

val _symeig_helper : t -> eigenvectors:bool -> upper:bool -> t * t

val _test_ambiguous_defaults : dummy:t -> a:int -> b:int -> t

val _test_ambiguous_defaults1 : dummy:t -> a:int -> b:string -> t

val _test_optional_filled_intlist : values:t -> addends:int list -> t

val _test_optional_intlist : values:t -> addends:int list -> t

val _test_serialization_subcmul : t -> t -> t

val _test_string_default : dummy:t -> a:string -> b:string -> t

val _thnn_differentiable_gru_cell_backward :
grad_hy:t ->
input_gates:t ->
hidden_gates:t ->
hx:t ->
input_bias:t option ->
hidden_bias:t option ->
t * t * t * t * t

val _thnn_differentiable_lstm_cell_backward :
grad_hy:t option ->
grad_cy:t option ->
input_gates:t ->
hidden_gates:t ->
input_bias:t option ->
hidden_bias:t option ->
cx:t ->
cy:t ->
t * t * t * t * t

val _thnn_fused_gru_cell :
input_gates:t ->
hidden_gates:t ->
hx:t ->
input_bias:t option ->
hidden_bias:t option ->
t * t

val _thnn_fused_gru_cell_backward :
grad_hy:t ->
workspace:t ->
has_bias:bool ->
t * t * t * t * t

val _thnn_fused_lstm_cell :
input_gates:t ->
hidden_gates:t ->
cx:t ->
input_bias:t option ->
hidden_bias:t option ->
t * t * t

val _thnn_fused_lstm_cell_backward :
grad_hy:t option ->
grad_cy:t option ->
cx:t ->
cy:t ->
workspace:t ->
has_bias:bool ->
t * t * t * t * t

val _triangular_solve_helper :
t ->
a:t ->
upper:bool ->
transpose:bool ->
unitriangular:bool ->
t * t

val _trilinear :
i1:t ->
i2:t ->
i3:t ->
expand1:int list ->
expand2:int list ->
expand3:int list ->
sumdim:int list ->
unroll_dim:int ->
t

val _unique : t -> sorted:bool -> return_inverse:bool -> t * t

val _unique2 :
t ->
sorted:bool ->
return_inverse:bool ->
return_counts:bool ->
t * t * t

val _unpack_dual : dual:t -> level:int -> t * t

val _unsafe_view : t -> size:int list -> t

val _var : t -> unbiased:bool -> t

val _weight_norm : v:t -> g:t -> dim:int -> t

val _weight_norm_cuda_interface : v:t -> g:t -> dim:int -> t * t

val _weight_norm_cuda_interface_backward :
grad_w:t ->
saved_v:t ->
saved_g:t ->
saved_norms:t ->
dim:int ->
t * t

val _weight_norm_differentiable_backward :
grad_w:t ->
saved_v:t ->
saved_g:t ->
saved_norms:t ->
dim:int ->
t * t

val adaptive_avg_pool1d : t -> output_size:int list -> t

val adaptive_avg_pool2d : t -> output_size:int list -> t

val adaptive_avg_pool2d_out : out:t -> t -> output_size:int list -> t

val adaptive_avg_pool3d : t -> output_size:int list -> t

val adaptive_avg_pool3d_backward : grad_output:t -> t -> t

val adaptive_avg_pool3d_backward_out : grad_input:t -> grad_output:t -> t -> t

val adaptive_avg_pool3d_out : out:t -> t -> output_size:int list -> t

val adaptive_max_pool1d : t -> output_size:int list -> t * t

val adaptive_max_pool2d : t -> output_size:int list -> t * t

val adaptive_max_pool2d_backward : grad_output:t -> t -> indices:t -> t

val adaptive_max_pool2d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
indices:t ->
t Sourceval adaptive_max_pool2d_out :
out:t ->
indices:t ->
t ->
output_size:int list ->
t * t Sourceval adaptive_max_pool3d : t -> output_size:int list -> t * t Sourceval adaptive_max_pool3d_backward : grad_output:t -> t -> indices:t -> t Sourceval adaptive_max_pool3d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
indices:t ->
t Sourceval adaptive_max_pool3d_out :
out:t ->
indices:t ->
t ->
output_size:int list ->
t * t Sourceval addbmm_ : t -> batch1:t -> batch2:t -> t Sourceval addbmm_out : out:t -> t -> batch1:t -> batch2:t -> t Sourceval addcdiv : t -> tensor1:t -> tensor2:t -> t Sourceval addcdiv_ : t -> tensor1:t -> tensor2:t -> t Sourceval addcdiv_out : out:t -> t -> tensor1:t -> tensor2:t -> t Sourceval addcmul : t -> tensor1:t -> tensor2:t -> t Sourceval addcmul_ : t -> tensor1:t -> tensor2:t -> t Sourceval addcmul_out : out:t -> t -> tensor1:t -> tensor2:t -> t Sourceval addmm_out : out:t -> t -> mat1:t -> mat2:t -> t Sourceval addmv_out : out:t -> t -> mat:t -> vec:t -> t Sourceval addr_out : out:t -> t -> vec1:t -> vec2:t -> t Sourceval affine_grid_generator : theta:t -> size:int list -> align_corners:bool -> t Sourceval affine_grid_generator_backward :
grad:t ->
size:int list ->
align_corners:bool ->
t Sourceval align_tensors : t list -> t list Sourceval all1 : t -> dim:int -> keepdim:bool -> t Sourceval all_out : out:t -> t -> dim:int -> keepdim:bool -> t Sourceval alpha_dropout : t -> p:float -> train:bool -> t Sourceval alpha_dropout_ : t -> p:float -> train:bool -> t Sourceval amax : t -> dim:int list -> keepdim:bool -> t Sourceval amax_out : out:t -> t -> dim:int list -> keepdim:bool -> t Sourceval amin : t -> dim:int list -> keepdim:bool -> t Sourceval amin_out : out:t -> t -> dim:int list -> keepdim:bool -> t Sourceval any1 : t -> dim:int -> keepdim:bool -> t Sourceval any_out : out:t -> t -> dim:int -> keepdim:bool -> t Sourceval argmax_out : out:t -> t -> dim:int -> keepdim:bool -> t Sourceval argmin : t -> dim:int -> keepdim:bool -> t Sourceval argmin_out : out:t -> t -> dim:int -> keepdim:bool -> t Sourceval argsort : t -> dim:int -> descending:bool -> t Sourceval as_strided :
t ->
size:int list ->
stride:int list ->
storage_offset:int ->
t Sourceval as_strided_ :
t ->
size:int list ->
stride:int list ->
storage_offset:int ->
t Sourceval atleast_1d1 : t list -> t list Sourceval atleast_2d1 : t list -> t list Sourceval atleast_3d1 : t list -> t list Sourceval avg_pool1d :
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
ceil_mode:bool ->
count_include_pad:bool ->
t Sourceval avg_pool2d_backward :
grad_output:t ->
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
ceil_mode:bool ->
count_include_pad:bool ->
divisor_override:int ->
t Sourceval avg_pool2d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
ceil_mode:bool ->
count_include_pad:bool ->
divisor_override:int ->
t Sourceval avg_pool2d_out :
out:t ->
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
ceil_mode:bool ->
count_include_pad:bool ->
divisor_override:int ->
t Sourceval avg_pool3d :
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
ceil_mode:bool ->
count_include_pad:bool ->
divisor_override:int ->
t Sourceval avg_pool3d_backward :
grad_output:t ->
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
ceil_mode:bool ->
count_include_pad:bool ->
divisor_override:int ->
t Sourceval avg_pool3d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
ceil_mode:bool ->
count_include_pad:bool ->
divisor_override:int ->
t Sourceval avg_pool3d_out :
out:t ->
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
ceil_mode:bool ->
count_include_pad:bool ->
divisor_override:int ->
t Sourceval baddbmm : t -> batch1:t -> batch2:t -> t Sourceval baddbmm_ : t -> batch1:t -> batch2:t -> t Sourceval baddbmm_out : out:t -> t -> batch1:t -> batch2:t -> t Sourceval batch_norm :
t ->
weight:t option ->
bias:t option ->
running_mean:t option ->
running_var:t option ->
training:bool ->
momentum:float ->
eps:float ->
cudnn_enabled:bool ->
t Sourceval batch_norm_backward_elemt :
grad_out:t ->
t ->
mean:t ->
invstd:t ->
weight:t option ->
mean_dy:t ->
mean_dy_xmu:t ->
t Sourceval batch_norm_backward_reduce :
grad_out:t ->
t ->
mean:t ->
invstd:t ->
weight:t option ->
input_g:bool ->
weight_g:bool ->
bias_g:bool ->
t * t * t * t Sourceval batch_norm_elemt :
t ->
weight:t option ->
bias:t option ->
mean:t ->
invstd:t ->
eps:float ->
t Sourceval batch_norm_elemt_out :
out:t ->
t ->
weight:t option ->
bias:t option ->
mean:t ->
invstd:t ->
eps:float ->
t Sourceval batch_norm_gather_stats :
t ->
mean:t ->
invstd:t ->
running_mean:t option ->
running_var:t option ->
momentum:float ->
eps:float ->
count:int ->
t * t Sourceval batch_norm_gather_stats_with_counts :
t ->
mean:t ->
invstd:t ->
running_mean:t option ->
running_var:t option ->
momentum:float ->
eps:float ->
counts:t ->
t * t Sourceval batch_norm_stats : t -> eps:float -> t * t Sourceval batch_norm_update_stats :
t ->
running_mean:t option ->
running_var:t option ->
momentum:float ->
t * t Sourceval bernoulli1 : t -> p:float -> t Sourceval bernoulli_1 : t -> p:float -> t Sourceval bilinear : input1:t -> input2:t -> weight:t -> bias:t option -> t Sourceval bincount : t -> weights:t option -> minlength:int -> t Sourceval broadcast_tensors : t list -> t list Sourceval broadcast_to : t -> size:int list -> t Sourceval bucketize : t -> boundaries:t -> out_int32:bool -> right:bool -> t Sourceval bucketize_out :
out:t ->
t ->
boundaries:t ->
out_int32:bool ->
right:bool ->
t Sourceval cat_out : out:t -> t list -> dim:int -> t Sourceval cauchy_ : t -> median:float -> sigma:float -> t Sourceval cdist : x1:t -> x2:t -> p:float -> compute_mode:int -> t Sourceval chain_matmul : matrices:t list -> t Sourceval channel_shuffle : t -> groups:int -> t Sourceval cholesky : t -> upper:bool -> t Sourceval cholesky_inverse : t -> upper:bool -> t Sourceval cholesky_inverse_out : out:t -> t -> upper:bool -> t Sourceval cholesky_out : out:t -> t -> upper:bool -> t Sourceval cholesky_solve : t -> input2:t -> upper:bool -> t Sourceval cholesky_solve_out : out:t -> t -> input2:t -> upper:bool -> t Sourceval choose_qparams_optimized :
t ->
numel:int ->
n_bins:int ->
ratio:float ->
bit_width:int ->
t * t Sourceval chunk : t -> chunks:int -> dim:int -> t list Sourceval col2im :
t ->
output_size:int list ->
kernel_size:int list ->
dilation:int list ->
padding:int list ->
stride:int list ->
t Sourceval col2im_backward :
grad_output:t ->
kernel_size:int list ->
dilation:int list ->
padding:int list ->
stride:int list ->
t Sourceval col2im_backward_out :
grad_input:t ->
grad_output:t ->
kernel_size:int list ->
dilation:int list ->
padding:int list ->
stride:int list ->
t Sourceval col2im_out :
out:t ->
t ->
output_size:int list ->
kernel_size:int list ->
dilation:int list ->
padding:int list ->
stride:int list ->
t Sourceval column_stack_out : out:t -> t list -> t Sourceval combinations : t -> r:int -> with_replacement:bool -> t Sourceval complex_out : out:t -> real:t -> imag:t -> t Sourceval constant_pad_nd : t -> pad:int list -> t Sourceval conv1d :
t ->
weight:t ->
bias:t option ->
stride:int list ->
padding:int list ->
dilation:int list ->
groups:int ->
t Sourceval conv3d :
t ->
weight:t ->
bias:t option ->
stride:int list ->
padding:int list ->
dilation:int list ->
groups:int ->
t Sourceval conv_tbc : t -> weight:t -> bias:t -> pad:int -> t Sourceval conv_tbc_backward : t -> t -> weight:t -> bias:t -> pad:int -> t * t * t Sourceval conv_transpose1d :
t ->
weight:t ->
bias:t option ->
stride:int list ->
padding:int list ->
output_padding:int list ->
groups:int ->
dilation:int list ->
t Sourceval conv_transpose3d :
t ->
weight:t ->
bias:t option ->
stride:int list ->
padding:int list ->
output_padding:int list ->
groups:int ->
dilation:int list ->
t Sourceval convolution :
t ->
weight:t ->
bias:t option ->
stride:int list ->
padding:int list ->
dilation:int list ->
transposed:bool ->
output_padding:int list ->
groups:int ->
t Sourceval convolution_overrideable :
t ->
weight:t ->
bias:t option ->
stride:int list ->
padding:int list ->
dilation:int list ->
transposed:bool ->
output_padding:int list ->
groups:int ->
t Sourceval copy_sparse_to_sparse_ : t -> src:t -> non_blocking:bool -> t Sourceval cosine_similarity : x1:t -> x2:t -> dim:int -> eps:float -> t Sourceval cross_out : out:t -> t -> t -> dim:int -> t Sourceval ctc_loss :
log_probs:t ->
targets:t ->
input_lengths:int list ->
target_lengths:int list ->
blank:int ->
reduction:Torch_core.Reduction.t ->
zero_infinity:bool ->
t Sourceval cudnn_affine_grid_generator :
theta:t ->
n:int ->
c:int ->
h:int ->
w:int ->
t Sourceval cudnn_affine_grid_generator_backward :
grad:t ->
n:int ->
c:int ->
h:int ->
w:int ->
t Sourceval cudnn_batch_norm :
t ->
weight:t ->
bias:t option ->
running_mean:t option ->
running_var:t option ->
training:bool ->
exponential_average_factor:float ->
epsilon:float ->
t * t * t * t Sourceval cudnn_batch_norm_backward :
t ->
grad_output:t ->
weight:t ->
running_mean:t option ->
running_var:t option ->
save_mean:t option ->
save_var:t option ->
epsilon:float ->
reservespace:t ->
t * t * t Sourceval cudnn_convolution :
t ->
weight:t ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
t Sourceval cudnn_convolution1 :
t ->
weight:t ->
bias:t option ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
t Sourceval cudnn_convolution2 :
t ->
weight:t ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
allow_tf32:bool ->
t Sourceval cudnn_convolution_backward_weight :
weight_size:int list ->
grad_output:t ->
t ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
allow_tf32:bool ->
t Sourceval cudnn_convolution_transpose :
t ->
weight:t ->
padding:int list ->
output_padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
t Sourceval cudnn_convolution_transpose1 :
t ->
weight:t ->
bias:t option ->
padding:int list ->
output_padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
t Sourceval cudnn_convolution_transpose2 :
t ->
weight:t ->
padding:int list ->
output_padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
allow_tf32:bool ->
t Sourceval cudnn_convolution_transpose_backward_weight :
weight_size:int list ->
grad_output:t ->
t ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
allow_tf32:bool ->
t Sourceval cudnn_grid_sampler : t -> grid:t -> t Sourceval cudnn_grid_sampler_backward : t -> grid:t -> grad_output:t -> t * t Sourceval cummax_out : values:t -> indices:t -> t -> dim:int -> t * t Sourceval cummaxmin_backward : grad:t -> t -> indices:t -> dim:int -> t Sourceval cummin_out : values:t -> indices:t -> t -> dim:int -> t * t Sourceval cumprod_backward : grad:t -> t -> dim:int -> t Sourceval dequantize1 : t list -> t list Sourceval diag_backward : grad:t -> input_sizes:int list -> diagonal:int -> t Sourceval diag_embed : t -> offset:int -> dim1:int -> dim2:int -> t Sourceval diag_out : out:t -> t -> diagonal:int -> t Sourceval diagflat : t -> offset:int -> t Sourceval diagonal : t -> offset:int -> dim1:int -> dim2:int -> t Sourceval diagonal_backward :
grad:t ->
input_sizes:int list ->
offset:int ->
dim1:int ->
dim2:int ->
t Sourceval diff : t -> n:int -> dim:int -> prepend:t option -> append:t option -> t Sourceval diff_out :
out:t ->
t ->
n:int ->
dim:int ->
prepend:t option ->
append:t option ->
t Sourceval div2 : t -> t -> rounding_mode:string -> t Sourceval div_2 : t -> t -> rounding_mode:string -> t Sourceval div_out1 : out:t -> t -> t -> rounding_mode:string -> t Sourceval divide2 : t -> t -> rounding_mode:string -> t Sourceval divide_2 : t -> t -> rounding_mode:string -> t Sourceval divide_out1 : out:t -> t -> t -> rounding_mode:string -> t Sourceval dropout_ : t -> p:float -> train:bool -> t Sourceval eig : t -> eigenvectors:bool -> t * t Sourceval eig_out : e:t -> v:t -> t -> eigenvectors:bool -> t * t Sourceval einsum : equation:string -> t list -> t Sourceval embedding :
weight:t ->
indices:t ->
padding_idx:int ->
scale_grad_by_freq:bool ->
sparse:bool ->
t Sourceval embedding_backward :
grad:t ->
indices:t ->
num_weights:int ->
padding_idx:int ->
scale_grad_by_freq:bool ->
sparse:bool ->
t Sourceval embedding_bag :
weight:t ->
indices:t ->
offsets:t ->
scale_grad_by_freq:bool ->
mode:int ->
sparse:bool ->
per_sample_weights:t option ->
include_last_offset:bool ->
t * t * t * t Sourceval embedding_dense_backward :
grad_output:t ->
indices:t ->
num_weights:int ->
padding_idx:int ->
scale_grad_by_freq:bool ->
t Sourceval embedding_renorm_ :
t ->
indices:t ->
max_norm:float ->
norm_type:float ->
t Sourceval embedding_sparse_backward :
grad:t ->
indices:t ->
num_weights:int ->
padding_idx:int ->
scale_grad_by_freq:bool ->
t Sourceval empty_out : out:t -> size:int list -> t Sourceval empty_quantized : size:int list -> qtensor:t -> t Sourceval expand : t -> size:int list -> implicit:bool -> t Sourceval exponential_ : t -> lambd:float -> t Sourceval eye_out1 : out:t -> n:int -> m:int -> t Sourceval fake_quantize_per_channel_affine :
t ->
scale:t ->
zero_point:t ->
axis:int ->
quant_min:int ->
quant_max:int ->
t Sourceval fake_quantize_per_channel_affine_cachemask :
t ->
scale:t ->
zero_point:t ->
axis:int ->
quant_min:int ->
quant_max:int ->
t * t Sourceval fake_quantize_per_channel_affine_cachemask_backward : grad:t -> mask:t -> t Sourceval fake_quantize_per_tensor_affine :
t ->
scale:float ->
zero_point:int ->
quant_min:int ->
quant_max:int ->
t Sourceval fake_quantize_per_tensor_affine_cachemask :
t ->
scale:float ->
zero_point:int ->
quant_min:int ->
quant_max:int ->
t * t Sourceval fake_quantize_per_tensor_affine_cachemask_backward : grad:t -> mask:t -> t Sourceval fbgemm_linear_fp16_weight : t -> packed_weight:t -> bias:t -> t Sourceval fbgemm_linear_fp16_weight_fp32_activation :
t ->
packed_weight:t ->
bias:t ->
t Sourceval fbgemm_pack_gemm_matrix_fp16 : t -> t Sourceval fbgemm_pack_quantized_matrix : t -> t Sourceval fbgemm_pack_quantized_matrix1 : t -> k:int -> n:int -> t Sourceval feature_alpha_dropout : t -> p:float -> train:bool -> t Sourceval feature_alpha_dropout_ : t -> p:float -> train:bool -> t Sourceval feature_dropout : t -> p:float -> train:bool -> t Sourceval feature_dropout_ : t -> p:float -> train:bool -> t Sourceval fft_fft : t -> n:int -> dim:int -> norm:string -> t Sourceval fft_fft2 : t -> s:int list -> dim:int list -> norm:string -> t Sourceval fft_fft2_out : out:t -> t -> s:int list -> dim:int list -> norm:string -> t Sourceval fft_fft_out : out:t -> t -> n:int -> dim:int -> norm:string -> t Sourceval fft_fftfreq_out : out:t -> n:int -> d:float -> t Sourceval fft_fftn : t -> s:int list -> dim:int list -> norm:string -> t Sourceval fft_fftn_out : out:t -> t -> s:int list -> dim:int list -> norm:string -> t Sourceval fft_fftshift : t -> dim:int list -> t Sourceval fft_hfft : t -> n:int -> dim:int -> norm:string -> t Sourceval fft_hfft_out : out:t -> t -> n:int -> dim:int -> norm:string -> t Sourceval fft_ifft : t -> n:int -> dim:int -> norm:string -> t Sourceval fft_ifft2 : t -> s:int list -> dim:int list -> norm:string -> t Sourceval fft_ifft2_out :
out:t ->
t ->
s:int list ->
dim:int list ->
norm:string ->
t Sourceval fft_ifft_out : out:t -> t -> n:int -> dim:int -> norm:string -> t Sourceval fft_ifftn : t -> s:int list -> dim:int list -> norm:string -> t Sourceval fft_ifftn_out :
out:t ->
t ->
s:int list ->
dim:int list ->
norm:string ->
t Sourceval fft_ifftshift : t -> dim:int list -> t Sourceval fft_ihfft : t -> n:int -> dim:int -> norm:string -> t Sourceval fft_ihfft_out : out:t -> t -> n:int -> dim:int -> norm:string -> t Sourceval fft_irfft : t -> n:int -> dim:int -> norm:string -> t Sourceval fft_irfft2 : t -> s:int list -> dim:int list -> norm:string -> t Sourceval fft_irfft2_out :
out:t ->
t ->
s:int list ->
dim:int list ->
norm:string ->
t Sourceval fft_irfft_out : out:t -> t -> n:int -> dim:int -> norm:string -> t Sourceval fft_irfftn : t -> s:int list -> dim:int list -> norm:string -> t Sourceval fft_irfftn_out :
out:t ->
t ->
s:int list ->
dim:int list ->
norm:string ->
t Sourceval fft_rfft : t -> n:int -> dim:int -> norm:string -> t Sourceval fft_rfft2 : t -> s:int list -> dim:int list -> norm:string -> t Sourceval fft_rfft2_out :
out:t ->
t ->
s:int list ->
dim:int list ->
norm:string ->
t Sourceval fft_rfft_out : out:t -> t -> n:int -> dim:int -> norm:string -> t Sourceval fft_rfftfreq_out : out:t -> n:int -> d:float -> t Sourceval fft_rfftn : t -> s:int list -> dim:int list -> norm:string -> t Sourceval fft_rfftn_out :
out:t ->
t ->
s:int list ->
dim:int list ->
norm:string ->
t Sourceval flip : t -> dims:int list -> t Sourceval float_power_1 : t -> exponent:t -> t Sourceval float_power_out : out:t -> t -> exponent:t -> t Sourceval fractional_max_pool2d :
t ->
kernel_size:int list ->
output_size:int list ->
random_samples:t ->
t * t Sourceval fractional_max_pool2d_backward :
grad_output:t ->
t ->
kernel_size:int list ->
output_size:int list ->
indices:t ->
t Sourceval fractional_max_pool2d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
kernel_size:int list ->
output_size:int list ->
indices:t ->
t Sourceval fractional_max_pool2d_out :
output:t ->
indices:t ->
t ->
kernel_size:int list ->
output_size:int list ->
random_samples:t ->
t * t Sourceval fractional_max_pool3d :
t ->
kernel_size:int list ->
output_size:int list ->
random_samples:t ->
t * t Sourceval fractional_max_pool3d_backward :
grad_output:t ->
t ->
kernel_size:int list ->
output_size:int list ->
indices:t ->
t Sourceval fractional_max_pool3d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
kernel_size:int list ->
output_size:int list ->
indices:t ->
t Sourceval fractional_max_pool3d_out :
output:t ->
indices:t ->
t ->
kernel_size:int list ->
output_size:int list ->
random_samples:t ->
t * t Sourceval frobenius_norm1 : t -> dim:int list -> keepdim:bool -> t Sourceval frobenius_norm_out : out:t -> t -> dim:int list -> keepdim:bool -> t Sourceval gather : t -> dim:int -> index:t -> sparse_grad:bool -> t Sourceval gather_backward :
grad:t ->
t ->
dim:int ->
index:t ->
sparse_grad:bool ->
t Sourceval gather_out : out:t -> t -> dim:int -> index:t -> sparse_grad:bool -> t Sourceval geometric_ : t -> p:float -> t Sourceval glu_backward : grad_output:t -> t -> dim:int -> t Sourceval glu_backward_out : grad_input:t -> grad_output:t -> t -> dim:int -> t Sourceval glu_out : out:t -> t -> dim:int -> t Sourceval greater_equal_out1 : out:t -> t -> t -> t Sourceval grid_sampler :
t ->
grid:t ->
interpolation_mode:int ->
padding_mode:int ->
align_corners:bool ->
t Sourceval grid_sampler_2d :
t ->
grid:t ->
interpolation_mode:int ->
padding_mode:int ->
align_corners:bool ->
t Sourceval grid_sampler_2d_backward :
grad_output:t ->
t ->
grid:t ->
interpolation_mode:int ->
padding_mode:int ->
align_corners:bool ->
t * t Sourceval grid_sampler_3d :
t ->
grid:t ->
interpolation_mode:int ->
padding_mode:int ->
align_corners:bool ->
t Sourceval grid_sampler_3d_backward :
grad_output:t ->
t ->
grid:t ->
interpolation_mode:int ->
padding_mode:int ->
align_corners:bool ->
t * t Sourceval group_norm :
t ->
num_groups:int ->
weight:t option ->
bias:t option ->
eps:float ->
cudnn_enabled:bool ->
t Sourceval gru :
t ->
hx:t ->
params:t list ->
has_biases:bool ->
num_layers:int ->
dropout:float ->
train:bool ->
bidirectional:bool ->
batch_first:bool ->
t * t Sourceval gru1 :
data:t ->
batch_sizes:t ->
hx:t ->
params:t list ->
has_biases:bool ->
num_layers:int ->
dropout:float ->
train:bool ->
bidirectional:bool ->
t * t Sourceval gru_cell :
t ->
hx:t ->
w_ih:t ->
w_hh:t ->
b_ih:t option ->
b_hh:t option ->
t Sourceval hardsigmoid_backward : grad_output:t -> t -> t Sourceval hardswish_backward : grad_output:t -> t -> t Sourceval heaviside_out : out:t -> t -> values:t -> t Sourceval histc_out : out:t -> t -> bins:int -> t Sourceval hspmm_out : out:t -> mat1:t -> mat2:t -> t Sourceval im2col :
t ->
kernel_size:int list ->
dilation:int list ->
padding:int list ->
stride:int list ->
t Sourceval im2col_backward :
grad_output:t ->
input_size:int list ->
kernel_size:int list ->
dilation:int list ->
padding:int list ->
stride:int list ->
t Sourceval im2col_backward_out :
grad_input:t ->
grad_output:t ->
input_size:int list ->
kernel_size:int list ->
dilation:int list ->
padding:int list ->
stride:int list ->
t Sourceval im2col_out :
out:t ->
t ->
kernel_size:int list ->
dilation:int list ->
padding:int list ->
stride:int list ->
t Sourceval index : t -> indices:t option list -> t Sourceval index_add : t -> dim:int -> index:t -> source:t -> t Sourceval index_add_ : t -> dim:int -> index:t -> source:t -> t Sourceval index_copy : t -> dim:int -> index:t -> source:t -> t Sourceval index_copy_ : t -> dim:int -> index:t -> source:t -> t Sourceval index_fill1 : t -> dim:int -> index:t -> value:t -> t Sourceval index_fill_1 : t -> dim:int -> index:t -> value:t -> t Sourceval index_put : t -> indices:t option list -> values:t -> accumulate:bool -> t Sourceval index_put_ : t -> indices:t option list -> values:t -> accumulate:bool -> t Sourceval index_select : t -> dim:int -> index:t -> t Sourceval index_select_backward :
grad:t ->
self_sizes:int list ->
dim:int ->
index:t ->
t Sourceval index_select_out : out:t -> t -> dim:int -> index:t -> t Sourceval infinitely_differentiable_gelu_backward : grad:t -> t -> t Sourceval instance_norm :
t ->
weight:t option ->
bias:t option ->
running_mean:t option ->
running_var:t option ->
use_input_stats:bool ->
momentum:float ->
eps:float ->
cudnn_enabled:bool ->
t Sourceval isclose : t -> t -> rtol:float -> atol:float -> equal_nan:bool -> t Sourceval istft :
t ->
n_fft:int ->
hop_length:int ->
win_length:int ->
window:t option ->
center:bool ->
normalized:bool ->
onesided:bool ->
length:int ->
return_complex:bool ->
t Sourceval kthvalue : t -> k:int -> dim:int -> keepdim:bool -> t * t Sourceval kthvalue_out :
values:t ->
indices:t ->
t ->
k:int ->
dim:int ->
keepdim:bool ->
t * t Sourceval layer_norm :
t ->
normalized_shape:int list ->
weight:t option ->
bias:t option ->
eps:float ->
cudnn_enable:bool ->
t Sourceval lerp_out1 : out:t -> t -> end_:t -> weight:t -> t Sourceval linalg_cholesky_out : out:t -> t -> t Sourceval linalg_cond1 : t -> p:string -> t Sourceval linalg_cond_out1 : out:t -> t -> p:string -> t Sourceval linalg_eigh : t -> uplo:string -> t * t Sourceval linalg_eigh_out : eigvals:t -> eigvecs:t -> t -> uplo:string -> t * t Sourceval linalg_eigvalsh : t -> uplo:string -> t Sourceval linalg_eigvalsh_out : out:t -> t -> uplo:string -> t Sourceval linalg_matrix_rank : t -> tol:float -> hermitian:bool -> t Sourceval linalg_matrix_rank_out : out:t -> t -> tol:float -> hermitian:bool -> t Sourceval linalg_pinv : t -> rcond:float -> hermitian:bool -> t Sourceval linalg_pinv1 : t -> rcond:t -> hermitian:bool -> t Sourceval linalg_pinv_out : out:t -> t -> rcond:float -> hermitian:bool -> t Sourceval linalg_pinv_out1 : out:t -> t -> rcond:t -> hermitian:bool -> t Sourceval linalg_qr : t -> mode:string -> t * t Sourceval linalg_qr_out : q:t -> r:t -> t -> mode:string -> t * t Sourceval linalg_slogdet_out : sign:t -> logabsdet:t -> t -> t * t Sourceval linalg_svd : t -> full_matrices:bool -> compute_uv:bool -> t * t * t Sourceval linalg_svd_out :
u:t ->
s:t ->
v:t ->
t ->
full_matrices:bool ->
compute_uv:bool ->
t * t * t Sourceval linalg_tensorinv : t -> ind:int -> t Sourceval linalg_tensorinv_out : out:t -> t -> ind:int -> t Sourceval linalg_tensorsolve : t -> t -> dims:int list -> t Sourceval linalg_tensorsolve_out : out:t -> t -> t -> dims:int list -> t Sourceval linear : t -> weight:t -> bias:t option -> t Sourceval log_normal_ : t -> mean:float -> std:float -> t Sourceval log_sigmoid_backward : grad_output:t -> t -> buffer:t -> t Sourceval log_sigmoid_backward_out :
grad_input:t ->
grad_output:t ->
t ->
buffer:t ->
t Sourceval logcumsumexp : t -> dim:int -> t Sourceval logcumsumexp_out : out:t -> t -> dim:int -> t Sourceval logit_backward : grad_output:t -> t -> eps:float -> t Sourceval logit_backward_out : grad_input:t -> grad_output:t -> t -> eps:float -> t Sourceval logit_out : out:t -> t -> eps:float -> t Sourceval logsumexp : t -> dim:int list -> keepdim:bool -> t Sourceval logsumexp_out : out:t -> t -> dim:int list -> keepdim:bool -> t Sourceval lstm :
t ->
hx:t list ->
params:t list ->
has_biases:bool ->
num_layers:int ->
dropout:float ->
train:bool ->
bidirectional:bool ->
batch_first:bool ->
t * t * t Sourceval lstm1 :
data:t ->
batch_sizes:t ->
hx:t list ->
params:t list ->
has_biases:bool ->
num_layers:int ->
dropout:float ->
train:bool ->
bidirectional:bool ->
t * t * t Sourceval lstm_cell :
t ->
hx:t list ->
w_ih:t ->
w_hh:t ->
b_ih:t option ->
b_hh:t option ->
t * t Sourceval lu_solve : t -> lu_data:t -> lu_pivots:t -> t Sourceval lu_solve_out : out:t -> t -> lu_data:t -> lu_pivots:t -> t Sourceval masked_fill1 : t -> mask:t -> value:t -> t Sourceval masked_fill_1 : t -> mask:t -> value:t -> t Sourceval masked_scatter : t -> mask:t -> source:t -> t Sourceval masked_scatter_ : t -> mask:t -> source:t -> t Sourceval masked_select_backward : grad:t -> t -> mask:t -> t Sourceval masked_select_out : out:t -> t -> mask:t -> t Sourceval matrix_exp_backward : t -> grad:t -> t Sourceval matrix_power : t -> n:int -> t Sourceval matrix_rank : t -> symmetric:bool -> t Sourceval matrix_rank1 : t -> tol:float -> symmetric:bool -> t Sourceval max2 : t -> dim:int -> keepdim:bool -> t * t Sourceval max_out1 : max:t -> max_values:t -> t -> dim:int -> keepdim:bool -> t * t Sourceval max_pool1d :
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
t Sourceval max_pool1d_with_indices :
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
t * t Sourceval max_pool2d_with_indices :
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
t * t Sourceval max_pool2d_with_indices_backward :
grad_output:t ->
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
indices:t ->
t Sourceval max_pool2d_with_indices_backward_out :
grad_input:t ->
grad_output:t ->
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
indices:t ->
t Sourceval max_pool2d_with_indices_out :
out:t ->
indices:t ->
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
t * t Sourceval max_pool3d :
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
t Sourceval max_pool3d_with_indices :
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
t * t Sourceval max_pool3d_with_indices_backward :
grad_output:t ->
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
indices:t ->
t Sourceval max_pool3d_with_indices_backward_out :
grad_input:t ->
grad_output:t ->
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
indices:t ->
t Sourceval max_pool3d_with_indices_out :
out:t ->
indices:t ->
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
t * t Sourceval max_unpool2d : t -> indices:t -> output_size:int list -> t Sourceval max_unpool2d_backward :
grad_output:t ->
t ->
indices:t ->
output_size:int list ->
t Sourceval max_unpool2d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
indices:t ->
output_size:int list ->
t Sourceval max_unpool2d_out : out:t -> t -> indices:t -> output_size:int list -> t Sourceval max_unpool3d :
t ->
indices:t ->
output_size:int list ->
stride:int list ->
padding:int list ->
t Sourceval max_unpool3d_backward :
grad_output:t ->
t ->
indices:t ->
output_size:int list ->
stride:int list ->
padding:int list ->
t Sourceval max_unpool3d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
indices:t ->
output_size:int list ->
stride:int list ->
padding:int list ->
t Sourceval max_unpool3d_out :
out:t ->
t ->
indices:t ->
output_size:int list ->
stride:int list ->
padding:int list ->
t Sourceval min2 : t -> dim:int -> keepdim:bool -> t * t Sourceval min_out1 : min:t -> min_indices:t -> t -> dim:int -> keepdim:bool -> t * t Sourceval miopen_batch_norm :
t ->
weight:t ->
bias:t option ->
running_mean:t option ->
running_var:t option ->
training:bool ->
exponential_average_factor:float ->
epsilon:float ->
t * t * t Sourceval miopen_batch_norm_backward :
t ->
grad_output:t ->
weight:t ->
running_mean:t option ->
running_var:t option ->
save_mean:t option ->
save_var:t option ->
epsilon:float ->
t * t * t Sourceval miopen_convolution :
t ->
weight:t ->
bias:t option ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
t Sourceval miopen_convolution_backward_bias : grad_output:t -> t Sourceval miopen_convolution_backward_weight :
weight_size:int list ->
grad_output:t ->
t ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
t Sourceval miopen_convolution_transpose :
t ->
weight:t ->
bias:t option ->
padding:int list ->
output_padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
t Sourceval miopen_convolution_transpose_backward_weight :
weight_size:int list ->
grad_output:t ->
t ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
t Sourceval miopen_depthwise_convolution :
t ->
weight:t ->
bias:t option ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
t Sourceval miopen_depthwise_convolution_backward_weight :
weight_size:int list ->
grad_output:t ->
t ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
benchmark:bool ->
deterministic:bool ->
t Sourceval miopen_rnn :
t ->
weight:t list ->
weight_stride0:int ->
hx:t ->
cx:t option ->
mode:int ->
hidden_size:int ->
num_layers:int ->
batch_first:bool ->
dropout:float ->
train:bool ->
bidirectional:bool ->
batch_sizes:int list ->
dropout_state:t option ->
t * t * t * t * t Sourceval mkldnn_adaptive_avg_pool2d : t -> output_size:int list -> t Sourceval mkldnn_convolution :
t ->
weight:t ->
bias:t option ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
t Sourceval mkldnn_convolution_backward_weights :
weight_size:int list ->
grad_output:t ->
t ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
bias_defined:bool ->
t * t Sourceval mkldnn_linear : t -> weight:t -> bias:t option -> t Sourceval mkldnn_linear_backward_weights :
grad_output:t ->
t ->
weight:t ->
bias_defined:bool ->
t * t Sourceval mkldnn_max_pool2d :
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
t Sourceval mkldnn_max_pool3d :
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
t Sourceval mkldnn_reorder_conv2d_weight :
t ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
t Sourceval mkldnn_reorder_conv3d_weight :
t ->
padding:int list ->
stride:int list ->
dilation:int list ->
groups:int ->
t Sourceval mode : t -> dim:int -> keepdim:bool -> t * t Sourceval mode_out : values:t -> indices:t -> t -> dim:int -> keepdim:bool -> t * t Sourceval moveaxis : t -> source:int list -> destination:int list -> t Sourceval moveaxis1 : t -> source:int -> destination:int -> t Sourceval movedim : t -> source:int list -> destination:int list -> t Sourceval movedim1 : t -> source:int -> destination:int -> t Sourceval multinomial : t -> num_samples:int -> replacement:bool -> t Sourceval multinomial_out : out:t -> t -> num_samples:int -> replacement:bool -> t Sourceval nan_to_num : t -> nan:float -> posinf:float -> neginf:float -> t Sourceval nan_to_num_ : t -> nan:float -> posinf:float -> neginf:float -> t Sourceval nan_to_num_out :
out:t ->
t ->
nan:float ->
posinf:float ->
neginf:float ->
t Sourceval nanquantile : t -> q:float -> dim:int -> keepdim:bool -> t Sourceval nanquantile1 : t -> q:t -> dim:int -> keepdim:bool -> t Sourceval nanquantile_out : out:t -> t -> q:float -> dim:int -> keepdim:bool -> t Sourceval nanquantile_out1 : out:t -> t -> q:t -> dim:int -> keepdim:bool -> t Sourceval narrow : t -> dim:int -> start:int -> length:int -> t Sourceval narrow1 : t -> dim:int -> start:t -> length:int -> t Sourceval narrow_copy : t -> dim:int -> start:int -> length:int -> t Sourceval narrow_copy_out : out:t -> t -> dim:int -> start:int -> length:int -> t Sourceval native_batch_norm :
t ->
weight:t option ->
bias:t option ->
running_mean:t option ->
running_var:t option ->
training:bool ->
momentum:float ->
eps:float ->
t * t * t Sourceval native_batch_norm_out :
out:t ->
save_mean:t ->
save_invstd:t ->
t ->
weight:t option ->
bias:t option ->
running_mean:t option ->
running_var:t option ->
training:bool ->
momentum:float ->
eps:float ->
t * t * t Sourceval native_group_norm :
t ->
weight:t option ->
bias:t option ->
n:int ->
c:int ->
hxw:int ->
group:int ->
eps:float ->
t * t * t Sourceval native_layer_norm :
t ->
normalized_shape:int list ->
weight:t option ->
bias:t option ->
eps:float ->
t * t * t Sourceval nll_loss2d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
target:t ->
weight:t option ->
reduction:Torch_core.Reduction.t ->
ignore_index:int ->
total_weight:t ->
t Sourceval nll_loss_backward_out :
grad_input:t ->
grad_output:t ->
t ->
target:t ->
weight:t option ->
reduction:Torch_core.Reduction.t ->
ignore_index:int ->
total_weight:t ->
t Sourceval norm_except_dim : v:t -> pow:int -> dim:int -> t Sourceval normal_ : t -> mean:float -> std:float -> t Sourceval normal_out : out:t -> mean:t -> std:float -> t Sourceval normal_out1 : out:t -> mean:float -> std:t -> t Sourceval normal_out2 : out:t -> mean:t -> std:t -> t Sourceval normal_out3 : out:t -> mean:float -> std:float -> size:int list -> t Sourceval nuclear_norm : t -> keepdim:bool -> t Sourceval nuclear_norm1 : t -> dim:int list -> keepdim:bool -> t Sourceval nuclear_norm_out : out:t -> t -> keepdim:bool -> t Sourceval nuclear_norm_out1 : out:t -> t -> dim:int list -> keepdim:bool -> t Sourceval one_hot : t -> num_classes:int -> t Sourceval ones_out : out:t -> size:int list -> t Sourceval ormqr : t -> input2:t -> input3:t -> left:bool -> transpose:bool -> t Sourceval ormqr_out :
out:t ->
t ->
input2:t ->
input3:t ->
left:bool ->
transpose:bool ->
t Sourceval pairwise_distance :
x1:t ->
x2:t ->
p:float ->
eps:float ->
keepdim:bool ->
t Sourceval permute : t -> dims:int list -> t Sourceval pinverse : t -> rcond:float -> t Sourceval pixel_shuffle : t -> upscale_factor:int -> t Sourceval pixel_unshuffle : t -> downscale_factor:int -> t Sourceval polar_out : out:t -> abs:t -> angle:t -> t Sourceval polygamma_out : out:t -> n:int -> t -> t Sourceval prelu_backward : grad_output:t -> t -> weight:t -> t * t Sourceval put_ : t -> index:t -> source:t -> accumulate:bool -> t Sourceval q_per_channel_zero_points : t -> t Sourceval qr_out : q:t -> r:t -> t -> some:bool -> t * t Sourceval quantile : t -> q:float -> dim:int -> keepdim:bool -> t Sourceval quantile1 : t -> q:t -> dim:int -> keepdim:bool -> t Sourceval quantile_out : out:t -> t -> q:float -> dim:int -> keepdim:bool -> t Sourceval quantile_out1 : out:t -> t -> q:t -> dim:int -> keepdim:bool -> t Sourceval quantized_batch_norm :
t ->
weight:t option ->
bias:t option ->
mean:t ->
var:t ->
eps:float ->
output_scale:float ->
output_zero_point:int ->
t Sourceval quantized_max_pool1d :
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
t Sourceval quantized_max_pool2d :
t ->
kernel_size:int list ->
stride:int list ->
padding:int list ->
dilation:int list ->
ceil_mode:bool ->
t Sourceval rand_out : out:t -> size:int list -> t Sourceval randint_like : t -> high:int -> t Sourceval randint_like1 : t -> low:int -> high:int -> t Sourceval randint_out : out:t -> high:int -> size:int list -> t Sourceval randint_out1 : out:t -> low:int -> high:int -> size:int list -> t Sourceval randn_out : out:t -> size:int list -> t Sourceval random_2 : t -> from:int -> to_:int -> t Sourceval randperm_out : out:t -> n:int -> t Sourceval reflection_pad1d : t -> padding:int list -> t Sourceval reflection_pad1d_backward : grad_output:t -> t -> padding:int list -> t Sourceval reflection_pad1d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
padding:int list ->
t Sourceval reflection_pad1d_out : out:t -> t -> padding:int list -> t Sourceval reflection_pad2d : t -> padding:int list -> t Sourceval reflection_pad2d_backward : grad_output:t -> t -> padding:int list -> t Sourceval reflection_pad2d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
padding:int list ->
t Sourceval reflection_pad2d_out : out:t -> t -> padding:int list -> t Sourceval repeat : t -> repeats:int list -> t Sourceval repeat_interleave : repeats:t -> t Sourceval repeat_interleave1 : t -> repeats:t -> dim:int -> t Sourceval repeat_interleave2 : t -> repeats:int -> dim:int -> t Sourceval replication_pad1d : t -> padding:int list -> t Sourceval replication_pad1d_backward : grad_output:t -> t -> padding:int list -> t Sourceval replication_pad1d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
padding:int list ->
t Sourceval replication_pad1d_out : out:t -> t -> padding:int list -> t Sourceval replication_pad2d : t -> padding:int list -> t Sourceval replication_pad2d_backward : grad_output:t -> t -> padding:int list -> t Sourceval replication_pad2d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
padding:int list ->
t Sourceval replication_pad2d_out : out:t -> t -> padding:int list -> t Sourceval replication_pad3d : t -> padding:int list -> t Sourceval replication_pad3d_backward : grad_output:t -> t -> padding:int list -> t Sourceval replication_pad3d_backward_out :
grad_input:t ->
grad_output:t ->
t ->
padding:int list ->
t Sourceval replication_pad3d_out : out:t -> t -> padding:int list -> t Sourceval requires_grad_ : t -> requires_grad:bool -> t Sourceval reshape : t -> shape:int list -> t Sourceval resize_ : t -> size:int list -> t Sourceval resize_as_ : t -> the_template:t -> t Sourceval rnn_relu :
t ->
hx:t ->
params:t list ->
has_biases:bool ->
num_layers:int ->
dropout:float ->
train:bool ->
bidirectional:bool ->
batch_first:bool ->
t * t Sourceval rnn_relu1 :
data:t ->
batch_sizes:t ->
hx:t ->
params:t list ->
has_biases:bool ->
num_layers:int ->
dropout:float ->
train:bool ->
bidirectional:bool ->
t * t Sourceval rnn_relu_cell :
t ->
hx:t ->
w_ih:t ->
w_hh:t ->
b_ih:t option ->
b_hh:t option ->
t Sourceval rnn_tanh :
t ->
hx:t ->
params:t list ->
has_biases:bool ->
num_layers:int ->
dropout:float ->
train:bool ->
bidirectional:bool ->
batch_first:bool ->
t * t Sourceval rnn_tanh1 :
data:t ->
batch_sizes:t ->
hx:t ->
params:t list ->
has_biases:bool ->
num_layers:int ->
dropout:float ->
train:bool ->
bidirectional:bool ->
t * t Sourceval rnn_tanh_cell :
t ->
hx:t ->
w_ih:t ->
w_hh:t ->
b_ih:t option ->
b_hh:t option ->
t Sourceval roll : t -> shifts:int list -> dims:int list -> t Sourceval rot90 : t -> k:int -> dims:int list -> t Sourceval row_stack_out : out:t -> t list -> t Sourceval rrelu : t -> training:bool -> t Sourceval rrelu_ : t -> training:bool -> t Sourceval rrelu_with_noise : t -> noise:t -> training:bool -> t Sourceval rrelu_with_noise_ : t -> noise:t -> training:bool -> t Sourceval rrelu_with_noise_out : out:t -> t -> noise:t -> training:bool -> t Sourceval scatter : t -> dim:int -> index:t -> src:t -> t Sourceval scatter_ : t -> dim:int -> index:t -> src:t -> t Sourceval scatter_2 : t -> dim:int -> index:t -> src:t -> reduce:string -> t Sourceval scatter_add : t -> dim:int -> index:t -> src:t -> t Sourceval scatter_add_ : t -> dim:int -> index:t -> src:t -> t Sourceval searchsorted : sorted_sequence:t -> t -> out_int32:bool -> right:bool -> t Sourceval searchsorted_out :
out:t ->
sorted_sequence:t ->
t ->
out_int32:bool ->
right:bool ->
t Sourceval select_backward :
grad:t ->
input_sizes:int list ->
dim:int ->
index:int ->
t Sourceval set_requires_grad : t -> r:bool -> t Sourceval sigmoid_backward : grad_output:t -> output:t -> t Sourceval sigmoid_backward_out : grad_input:t -> grad_output:t -> output:t -> t Sourceval silu_backward : grad_output:t -> t -> t Sourceval slice : t -> dim:int -> start:int -> end_:int -> step:int -> t Sourceval slice_backward :
grad:t ->
input_sizes:int list ->
dim:int ->
start:int ->
end_:int ->
step:int ->
t Sourceval slow_conv3d :
t ->
weight:t ->
kernel_size:int list ->
bias:t option ->
stride:int list ->
padding:int list ->
t Sourceval slow_conv3d_out :
out:t ->
t ->
weight:t ->
kernel_size:int list ->
bias:t option ->
stride:int list ->
padding:int list ->
t Sourceval slow_conv_dilated2d :
t ->
weight:t ->
kernel_size:int list ->
bias:t option ->
stride:int list ->
padding:int list ->
dilation:int list ->
t Sourceval slow_conv_dilated3d :
t ->
weight:t ->
kernel_size:int list ->
bias:t option ->
stride:int list ->
padding:int list ->
dilation:int list ->
t Sourceval slow_conv_transpose2d :
t ->
weight:t ->
kernel_size:int list ->
bias:t option ->
stride:int list ->
padding:int list ->
output_padding:int list ->
dilation:int list ->
t Sourceval slow_conv_transpose2d_out :
out:t ->
t ->
weight:t ->
kernel_size:int list ->
bias:t option ->
stride:int list ->
padding:int list ->
output_padding:int list ->
dilation:int list ->
t Sourceval slow_conv_transpose3d :
t ->
weight:t ->
kernel_size:int list ->
bias:t option ->
stride:int list ->
padding:int list ->
output_padding:int list ->
dilation:int list ->
t Sourceval slow_conv_transpose3d_out :
out:t ->
t ->
weight:t ->
kernel_size:int list ->
bias:t option ->
stride:int list ->
padding:int list ->
output_padding:int list ->
dilation:int list ->
t Sourceval solve_out : solution:t -> lu:t -> t -> a:t -> t * t Sourceval sort : t -> dim:int -> descending:bool -> t * t Sourceval sort_out :
values:t ->
indices:t ->
t ->
dim:int ->
descending:bool ->
t * t Sourceval sparse_resize_ : t -> size:int list -> sparse_dim:int -> dense_dim:int -> t Sourceval sparse_resize_and_clear_ :
t ->
size:int list ->
sparse_dim:int ->
dense_dim:int ->
t Sourceval split : t -> split_size:int -> dim:int -> t list Sourceval split_with_sizes : t -> split_sizes:int list -> dim:int -> t list Sourceval sspaddmm_out : out:t -> t -> mat1:t -> mat2:t -> t Sourceval stack : t list -> dim:int -> t Sourceval stack_out : out:t -> t list -> dim:int -> t Sourceval std1 : t -> dim:int list -> unbiased:bool -> keepdim:bool -> t Sourceval std_mean : t -> unbiased:bool -> t * t Sourceval std_mean1 : t -> dim:int list -> unbiased:bool -> keepdim:bool -> t * t Sourceval std_out : out:t -> t -> dim:int list -> unbiased:bool -> keepdim:bool -> t Sourceval stft :
t ->
n_fft:int ->
hop_length:int ->
win_length:int ->
window:t option ->
normalized:bool ->
onesided:bool ->
return_complex:bool ->
t Sourceval sum_to_size : t -> size:int list -> t Sourceval svd : t -> some:bool -> compute_uv:bool -> t * t * t Sourceval svd_out :
u:t ->
s:t ->
v:t ->
t ->
some:bool ->
compute_uv:bool ->
t * t * t Sourceval swapaxes : t -> axis0:int -> axis1:int -> t Sourceval swapaxes_ : t -> axis0:int -> axis1:int -> t Sourceval swapdims : t -> dim0:int -> dim1:int -> t Sourceval swapdims_ : t -> dim0:int -> dim1:int -> t Sourceval symeig : t -> eigenvectors:bool -> upper:bool -> t * t Sourceval symeig_out : e:t -> v:t -> t -> eigenvectors:bool -> upper:bool -> t * t Sourceval take_backward : grad:t -> t -> index:t -> t Sourceval tanh_backward : grad_output:t -> output:t -> t Sourceval tanh_backward_out : grad_input:t -> grad_output:t -> output:t -> t Sourceval tensor_split : t -> sections:int -> dim:int -> t list Sourceval tensor_split1 : t -> indices:int list -> dim:int -> t list Sourceval tensor_split2 : t -> tensor_indices_or_sections:t -> dim:int -> t list Sourceval tensordot : t -> t -> dims_self:int list -> dims_other:int list -> t Sourceval tensordot_out :
out:t ->
t ->
t ->
dims_self:int list ->
dims_other:int list ->
t Sourceval tile : t -> dims:int list -> t Sourceval to3 : t -> t -> non_blocking:bool -> copy:bool -> t Sourceval to_dense_backward : grad:t -> t -> t Sourceval to_mkldnn_backward : grad:t -> t -> t Sourceval to_sparse1 : t -> sparse_dim:int -> t Sourceval topk : t -> k:int -> dim:int -> largest:bool -> sorted:bool -> t * t Sourceval topk_out :
values:t ->
indices:t ->
t ->
k:int ->
dim:int ->
largest:bool ->
sorted:bool ->
t * t Sourceval trace_backward : grad:t -> sizes:int list -> t Sourceval transpose : t -> dim0:int -> dim1:int -> t Sourceval transpose_ : t -> dim0:int -> dim1:int -> t Sourceval trapz1 : y:t -> dx:float -> dim:int -> t Sourceval triangular_solve :
t ->
a:t ->
upper:bool ->
transpose:bool ->
unitriangular:bool ->
t * t Sourceval triangular_solve_out :
x:t ->
m:t ->
t ->
a:t ->
upper:bool ->
transpose:bool ->
unitriangular:bool ->
t * t Sourceval tril_ : t -> diagonal:int -> t Sourceval tril_out : out:t -> t -> diagonal:int -> t Sourceval triplet_margin_loss :
anchor:t ->
positive:t ->
negative:t ->
margin:float ->
p:float ->
eps:float ->
swap:bool ->
reduction:Torch_core.Reduction.t ->
t Sourceval triu_ : t -> diagonal:int -> t Sourceval triu_out : out:t -> t -> diagonal:int -> t Sourceval unbind : t -> dim:int -> t list Sourceval unflatten : t -> dim:int -> sizes:int list -> t Sourceval unfold : t -> dimension:int -> size:int -> step:int -> t Sourceval unfold_backward :
grad_in:t ->
input_sizes:int list ->
dim:int ->
size:int ->
step:int ->
t Sourceval unique_consecutive :
t ->
return_inverse:bool ->
return_counts:bool ->
dim:int ->
t * t * t Sourceval unique_dim :
t ->
dim:int ->
sorted:bool ->
return_inverse:bool ->
return_counts:bool ->
t * t * t Sourceval unique_dim_consecutive :
t ->
dim:int ->
return_inverse:bool ->
return_counts:bool ->
t * t * t Sourceval unsafe_chunk : t -> chunks:int -> dim:int -> t list Sourceval unsafe_split : t -> split_size:int -> dim:int -> t list Sourceval unsafe_split_with_sizes : t -> split_sizes:int list -> dim:int -> t list Sourceval unsqueeze_ : t -> dim:int -> t Sourceval upsample_bicubic2d :
t ->
output_size:int list ->
align_corners:bool ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_bicubic2d_backward :
grad_output:t ->
output_size:int list ->
input_size:int list ->
align_corners:bool ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_bicubic2d_backward_out :
grad_input:t ->
grad_output:t ->
output_size:int list ->
input_size:int list ->
align_corners:bool ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_bicubic2d_out :
out:t ->
t ->
output_size:int list ->
align_corners:bool ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_bilinear2d :
t ->
output_size:int list ->
align_corners:bool ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_bilinear2d_backward :
grad_output:t ->
output_size:int list ->
input_size:int list ->
align_corners:bool ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_bilinear2d_backward_out :
grad_input:t ->
grad_output:t ->
output_size:int list ->
input_size:int list ->
align_corners:bool ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_bilinear2d_out :
out:t ->
t ->
output_size:int list ->
align_corners:bool ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_linear1d :
t ->
output_size:int list ->
align_corners:bool ->
scales:float ->
t Sourceval upsample_linear1d_backward :
grad_output:t ->
output_size:int list ->
input_size:int list ->
align_corners:bool ->
scales:float ->
t Sourceval upsample_linear1d_backward_out :
grad_input:t ->
grad_output:t ->
output_size:int list ->
input_size:int list ->
align_corners:bool ->
scales:float ->
t Sourceval upsample_linear1d_out :
out:t ->
t ->
output_size:int list ->
align_corners:bool ->
scales:float ->
t Sourceval upsample_nearest1d : t -> output_size:int list -> scales:float -> t Sourceval upsample_nearest1d_backward :
grad_output:t ->
output_size:int list ->
input_size:int list ->
scales:float ->
t Sourceval upsample_nearest1d_backward_out :
grad_input:t ->
grad_output:t ->
output_size:int list ->
input_size:int list ->
scales:float ->
t Sourceval upsample_nearest1d_out :
out:t ->
t ->
output_size:int list ->
scales:float ->
t Sourceval upsample_nearest2d :
t ->
output_size:int list ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_nearest2d_backward :
grad_output:t ->
output_size:int list ->
input_size:int list ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_nearest2d_backward_out :
grad_input:t ->
grad_output:t ->
output_size:int list ->
input_size:int list ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_nearest2d_out :
out:t ->
t ->
output_size:int list ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_nearest3d :
t ->
output_size:int list ->
scales_d:float ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_nearest3d_backward :
grad_output:t ->
output_size:int list ->
input_size:int list ->
scales_d:float ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_nearest3d_backward_out :
grad_input:t ->
grad_output:t ->
output_size:int list ->
input_size:int list ->
scales_d:float ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_nearest3d_out :
out:t ->
t ->
output_size:int list ->
scales_d:float ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_trilinear3d :
t ->
output_size:int list ->
align_corners:bool ->
scales_d:float ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_trilinear3d_backward :
grad_output:t ->
output_size:int list ->
input_size:int list ->
align_corners:bool ->
scales_d:float ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_trilinear3d_backward_out :
grad_input:t ->
grad_output:t ->
output_size:int list ->
input_size:int list ->
align_corners:bool ->
scales_d:float ->
scales_h:float ->
scales_w:float ->
t Sourceval upsample_trilinear3d_out :
out:t ->
t ->
output_size:int list ->
align_corners:bool ->
scales_d:float ->
scales_h:float ->
scales_w:float ->
t Sourceval value_selecting_reduction_backward :
grad:t ->
dim:int ->
indices:t ->
sizes:int list ->
keepdim:bool ->
t Sourceval vander : x:t -> n:int -> increasing:bool -> t Sourceval var1 : t -> dim:int list -> unbiased:bool -> keepdim:bool -> t Sourceval var_mean : t -> unbiased:bool -> t * t Sourceval var_mean1 : t -> dim:int list -> unbiased:bool -> keepdim:bool -> t * t Sourceval var_out : out:t -> t -> dim:int list -> unbiased:bool -> keepdim:bool -> t Sourceval view : t -> size:int list -> t Sourceval zeros_out : out:t -> size:int list -> t Sourceval int_vec :
?kind:[ `int | `int16 | `int64 | `int8 | `uint8 ] ->
int list ->
t Sourceval shape2_exn : t -> int * int Sourceval shape3_exn : t -> int * int * int Sourceval shape4_exn : t -> int * int * int * int Sourceval requires_grad : t -> bool Sourceval grad_set_enabled : bool -> bool Sourceval select : t -> dim:int -> index:int -> t Sourceval float_get : t -> int list -> float Sourceval int_get : t -> int list -> int Sourceval float_set : t -> int list -> float -> unit Sourceval int_set : t -> int list -> int -> unit Sourceval fill_float : t -> float -> unit Sourceval fill_int : t -> int -> unit Sourceval backward : ?keep_graph:bool -> ?create_graph:bool -> t -> unit Sourceval run_backward :
?keep_graph:bool ->
?create_graph:bool ->
t list ->
t list ->
t list Sourceval to_string : t -> line_size:int -> string Sourceval argmax : ?dim:int -> ?keepdim:bool -> t -> t Sourceval set_float2 : t -> int -> int -> float -> unit set_float2 t i j v sets the element at index i and j of bidimensional tensor t to v.
Sourceval set_float1 : t -> int -> float -> unit set_float1 t i v sets the element at index i of single dimension tensor t to v.
Sourceval set_int2 : t -> int -> int -> int -> unit set_int2 t i j v sets the element at index i and j of bidimensional tensor t to v.
Sourceval set_int1 : t -> int -> int -> unit set_int1 t i v sets the element at index i of single dimension tensor t to v.
Sourceval get_float2 : t -> int -> int -> float get_float2 t i j returns the current value from bidimensional tensor t at index i and j.
Sourceval get_float1 : t -> int -> float get_float1 t i returns the current value from single dimension tensor t at index i.
Sourceval get_int2 : t -> int -> int -> int get_int2 t i j returns the current value from bidimensional tensor t at indexes i and j.
Sourceval get_int1 : t -> int -> int get_int1 t i returns the current value from single dimension tensor t at index i.
Sourceval (.%{}) : t -> int list -> int Gets an integer element from an arbitrary dimension tensor.
Sourceval (.%{}<-) : t -> int list -> int -> unit Sets an integer element on an arbitrary dimension tensor.
Sourceval (.%.{}) : t -> int list -> float Gets a float element from an arbitrary dimension tensor.
Sourceval (.%.{}<-) : t -> int list -> float -> unit Sets a float element on an arbitrary dimension tensor.
Sourceval (.%[]) : t -> int -> int Gets an integer element from a single dimension tensor.
Sourceval (.%[]<-) : t -> int -> int -> unit Sets an integer element on a single dimension tensor.
Sourceval (.%.[]) : t -> int -> float Gets a float element from a single dimension tensor.
Sourceval (.%.[]<-) : t -> int -> float -> unit Sets a float element on a single dimension tensor.
Sourceval no_grad_ : t -> f:(t -> 'a) -> 'a no_grad_ t ~f runs f on t without tracking gradients for t.
Sourceval no_grad : (unit -> 'a) -> 'a Pointwise multiplication.
t += u modifies t by adding values from u in a pointwise way.
t -= u modifies t by subtracting values from u in a pointwise way.
t *= u modifies t by multiplying values from u in a pointwise way.
t /= u modifies t by dividing values from u in a pointwise way.
~-u returns the opposite of u, i.e. the same as Tensor.(f 0. - u).
eq t1 t2 returns true if t1 and t2 have the same kind, shape, and all their elements are identical.
mm t1 t2 returns the dot product or matrix multiplication between t1 and t2.
f v returns a scalar tensor with value v.
Creates a tensor with value 0.
Creates a tensor with value 1.
Creates a tensor with random values sampled uniformly between 0 and 1.
Creates a tensor with random values sampled using a standard normal distribution.
Creates a tensor from a list of float values.
to_type t ~type_ returns a tensor similar to t but converted to kind type_.
to_kind t ~kind returns a tensor similar to t but converted to kind kind.
kind t returns the kind of elements held in tensor t.
to_device t ~device returns a tensor identical to t but placed on device device.
Sourceval to_float0 : t -> float option to_float0 t returns the value held in a scalar (0-dimension) tensor. If the dimensions are incorrect, None is returned.
Sourceval to_float1 : t -> float array option to_float1 t returns the array of values held in a single dimension tensor. If the dimensions are incorrect, None is returned.
Sourceval to_float2 : t -> float array array option to_float2 t returns the array of values held in a bidimensional tensor. If the dimensions are incorrect, None is returned.
Sourceval to_float3 : t -> float array array array option to_float3 t returns the array of values held in a tridimensional tensor. If the dimensions are incorrect, None is returned.
Sourceval to_float0_exn : t -> float to_float0_exn t returns the value held in a scalar (0-dimension) tensor.
Sourceval to_float1_exn : t -> float array to_float1_exn t returns the array of values held in a single dimension tensor.
Sourceval to_float2_exn : t -> float array array to_float2_exn t returns the array of values held in a bidimensional tensor.
Sourceval to_float3_exn : t -> float array array array to_float3_exn t returns the array of values held in a tridimensional tensor.
Sourceval to_int0 : t -> int option to_int0 t returns the value held in a scalar (0-dimension) tensor. If the dimensions are incorrect, None is returned.
Sourceval to_int1 : t -> int array option to_int1 t returns the array of values held in a single dimension tensor. If the dimensions are incorrect, None is returned.
Sourceval to_int2 : t -> int array array option to_int2 t returns the array of values held in a bidimensional tensor. If the dimensions are incorrect, None is returned.
Sourceval to_int3 : t -> int array array array option to_int3 t returns the array of values held in a tridimensional tensor. If the dimensions are incorrect, None is returned.
Sourceval to_int0_exn : t -> int to_int0_exn t returns the value held in a scalar (0-dimension) tensor.
Sourceval to_int1_exn : t -> int array to_int1_exn t returns the array of values held in a single dimension tensor.
Sourceval to_int2_exn : t -> int array array to_int2_exn t returns the array of values held in a bidimensional tensor.
Sourceval to_int3_exn : t -> int array array array to_int3_exn t returns the array of values held in a tridimensional tensor.
of_float0 v creates a scalar (0-dimension) tensor with value v.
of_float1 vs creates a single dimension tensor with values vs.
of_float2 vs creates a two dimension tensor with values vs.
of_float3 vs creates a three dimension tensor with values vs.
of_int0 v creates a scalar (0-dimension) tensor with value v.
of_int1 vs creates a single dimension tensor with values vs.
of_int2 vs creates a two dimension tensor with values vs.
of_int3 vs creates a three dimension tensor with values vs.
Sourceval conv2d :
?padding:(int * int) ->
?dilation:(int * int) ->
?groups:int ->
t ->
t ->
t option ->
stride:(int * int) ->
t Sourceval conv_transpose2d :
?output_padding:(int * int) ->
?padding:(int * int) ->
?dilation:(int * int) ->
?groups:int ->
t ->
t ->
t option ->
stride:(int * int) ->
t Sourceval max_pool2d :
?padding:(int * int) ->
?dilation:(int * int) ->
?ceil_mode:bool ->
?stride:(int * int) ->
t ->
ksize:(int * int) ->
t Sourceval avg_pool2d :
?padding:(int * int) ->
?count_include_pad:bool ->
?ceil_mode:bool ->
?stride:(int * int) ->
?divisor_override:int ->
t ->
ksize:(int * int) ->
t Sourceval const_batch_norm : ?momentum:float -> ?eps:float -> t -> t of_bigarray ba returns a tensor which shape and kind are based on ba and holding the same data.
copy_to_bigarray t ba copies the data from t to ba. The dimensions of ba and its kind of element must match the dimension and kind of t.
to_bigarray t ~kind converts t to a bigarray using the c layout. kind has to be compatible with the element kind of t.
Sourceval dropout : t -> p:float -> is_training:bool -> t dropout t ~p ~is_training applies dropout to t with probability p. If is_training is false, t is returned. If is_training is true, a tensor similar to t is returned except that each element has probability p of being replaced by 0.
bce_loss t ~targets returns the binary cross entropy loss between t and targets. Elements of t are supposed to represent a probability distribution (according to the last dimension of t), so should be between 0 and 1 and sum to 1.
bce_loss_with_logits t ~targets returns the binary cross entropy loss between t and targets. Elements of t are logits, a softmax is used in this function to convert them to a probability distribution.
mse_loss t1 t2 returns the square of the difference between t1 and t2. reduction can be used to either keep the whole tensor or reduce it by averaging or summing.
pp is a pretty-printer for tensors to be used in top-levels such as utop or jupyter.
copy t returns a new copy of t with the same size and data which does not share storage with t.
shape_str t returns the shape/size of the current tensor as a string. This is useful for pretty printing.
Sourceval print_shape : ?name:string -> t -> unit print_shape ?name t prints the shape/size of t on stdout. If name is provided, this is also printed.
minimum t returns the minimum element of tensor t.
maximum t returns the maximum element of tensor t.
flatten t returns a flattened version of t, i.e. a single dimension version of the tensor. This is equivalent to Tensor.view t ~size:[-1].
squeeze_last t squeezes the last dimension of t, i.e. if this dimension has a size of 1 it is removed.
scale t f returns the result of multiplying tensor t by f.
to_list t returns the list of tensors extracted from the first dimension. This is the inverse of cat ~dim:0.
Sourceval min_values : t -> dim:int list -> keepdim:bool -> t Sourceval max_values : t -> dim:int list -> keepdim:bool -> t