Up – Package index » torch » Library torch » Torch » Tensor torch README Library torch Library torch.core Library torch.toplevel Library torch.vision Sources include module type of Torch_core.Wrapper.Tensor with type t := t include Torch_core.Wrapper_generated_intf.S
with type t := t
and type scalar := Torch_core.Wrapper.Scalar.t Source val adaptive_avg_pool1d : t -> output_size :int list -> t Source val adaptive_avg_pool2d : t -> output_size :int list -> t Source val adaptive_avg_pool2d_out : out :t -> t -> output_size :int list -> t Source val adaptive_avg_pool3d : t -> output_size :int list -> t Source val adaptive_avg_pool3d_backward : grad_output :t -> t -> t Source val adaptive_avg_pool3d_backward_out : grad_input :t -> grad_output :t -> t -> t Source val adaptive_avg_pool3d_out : out :t -> t -> output_size :int list -> t Source val adaptive_max_pool1d : t -> output_size :int list -> t * t Source val adaptive_max_pool2d : t -> output_size :int list -> t * t Source val adaptive_max_pool2d_backward : grad_output :t -> t -> indices :t -> t Source val adaptive_max_pool2d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
indices :t ->
t Source val adaptive_max_pool2d_out :
out :t ->
indices :t ->
t ->
output_size :int list ->
t * t Source val adaptive_max_pool3d : t -> output_size :int list -> t * t Source val adaptive_max_pool3d_backward : grad_output :t -> t -> indices :t -> t Source val adaptive_max_pool3d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
indices :t ->
t Source val adaptive_max_pool3d_out :
out :t ->
indices :t ->
t ->
output_size :int list ->
t * t Source val addbmm_ : t -> batch1 :t -> batch2 :t -> t Source val addbmm_out : out :t -> t -> batch1 :t -> batch2 :t -> t Source val addcdiv : t -> tensor1 :t -> tensor2 :t -> t Source val addcdiv_ : t -> tensor1 :t -> tensor2 :t -> t Source val addcdiv_out : out :t -> t -> tensor1 :t -> tensor2 :t -> t Source val addcmul : t -> tensor1 :t -> tensor2 :t -> t Source val addcmul_ : t -> tensor1 :t -> tensor2 :t -> t Source val addcmul_out : out :t -> t -> tensor1 :t -> tensor2 :t -> t Source val addmm_out : out :t -> t -> mat1 :t -> mat2 :t -> t Source val addmv_out : out :t -> t -> mat :t -> vec :t -> t Source val addr_out : out :t -> t -> vec1 :t -> vec2 :t -> t Source val affine_grid_generator : theta :t -> size :int list -> t Source val affine_grid_generator_backward : grad :t -> size :int list -> t Source val all1 : t -> dim :int -> keepdim :bool -> t Source val all_out : out :t -> t -> dim :int -> keepdim :bool -> t Source val alpha_dropout : t -> p :float -> train :bool -> t Source val alpha_dropout_ : t -> p :float -> train :bool -> t Source val any1 : t -> dim :int -> keepdim :bool -> t Source val any_out : out :t -> t -> dim :int -> keepdim :bool -> t Source val argmin : t -> dim :int -> keepdim :bool -> t Source val argsort : t -> dim :int -> descending :bool -> t Source val as_strided :
t ->
size :int list ->
stride :int list ->
storage_offset :int ->
t Source val as_strided_ :
t ->
size :int list ->
stride :int list ->
storage_offset :int ->
t Source val avg_pool1d :
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
ceil_mode :bool ->
count_include_pad :bool ->
t Source val avg_pool2d_backward :
grad_output :t ->
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
ceil_mode :bool ->
count_include_pad :bool ->
t Source val avg_pool2d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
ceil_mode :bool ->
count_include_pad :bool ->
t Source val avg_pool2d_out :
out :t ->
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
ceil_mode :bool ->
count_include_pad :bool ->
t Source val avg_pool3d :
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
ceil_mode :bool ->
count_include_pad :bool ->
t Source val avg_pool3d_backward :
grad_output :t ->
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
ceil_mode :bool ->
count_include_pad :bool ->
t Source val avg_pool3d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
ceil_mode :bool ->
count_include_pad :bool ->
t Source val avg_pool3d_out :
out :t ->
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
ceil_mode :bool ->
count_include_pad :bool ->
t Source val baddbmm : t -> batch1 :t -> batch2 :t -> t Source val baddbmm_ : t -> batch1 :t -> batch2 :t -> t Source val baddbmm_out : out :t -> t -> batch1 :t -> batch2 :t -> t Source val batch_norm :
t ->
weight :t option ->
bias :t option ->
running_mean :t option ->
running_var :t option ->
training :bool ->
momentum :float ->
eps :float ->
cudnn_enabled :bool ->
t Source val batch_norm_backward_elemt :
grad_out :t ->
t ->
mean :t ->
invstd :t ->
weight :t option ->
mean_dy :t ->
mean_dy_xmu :t ->
t Source val batch_norm_backward_reduce :
grad_out :t ->
t ->
mean :t ->
invstd :t ->
input_g :bool ->
weight_g :bool ->
bias_g :bool ->
t * t * t * t Source val batch_norm_elemt :
t ->
weight :t option ->
bias :t option ->
mean :t ->
invstd :t ->
eps :float ->
t Source val batch_norm_gather_stats :
t ->
mean :t ->
invstd :t ->
running_mean :t option ->
running_var :t option ->
momentum :float ->
eps :float ->
count :int ->
t * t Source val batch_norm_stats : t -> eps :float -> t * t Source val batch_norm_update_stats :
t ->
running_mean :t option ->
running_var :t option ->
momentum :float ->
t * t Source val bernoulli1 : t -> p :float -> t Source val bernoulli_1 : t -> p :float -> t Source val bilinear : input1 :t -> input2 :t -> weight :t -> bias :t option -> t Source val binary_cross_entropy :
t ->
target :t ->
weight :t option ->
reduction :int ->
t Source val binary_cross_entropy_backward :
grad_output :t ->
t ->
target :t ->
weight :t ->
reduction :int ->
t Source val binary_cross_entropy_backward_out :
grad_input :t ->
grad_output :t ->
t ->
target :t ->
weight :t ->
reduction :int ->
t Source val binary_cross_entropy_out :
out :t ->
t ->
target :t ->
weight :t option ->
reduction :int ->
t Source val binary_cross_entropy_with_logits :
t ->
target :t ->
weight :t option ->
pos_weight :t option ->
reduction :int ->
t Source val binary_cross_entropy_with_logits_backward :
grad_output :t ->
t ->
target :t ->
weight :t option ->
pos_weight :t option ->
reduction :int ->
t Source val bincount : t -> weights :t option -> minlength :int -> t Source val broadcast_tensors : t list -> t list Source val cat_out : out :t -> t list -> dim :int -> t Source val cauchy_ : t -> median :float -> sigma :float -> t Source val cdist : x1 :t -> x2 :t -> p :float -> t Source val chain_matmul : matrices :t list -> t Source val cholesky : t -> upper :bool -> t Source val cholesky_inverse : t -> upper :bool -> t Source val cholesky_inverse_out : out :t -> t -> upper :bool -> t Source val cholesky_out : out :t -> t -> upper :bool -> t Source val cholesky_solve : t -> input2 :t -> upper :bool -> t Source val cholesky_solve_out : out :t -> t -> input2 :t -> upper :bool -> t Source val chunk : t -> chunks :int -> dim :int -> t list Source val combinations : t -> r :int -> with_replacement :bool -> t Source val constant_pad_nd : t -> pad :int list -> t Source val conv1d :
t ->
weight :t ->
bias :t option ->
stride :int list ->
padding :int list ->
dilation :int list ->
groups :int ->
t Source val conv3d :
t ->
weight :t ->
bias :t option ->
stride :int list ->
padding :int list ->
dilation :int list ->
groups :int ->
t Source val conv_tbc : t -> weight :t -> bias :t -> pad :int -> t Source val conv_tbc_backward : t -> t -> weight :t -> bias :t -> pad :int -> t * t * t Source val conv_transpose1d :
t ->
weight :t ->
bias :t option ->
stride :int list ->
padding :int list ->
output_padding :int list ->
groups :int ->
dilation :int list ->
t Source val conv_transpose3d :
t ->
weight :t ->
bias :t option ->
stride :int list ->
padding :int list ->
output_padding :int list ->
groups :int ->
dilation :int list ->
t Source val convolution :
t ->
weight :t ->
bias :t option ->
stride :int list ->
padding :int list ->
dilation :int list ->
transposed :bool ->
output_padding :int list ->
groups :int ->
t Source val copy_sparse_to_sparse_ : t -> src :t -> non_blocking :bool -> t Source val cosine_embedding_loss :
input1 :t ->
input2 :t ->
target :t ->
margin :float ->
reduction :int ->
t Source val cosine_similarity : x1 :t -> x2 :t -> dim :int -> eps :float -> t Source val cross_out : out :t -> t -> t -> dim :int -> t Source val ctc_loss :
log_probs :t ->
targets :t ->
input_lengths :int list ->
target_lengths :int list ->
blank :int ->
reduction :int ->
zero_infinity :bool ->
t Source val ctc_loss1 :
log_probs :t ->
targets :t ->
input_lengths :t ->
target_lengths :t ->
blank :int ->
reduction :int ->
zero_infinity :bool ->
t Source val cudnn_affine_grid_generator :
theta :t ->
n :int ->
c :int ->
h :int ->
w :int ->
t Source val cudnn_affine_grid_generator_backward :
grad :t ->
n :int ->
c :int ->
h :int ->
w :int ->
t Source val cudnn_batch_norm :
t ->
weight :t ->
bias :t option ->
running_mean :t option ->
running_var :t option ->
training :bool ->
exponential_average_factor :float ->
epsilon :float ->
t * t * t Source val cudnn_batch_norm_backward :
t ->
grad_output :t ->
weight :t ->
running_mean :t option ->
running_var :t option ->
save_mean :t option ->
save_var :t option ->
epsilon :float ->
t * t * t Source val cudnn_convolution :
t ->
weight :t ->
bias :t option ->
padding :int list ->
stride :int list ->
dilation :int list ->
groups :int ->
benchmark :bool ->
deterministic :bool ->
t Source val cudnn_convolution_backward_bias : grad_output :t -> t Source val cudnn_convolution_backward_weight :
weight_size :int list ->
grad_output :t ->
t ->
padding :int list ->
stride :int list ->
dilation :int list ->
groups :int ->
benchmark :bool ->
deterministic :bool ->
t Source val cudnn_convolution_transpose :
t ->
weight :t ->
bias :t option ->
padding :int list ->
output_padding :int list ->
stride :int list ->
dilation :int list ->
groups :int ->
benchmark :bool ->
deterministic :bool ->
t Source val cudnn_convolution_transpose_backward_bias : grad_output :t -> t Source val cudnn_convolution_transpose_backward_weight :
weight_size :int list ->
grad_output :t ->
t ->
padding :int list ->
stride :int list ->
dilation :int list ->
groups :int ->
benchmark :bool ->
deterministic :bool ->
t Source val cudnn_grid_sampler : t -> grid :t -> t Source val cudnn_grid_sampler_backward : t -> grid :t -> grad_output :t -> t * t Source val cumprod_out : out :t -> t -> dim :int -> t Source val cumsum_out : out :t -> t -> dim :int -> t Source val diag_embed : t -> offset :int -> dim1 :int -> dim2 :int -> t Source val diag_out : out :t -> t -> diagonal :int -> t Source val diagflat : t -> offset :int -> t Source val diagonal : t -> offset :int -> dim1 :int -> dim2 :int -> t Source val dropout_ : t -> p :float -> train :bool -> t Source val eig : t -> eigenvectors :bool -> t * t Source val eig_out : e :t -> v :t -> t -> eigenvectors :bool -> t * t Source val embedding :
weight :t ->
indices :t ->
padding_idx :int ->
scale_grad_by_freq :bool ->
sparse :bool ->
t Source val embedding_backward :
grad :t ->
indices :t ->
num_weights :int ->
padding_idx :int ->
scale_grad_by_freq :bool ->
sparse :bool ->
t Source val embedding_bag :
weight :t ->
indices :t ->
offsets :t ->
scale_grad_by_freq :bool ->
mode :int ->
sparse :bool ->
per_sample_weights :t option ->
t * t * t * t Source val embedding_dense_backward :
grad_output :t ->
indices :t ->
num_weights :int ->
padding_idx :int ->
scale_grad_by_freq :bool ->
t Source val embedding_renorm_ :
t ->
indices :t ->
max_norm :float ->
norm_type :float ->
t Source val embedding_sparse_backward :
grad :t ->
indices :t ->
num_weights :int ->
padding_idx :int ->
scale_grad_by_freq :bool ->
t Source val empty_out : out :t -> size :int list -> t Source val expand : t -> size :int list -> implicit :bool -> t Source val exponential_ : t -> lambd :float -> t Source val eye_out1 : out :t -> n :int -> m :int -> t Source val fbgemm_pack_quantized_matrix : t -> k :int -> n :int -> t Source val feature_alpha_dropout : t -> p :float -> train :bool -> t Source val feature_alpha_dropout_ : t -> p :float -> train :bool -> t Source val feature_dropout : t -> p :float -> train :bool -> t Source val feature_dropout_ : t -> p :float -> train :bool -> t Source val fft : t -> signal_ndim :int -> normalized :bool -> t Source val flip : t -> dims :int list -> t Source val fractional_max_pool2d :
t ->
kernel_size :int list ->
output_size :int list ->
random_samples :t ->
t * t Source val fractional_max_pool2d_backward :
grad_output :t ->
t ->
kernel_size :int list ->
output_size :int list ->
indices :t ->
t Source val fractional_max_pool2d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
kernel_size :int list ->
output_size :int list ->
indices :t ->
t Source val fractional_max_pool2d_out :
output :t ->
indices :t ->
t ->
kernel_size :int list ->
output_size :int list ->
random_samples :t ->
t * t Source val fractional_max_pool3d :
t ->
kernel_size :int list ->
output_size :int list ->
random_samples :t ->
t * t Source val fractional_max_pool3d_backward :
grad_output :t ->
t ->
kernel_size :int list ->
output_size :int list ->
indices :t ->
t Source val fractional_max_pool3d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
kernel_size :int list ->
output_size :int list ->
indices :t ->
t Source val fractional_max_pool3d_out :
output :t ->
indices :t ->
t ->
kernel_size :int list ->
output_size :int list ->
random_samples :t ->
t * t Source val frobenius_norm1 : t -> dim :int list -> keepdim :bool -> t Source val frobenius_norm_out : out :t -> t -> dim :int list -> keepdim :bool -> t Source val gather : t -> dim :int -> index :t -> sparse_grad :bool -> t Source val gather_out : out :t -> t -> dim :int -> index :t -> sparse_grad :bool -> t Source val geometric_ : t -> p :float -> t Source val glu_backward : grad_output :t -> t -> dim :int -> t Source val glu_backward_out : grad_input :t -> grad_output :t -> t -> dim :int -> t Source val glu_out : out :t -> t -> dim :int -> t Source val grid_sampler :
t ->
grid :t ->
interpolation_mode :int ->
padding_mode :int ->
t Source val grid_sampler_2d :
t ->
grid :t ->
interpolation_mode :int ->
padding_mode :int ->
t Source val grid_sampler_2d_backward :
grad_output :t ->
t ->
grid :t ->
interpolation_mode :int ->
padding_mode :int ->
t * t Source val grid_sampler_3d :
t ->
grid :t ->
interpolation_mode :int ->
padding_mode :int ->
t Source val grid_sampler_3d_backward :
grad_output :t ->
t ->
grid :t ->
interpolation_mode :int ->
padding_mode :int ->
t * t Source val group_norm :
t ->
num_groups :int ->
weight :t option ->
bias :t option ->
eps :float ->
cudnn_enabled :bool ->
t Source val gru :
t ->
hx :t ->
params :t list ->
has_biases :bool ->
num_layers :int ->
dropout :float ->
train :bool ->
bidirectional :bool ->
batch_first :bool ->
t * t Source val gru1 :
data :t ->
batch_sizes :t ->
hx :t ->
params :t list ->
has_biases :bool ->
num_layers :int ->
dropout :float ->
train :bool ->
bidirectional :bool ->
t * t Source val gru_cell :
t ->
hx :t ->
w_ih :t ->
w_hh :t ->
b_ih :t option ->
b_hh :t option ->
t Source val hinge_embedding_loss : t -> target :t -> margin :float -> reduction :int -> t Source val histc_out : out :t -> t -> bins :int -> t Source val hspmm_out : out :t -> mat1 :t -> mat2 :t -> t Source val ifft : t -> signal_ndim :int -> normalized :bool -> t Source val index_add : t -> dim :int -> index :t -> source :t -> t Source val index_add_ : t -> dim :int -> index :t -> source :t -> t Source val index_copy : t -> dim :int -> index :t -> source :t -> t Source val index_copy_ : t -> dim :int -> index :t -> source :t -> t Source val index_fill1 : t -> dim :int -> index :t -> value :t -> t Source val index_fill_1 : t -> dim :int -> index :t -> value :t -> t Source val index_put : t -> indices :t list -> values :t -> accumulate :bool -> t Source val index_put_ : t -> indices :t list -> values :t -> accumulate :bool -> t Source val index_select : t -> dim :int -> index :t -> t Source val index_select_out : out :t -> t -> dim :int -> index :t -> t Source val instance_norm :
t ->
weight :t option ->
bias :t option ->
running_mean :t option ->
running_var :t option ->
use_input_stats :bool ->
momentum :float ->
eps :float ->
cudnn_enabled :bool ->
t Source val irfft :
t ->
signal_ndim :int ->
normalized :bool ->
onesided :bool ->
signal_sizes :int list ->
t Source val isclose : t -> t -> rtol :float -> atol :float -> equal_nan :bool -> t Source val kl_div : t -> target :t -> reduction :int -> t Source val kl_div_backward : grad_output :t -> t -> target :t -> reduction :int -> t Source val kthvalue : t -> k :int -> dim :int -> keepdim :bool -> t * t Source val kthvalue_out :
values :t ->
indices :t ->
t ->
k :int ->
dim :int ->
keepdim :bool ->
t * t Source val l1_loss : t -> target :t -> reduction :int -> t Source val l1_loss_backward : grad_output :t -> t -> target :t -> reduction :int -> t Source val l1_loss_backward_out :
grad_input :t ->
grad_output :t ->
t ->
target :t ->
reduction :int ->
t Source val l1_loss_out : out :t -> t -> target :t -> reduction :int -> t Source val layer_norm :
t ->
normalized_shape :int list ->
weight :t option ->
bias :t option ->
eps :float ->
cudnn_enable :bool ->
t Source val lerp_out1 : out :t -> t -> end_ :t -> weight :t -> t Source val linear : t -> weight :t -> bias :t option -> t Source val log_normal_ : t -> mean :float -> std :float -> t Source val log_sigmoid_backward : grad_output :t -> t -> buffer :t -> t Source val log_sigmoid_backward_out :
grad_input :t ->
grad_output :t ->
t ->
buffer :t ->
t Source val log_softmax : t -> dim :int -> t Source val logsumexp : t -> dim :int list -> keepdim :bool -> t Source val logsumexp_out : out :t -> t -> dim :int list -> keepdim :bool -> t Source val lstm :
t ->
hx :t list ->
params :t list ->
has_biases :bool ->
num_layers :int ->
dropout :float ->
train :bool ->
bidirectional :bool ->
batch_first :bool ->
t * t * t Source val lstm1 :
data :t ->
batch_sizes :t ->
hx :t list ->
params :t list ->
has_biases :bool ->
num_layers :int ->
dropout :float ->
train :bool ->
bidirectional :bool ->
t * t * t Source val lstm_cell :
t ->
hx :t list ->
w_ih :t ->
w_hh :t ->
b_ih :t option ->
b_hh :t option ->
t * t Source val lu_solve : t -> lu_data :t -> lu_pivots :t -> t Source val lu_solve_out : out :t -> t -> lu_data :t -> lu_pivots :t -> t Source val margin_ranking_loss :
input1 :t ->
input2 :t ->
target :t ->
margin :float ->
reduction :int ->
t Source val masked_fill1 : t -> mask :t -> value :t -> t Source val masked_fill_1 : t -> mask :t -> value :t -> t Source val masked_scatter : t -> mask :t -> source :t -> t Source val masked_scatter_ : t -> mask :t -> source :t -> t Source val masked_select_out : out :t -> t -> mask :t -> t Source val matrix_power : t -> n :int -> t Source val matrix_rank : t -> symmetric :bool -> t Source val matrix_rank1 : t -> tol :float -> symmetric :bool -> t Source val max2 : t -> dim :int -> keepdim :bool -> t * t Source val max_out1 : max :t -> max_values :t -> t -> dim :int -> keepdim :bool -> t * t Source val max_pool1d :
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
dilation :int list ->
ceil_mode :bool ->
t Source val max_pool1d_with_indices :
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
dilation :int list ->
ceil_mode :bool ->
t * t Source val max_pool2d_with_indices :
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
dilation :int list ->
ceil_mode :bool ->
t * t Source val max_pool2d_with_indices_backward :
grad_output :t ->
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
dilation :int list ->
ceil_mode :bool ->
indices :t ->
t Source val max_pool2d_with_indices_backward_out :
grad_input :t ->
grad_output :t ->
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
dilation :int list ->
ceil_mode :bool ->
indices :t ->
t Source val max_pool2d_with_indices_out :
output :t ->
indices :t ->
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
dilation :int list ->
ceil_mode :bool ->
t * t Source val max_pool3d :
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
dilation :int list ->
ceil_mode :bool ->
t Source val max_pool3d_with_indices :
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
dilation :int list ->
ceil_mode :bool ->
t * t Source val max_pool3d_with_indices_backward :
grad_output :t ->
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
dilation :int list ->
ceil_mode :bool ->
indices :t ->
t Source val max_pool3d_with_indices_backward_out :
grad_input :t ->
grad_output :t ->
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
dilation :int list ->
ceil_mode :bool ->
indices :t ->
t Source val max_pool3d_with_indices_out :
output :t ->
indices :t ->
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
dilation :int list ->
ceil_mode :bool ->
t * t Source val max_unpool2d : t -> indices :t -> output_size :int list -> t Source val max_unpool2d_backward :
grad_output :t ->
t ->
indices :t ->
output_size :int list ->
t Source val max_unpool2d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
indices :t ->
output_size :int list ->
t Source val max_unpool2d_out : out :t -> t -> indices :t -> output_size :int list -> t Source val max_unpool3d :
t ->
indices :t ->
output_size :int list ->
stride :int list ->
padding :int list ->
t Source val max_unpool3d_backward :
grad_output :t ->
t ->
indices :t ->
output_size :int list ->
stride :int list ->
padding :int list ->
t Source val max_unpool3d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
indices :t ->
output_size :int list ->
stride :int list ->
padding :int list ->
t Source val max_unpool3d_out :
out :t ->
t ->
indices :t ->
output_size :int list ->
stride :int list ->
padding :int list ->
t Source val max_values : t -> dim :int list -> keepdim :bool -> t Source val mean2 : t -> dim :int list -> keepdim :bool -> t Source val mean_out : out :t -> t -> dim :int list -> keepdim :bool -> t Source val min2 : t -> dim :int -> keepdim :bool -> t * t Source val min_out1 : min :t -> min_indices :t -> t -> dim :int -> keepdim :bool -> t * t Source val min_values : t -> dim :int list -> keepdim :bool -> t Source val miopen_batch_norm :
t ->
weight :t ->
bias :t option ->
running_mean :t option ->
running_var :t option ->
training :bool ->
exponential_average_factor :float ->
epsilon :float ->
t * t * t Source val miopen_batch_norm_backward :
t ->
grad_output :t ->
weight :t ->
running_mean :t option ->
running_var :t option ->
save_mean :t option ->
save_var :t option ->
epsilon :float ->
t * t * t Source val miopen_convolution :
t ->
weight :t ->
bias :t option ->
padding :int list ->
stride :int list ->
dilation :int list ->
groups :int ->
benchmark :bool ->
deterministic :bool ->
t Source val miopen_convolution_backward_bias : grad_output :t -> t Source val miopen_convolution_backward_weight :
weight_size :int list ->
grad_output :t ->
t ->
padding :int list ->
stride :int list ->
dilation :int list ->
groups :int ->
benchmark :bool ->
deterministic :bool ->
t Source val miopen_convolution_transpose :
t ->
weight :t ->
bias :t option ->
padding :int list ->
output_padding :int list ->
stride :int list ->
dilation :int list ->
groups :int ->
benchmark :bool ->
deterministic :bool ->
t Source val miopen_convolution_transpose_backward_weight :
weight_size :int list ->
grad_output :t ->
t ->
padding :int list ->
stride :int list ->
dilation :int list ->
groups :int ->
benchmark :bool ->
deterministic :bool ->
t Source val miopen_depthwise_convolution :
t ->
weight :t ->
bias :t option ->
padding :int list ->
stride :int list ->
dilation :int list ->
groups :int ->
benchmark :bool ->
deterministic :bool ->
t Source val miopen_depthwise_convolution_backward_weight :
weight_size :int list ->
grad_output :t ->
t ->
padding :int list ->
stride :int list ->
dilation :int list ->
groups :int ->
benchmark :bool ->
deterministic :bool ->
t Source val mkldnn_convolution :
t ->
weight :t ->
bias :t option ->
padding :int list ->
stride :int list ->
dilation :int list ->
groups :int ->
t Source val mkldnn_convolution_backward_weights :
weight_size :int list ->
grad_output :t ->
t ->
padding :int list ->
stride :int list ->
dilation :int list ->
groups :int ->
bias_defined :bool ->
t * t Source val mkldnn_linear : t -> weight :t -> bias :t option -> t Source val mkldnn_max_pool2d :
t ->
kernel_size :int list ->
stride :int list ->
padding :int list ->
dilation :int list ->
ceil_mode :bool ->
t Source val mkldnn_reorder_conv2d_weight :
t ->
padding :int list ->
stride :int list ->
dilation :int list ->
groups :int ->
t Source val mkldnn_reshape : t -> shape :int list -> t Source val mode : t -> dim :int -> keepdim :bool -> t * t Source val mode_out : values :t -> indices :t -> t -> dim :int -> keepdim :bool -> t * t Source val mse_loss_backward : grad_output :t -> t -> target :t -> reduction :int -> t Source val mse_loss_backward_out :
grad_input :t ->
grad_output :t ->
t ->
target :t ->
reduction :int ->
t Source val mse_loss_out : out :t -> t -> target :t -> reduction :int -> t Source val multilabel_margin_loss : t -> target :t -> reduction :int -> t Source val multilabel_margin_loss_backward :
grad_output :t ->
t ->
target :t ->
reduction :int ->
is_target :t ->
t Source val multilabel_margin_loss_backward_out :
grad_input :t ->
grad_output :t ->
t ->
target :t ->
reduction :int ->
is_target :t ->
t Source val multilabel_margin_loss_out : out :t -> t -> target :t -> reduction :int -> t Source val multinomial : t -> num_samples :int -> replacement :bool -> t Source val multinomial_out : out :t -> t -> num_samples :int -> replacement :bool -> t Source val narrow : t -> dim :int -> start :int -> length :int -> t Source val narrow_copy : t -> dim :int -> start :int -> length :int -> t Source val native_batch_norm :
t ->
weight :t option ->
bias :t option ->
running_mean :t option ->
running_var :t option ->
training :bool ->
momentum :float ->
eps :float ->
t * t * t Source val nll_loss2d :
t ->
target :t ->
weight :t option ->
reduction :int ->
ignore_index :int ->
t Source val nll_loss2d_backward :
grad_output :t ->
t ->
target :t ->
weight :t option ->
reduction :int ->
ignore_index :int ->
total_weight :t ->
t Source val nll_loss2d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
target :t ->
weight :t option ->
reduction :int ->
ignore_index :int ->
total_weight :t ->
t Source val nll_loss2d_out :
out :t ->
t ->
target :t ->
weight :t option ->
reduction :int ->
ignore_index :int ->
t Source val nll_loss_backward :
grad_output :t ->
t ->
target :t ->
weight :t option ->
reduction :int ->
ignore_index :int ->
total_weight :t ->
t Source val nll_loss_backward_out :
grad_input :t ->
grad_output :t ->
t ->
target :t ->
weight :t option ->
reduction :int ->
ignore_index :int ->
total_weight :t ->
t Source val nll_loss_out :
out :t ->
t ->
target :t ->
weight :t option ->
reduction :int ->
ignore_index :int ->
t Source val norm_except_dim : v :t -> pow :int -> dim :int -> t Source val normal : mean :t -> std :float -> t Source val normal1 : mean :float -> std :t -> t Source val normal_ : t -> mean :float -> std :float -> t Source val normal_out : out :t -> mean :t -> std :float -> t Source val normal_out1 : out :t -> mean :float -> std :t -> t Source val normal_out2 : out :t -> mean :t -> std :t -> t Source val nuclear_norm : t -> keepdim :bool -> t Source val nuclear_norm_out : out :t -> t -> keepdim :bool -> t Source val one_hot : t -> num_classes :int -> t Source val ones_out : out :t -> size :int list -> t Source val ormqr : t -> input2 :t -> input3 :t -> left :bool -> transpose :bool -> t Source val ormqr_out :
out :t ->
t ->
input2 :t ->
input3 :t ->
left :bool ->
transpose :bool ->
t Source val pairwise_distance :
x1 :t ->
x2 :t ->
p :float ->
eps :float ->
keepdim :bool ->
t Source val permute : t -> dims :int list -> t Source val pinverse : t -> rcond :float -> t Source val pixel_shuffle : t -> upscale_factor :int -> t Source val polygamma_out : out :t -> n :int -> t -> t Source val pow_out1 : out :t -> t -> exponent :t -> t Source val prelu_backward : grad_output :t -> t -> weight :t -> t * t Source val prod2 : t -> dim :int -> keepdim :bool -> t Source val prod_out : out :t -> t -> dim :int -> keepdim :bool -> t Source val pstrf_out : u :t -> pivot :t -> t -> upper :bool -> t * t Source val put_ : t -> index :t -> source :t -> accumulate :bool -> t Source val quantize_linear : t -> scale :float -> zero_point :int -> t Source val quantized_lstm :
t ->
hx :t list ->
params :t list ->
has_biases :bool ->
num_layers :int ->
dropout :float ->
train :bool ->
bidirectional :bool ->
batch_first :bool ->
t * t * t Source val rand_out : out :t -> size :int list -> t Source val randint_like : t -> high :int -> t Source val randint_like1 : t -> low :int -> high :int -> t Source val randint_out : out :t -> high :int -> size :int list -> t Source val randint_out1 : out :t -> low :int -> high :int -> size :int list -> t Source val randn_out : out :t -> size :int list -> t Source val random_2 : t -> from :int -> to_ :int -> t Source val randperm_out : out :t -> n :int -> t Source val reflection_pad1d : t -> padding :int list -> t Source val reflection_pad1d_backward : grad_output :t -> t -> padding :int list -> t Source val reflection_pad1d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
padding :int list ->
t Source val reflection_pad1d_out : out :t -> t -> padding :int list -> t Source val reflection_pad2d : t -> padding :int list -> t Source val reflection_pad2d_backward : grad_output :t -> t -> padding :int list -> t Source val reflection_pad2d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
padding :int list ->
t Source val reflection_pad2d_out : out :t -> t -> padding :int list -> t Source val repeat : t -> repeats :int list -> t Source val repeat_interleave : repeats :t -> t Source val repeat_interleave1 : t -> repeats :t -> dim :int -> t Source val repeat_interleave2 : t -> repeats :int -> dim :int -> t Source val replication_pad1d : t -> padding :int list -> t Source val replication_pad1d_backward : grad_output :t -> t -> padding :int list -> t Source val replication_pad1d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
padding :int list ->
t Source val replication_pad1d_out : out :t -> t -> padding :int list -> t Source val replication_pad2d : t -> padding :int list -> t Source val replication_pad2d_backward : grad_output :t -> t -> padding :int list -> t Source val replication_pad2d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
padding :int list ->
t Source val replication_pad2d_out : out :t -> t -> padding :int list -> t Source val replication_pad3d : t -> padding :int list -> t Source val replication_pad3d_backward : grad_output :t -> t -> padding :int list -> t Source val replication_pad3d_backward_out :
grad_input :t ->
grad_output :t ->
t ->
padding :int list ->
t Source val replication_pad3d_out : out :t -> t -> padding :int list -> t Source val reshape : t -> shape :int list -> t Source val resize_ : t -> size :int list -> t Source val resize_as_ : t -> the_template :t -> t Source val rfft : t -> signal_ndim :int -> normalized :bool -> onesided :bool -> t Source val rnn_relu :
t ->
hx :t ->
params :t list ->
has_biases :bool ->
num_layers :int ->
dropout :float ->
train :bool ->
bidirectional :bool ->
batch_first :bool ->
t * t Source val rnn_relu1 :
data :t ->
batch_sizes :t ->
hx :t ->
params :t list ->
has_biases :bool ->
num_layers :int ->
dropout :float ->
train :bool ->
bidirectional :bool ->
t * t Source val rnn_relu_cell :
t ->
hx :t ->
w_ih :t ->
w_hh :t ->
b_ih :t option ->
b_hh :t option ->
t Source val rnn_tanh :
t ->
hx :t ->
params :t list ->
has_biases :bool ->
num_layers :int ->
dropout :float ->
train :bool ->
bidirectional :bool ->
batch_first :bool ->
t * t Source val rnn_tanh1 :
data :t ->
batch_sizes :t ->
hx :t ->
params :t list ->
has_biases :bool ->
num_layers :int ->
dropout :float ->
train :bool ->
bidirectional :bool ->
t * t Source val rnn_tanh_cell :
t ->
hx :t ->
w_ih :t ->
w_hh :t ->
b_ih :t option ->
b_hh :t option ->
t Source val roll : t -> shifts :int list -> dims :int list -> t Source val rot90 : t -> k :int -> dims :int list -> t Source val rrelu : t -> training :bool -> t Source val rrelu_ : t -> training :bool -> t Source val rrelu_with_noise : t -> noise :t -> training :bool -> t Source val rrelu_with_noise_ : t -> noise :t -> training :bool -> t Source val rrelu_with_noise_out : out :t -> t -> noise :t -> training :bool -> t Source val s_copy_ : t -> src :t -> non_blocking :bool -> t Source val s_native_addmm : t -> mat1 :t -> mat2 :t -> t Source val s_native_addmm_ : t -> mat1 :t -> mat2 :t -> t Source val s_native_addmm_out : out :t -> t -> mat1 :t -> mat2 :t -> t Source val scatter : t -> dim :int -> index :t -> src :t -> t Source val scatter_ : t -> dim :int -> index :t -> src :t -> t Source val scatter_add : t -> dim :int -> index :t -> src :t -> t Source val scatter_add_ : t -> dim :int -> index :t -> src :t -> t Source val set_requires_grad : t -> r :bool -> t Source val sigmoid_backward : grad_output :t -> output :t -> t Source val sigmoid_backward_out : grad_input :t -> grad_output :t -> output :t -> t Source val slice : t -> dim :int -> start :int -> end_ :int -> step :int -> t Source val smooth_l1_loss : t -> target :t -> reduction :int -> t Source val smooth_l1_loss_backward :
grad_output :t ->
t ->
target :t ->
reduction :int ->
t Source val smooth_l1_loss_backward_out :
grad_input :t ->
grad_output :t ->
t ->
target :t ->
reduction :int ->
t Source val smooth_l1_loss_out : out :t -> t -> target :t -> reduction :int -> t Source val soft_margin_loss : t -> target :t -> reduction :int -> t Source val soft_margin_loss_backward :
grad_output :t ->
t ->
target :t ->
reduction :int ->
t Source val soft_margin_loss_backward_out :
grad_input :t ->
grad_output :t ->
t ->
target :t ->
reduction :int ->
t Source val soft_margin_loss_out : out :t -> t -> target :t -> reduction :int -> t Source val solve_out : solution :t -> lu :t -> t -> a :t -> t * t Source val sort : t -> dim :int -> descending :bool -> t * t Source val sort_out :
values :t ->
indices :t ->
t ->
dim :int ->
descending :bool ->
t * t Source val sparse_resize_ : t -> size :int list -> sparse_dim :int -> dense_dim :int -> t Source val sparse_resize_and_clear_ :
t ->
size :int list ->
sparse_dim :int ->
dense_dim :int ->
t Source val split : t -> split_size :int -> dim :int -> t list Source val split_with_sizes : t -> split_sizes :int list -> dim :int -> t list Source val sspaddmm_out : out :t -> t -> mat1 :t -> mat2 :t -> t Source val stack : t list -> dim :int -> t Source val stack_out : out :t -> t list -> dim :int -> t Source val std1 : t -> dim :int list -> unbiased :bool -> keepdim :bool -> t Source val std_out : out :t -> t -> dim :int list -> unbiased :bool -> keepdim :bool -> t Source val stft :
t ->
n_fft :int ->
hop_length :int ->
win_length :int ->
window :t option ->
normalized :bool ->
onesided :bool ->
t Source val sum2 : t -> dim :int list -> keepdim :bool -> t Source val sum_out : out :t -> t -> dim :int list -> keepdim :bool -> t Source val sum_to_size : t -> size :int list -> t Source val svd : t -> some :bool -> compute_uv :bool -> t * t * t Source val svd_out :
u :t ->
s :t ->
v :t ->
t ->
some :bool ->
compute_uv :bool ->
t * t * t Source val symeig : t -> eigenvectors :bool -> upper :bool -> t * t Source val symeig_out : e :t -> v :t -> t -> eigenvectors :bool -> upper :bool -> t * t Source val tanh_backward : grad_output :t -> output :t -> t Source val tanh_backward_out : grad_input :t -> grad_output :t -> output :t -> t Source val tensordot : t -> t -> dims_self :int list -> dims_other :int list -> t Source val to3 : t -> t -> non_blocking :bool -> copy :bool -> t Source val to_dense_backward : grad :t -> t -> t Source val to_mkldnn_backward : grad :t -> t -> t Source val to_sparse1 : t -> sparse_dim :int -> t Source val topk : t -> k :int -> dim :int -> largest :bool -> sorted :bool -> t * t Source val topk_out :
values :t ->
indices :t ->
t ->
k :int ->
dim :int ->
largest :bool ->
sorted :bool ->
t * t Source val transpose : t -> dim0 :int -> dim1 :int -> t Source val transpose_ : t -> dim0 :int -> dim1 :int -> t Source val triangular_solve :
t ->
a :t ->
upper :bool ->
transpose :bool ->
unitriangular :bool ->
t * t Source val triangular_solve_out :
x :t ->
m :t ->
t ->
a :t ->
upper :bool ->
transpose :bool ->
unitriangular :bool ->
t * t Source val tril_ : t -> diagonal :int -> t Source val tril_out : out :t -> t -> diagonal :int -> t Source val triplet_margin_loss :
anchor :t ->
positive :t ->
negative :t ->
margin :float ->
p :float ->
eps :float ->
swap :bool ->
reduction :int ->
t Source val triu_ : t -> diagonal :int -> t Source val triu_out : out :t -> t -> diagonal :int -> t Source val unbind : t -> dim :int -> t list Source val unfold : t -> dimension :int -> size :int -> step :int -> t Source val unique_consecutive :
t ->
return_inverse :bool ->
return_counts :bool ->
dim :int ->
t * t * t Source val unique_dim :
t ->
dim :int ->
sorted :bool ->
return_inverse :bool ->
return_counts :bool ->
t * t * t Source val unique_dim_consecutive :
t ->
dim :int ->
return_inverse :bool ->
return_counts :bool ->
t * t * t Source val unsqueeze_ : t -> dim :int -> t Source val upsample_bicubic2d : t -> output_size :int list -> align_corners :bool -> t Source val upsample_bicubic2d_backward :
grad_output :t ->
output_size :int list ->
input_size :int list ->
align_corners :bool ->
t Source val upsample_bicubic2d_backward_out :
grad_input :t ->
grad_output :t ->
output_size :int list ->
input_size :int list ->
align_corners :bool ->
t Source val upsample_bicubic2d_out :
out :t ->
t ->
output_size :int list ->
align_corners :bool ->
t Source val upsample_bilinear2d : t -> output_size :int list -> align_corners :bool -> t Source val upsample_bilinear2d_backward :
grad_output :t ->
output_size :int list ->
input_size :int list ->
align_corners :bool ->
t Source val upsample_bilinear2d_backward_out :
grad_input :t ->
grad_output :t ->
output_size :int list ->
input_size :int list ->
align_corners :bool ->
t Source val upsample_bilinear2d_out :
out :t ->
t ->
output_size :int list ->
align_corners :bool ->
t Source val upsample_linear1d : t -> output_size :int list -> align_corners :bool -> t Source val upsample_linear1d_backward :
grad_output :t ->
output_size :int list ->
input_size :int list ->
align_corners :bool ->
t Source val upsample_linear1d_backward_out :
grad_input :t ->
grad_output :t ->
output_size :int list ->
input_size :int list ->
align_corners :bool ->
t Source val upsample_linear1d_out :
out :t ->
t ->
output_size :int list ->
align_corners :bool ->
t Source val upsample_nearest1d : t -> output_size :int list -> t Source val upsample_nearest1d_backward :
grad_output :t ->
output_size :int list ->
input_size :int list ->
t Source val upsample_nearest1d_backward_out :
grad_input :t ->
grad_output :t ->
output_size :int list ->
input_size :int list ->
t Source val upsample_nearest1d_out : out :t -> t -> output_size :int list -> t Source val upsample_nearest2d : t -> output_size :int list -> t Source val upsample_nearest2d_backward :
grad_output :t ->
output_size :int list ->
input_size :int list ->
t Source val upsample_nearest2d_backward_out :
grad_input :t ->
grad_output :t ->
output_size :int list ->
input_size :int list ->
t Source val upsample_nearest2d_out : out :t -> t -> output_size :int list -> t Source val upsample_nearest3d : t -> output_size :int list -> t Source val upsample_nearest3d_backward :
grad_output :t ->
output_size :int list ->
input_size :int list ->
t Source val upsample_nearest3d_backward_out :
grad_input :t ->
grad_output :t ->
output_size :int list ->
input_size :int list ->
t Source val upsample_nearest3d_out : out :t -> t -> output_size :int list -> t Source val upsample_trilinear3d : t -> output_size :int list -> align_corners :bool -> t Source val upsample_trilinear3d_backward :
grad_output :t ->
output_size :int list ->
input_size :int list ->
align_corners :bool ->
t Source val upsample_trilinear3d_backward_out :
grad_input :t ->
grad_output :t ->
output_size :int list ->
input_size :int list ->
align_corners :bool ->
t Source val upsample_trilinear3d_out :
out :t ->
t ->
output_size :int list ->
align_corners :bool ->
t Source val var1 : t -> dim :int list -> unbiased :bool -> keepdim :bool -> t Source val var_out : out :t -> t -> dim :int list -> unbiased :bool -> keepdim :bool -> t Source val view : t -> size :int list -> t Source val zeros_out : out :t -> size :int list -> t Source val int_vec :
?kind :[ `int | `int16 | `int64 | `int8 | `uint8 ] ->
int list ->
t Source val shape2_exn : t -> int * int Source val shape3_exn : t -> int * int * int Source val shape4_exn : t -> int * int * int * int Source val requires_grad : t -> bool Source val grad_set_enabled : bool -> bool Source val select : t -> dim :int -> index :int -> t Source val float_get : t -> int list -> float Source val int_get : t -> int list -> int Source val float_set : t -> int list -> float -> unit Source val int_set : t -> int list -> int -> unit Source val fill_float : t -> float -> unit Source val fill_int : t -> int -> unit Source val backward : ?keep_graph :bool -> ?create_graph :bool -> t -> unit Source val run_backward :
?keep_graph :bool ->
?create_graph :bool ->
t list ->
t list ->
t list Source val to_string : t -> line_size :int -> string Source val argmax : ?dim :int -> ?keepdim :bool -> t -> t Source val set_float2 : t -> int -> int -> float -> unit Source val set_float1 : t -> int -> float -> unit Source val set_int2 : t -> int -> int -> int -> unit Source val set_int1 : t -> int -> int -> unit Source val get_float2 : t -> int -> int -> float Source val get_float1 : t -> int -> float Source val get_int2 : t -> int -> int -> int Source val get_int1 : t -> int -> int Source val (.%{}) : t -> int list -> int Source val (.%{}<-) : t -> int list -> int -> unit Source val (.%.{}) : t -> int list -> float Source val (.%.{}<-) : t -> int list -> float -> unit Source val (.%[]<-) : t -> int -> int -> unit Source val (.%.[]) : t -> int -> float Source val (.%.[]<-) : t -> int -> float -> unit Source val no_grad_ : t -> f :(t -> 'a ) -> 'a Source val no_grad : (unit -> 'a ) -> 'a Source val to_float0 : t -> float option Source val to_float1 : t -> float array option Source val to_float2 : t -> float array array option Source val to_float3 : t -> float array array array option Source val to_float0_exn : t -> float Source val to_float1_exn : t -> float array Source val to_float2_exn : t -> float array array Source val to_float3_exn : t -> float array array array Source val to_int0 : t -> int option Source val to_int1 : t -> int array option Source val to_int2 : t -> int array array option Source val to_int3 : t -> int array array array option Source val to_int1_exn : t -> int array Source val to_int2_exn : t -> int array array Source val to_int3_exn : t -> int array array array Source val of_float1 : float array -> t Source val of_float2 : float array array -> t Source val of_float3 : float array array array -> t Source val of_int2 : int array array -> t Source val of_int3 : int array array array -> t Source val conv2d :
?padding :(int * int) ->
?dilation :(int * int) ->
?groups :int ->
t ->
t ->
t option ->
stride :(int * int) ->
t Source val conv_transpose2d :
?output_padding :(int * int) ->
?padding :(int * int) ->
?dilation :(int * int) ->
?groups :int ->
t ->
t ->
t option ->
stride :(int * int) ->
t Source val max_pool2d :
?padding :(int * int) ->
?dilation :(int * int) ->
?ceil_mode :bool ->
?stride :(int * int) ->
t ->
ksize :(int * int) ->
t Source val avg_pool2d :
?padding :(int * int) ->
?count_include_pad :bool ->
?ceil_mode :bool ->
?stride :(int * int) ->
t ->
ksize :(int * int) ->
t Source val const_batch_norm : ?momentum :float -> ?eps :float -> t -> t Source val dropout : t -> p :float -> is_training :bool -> t Source val print_shape : ?name :string -> t -> unit