Module Torch.Tensor

include module type of Torch_core.Wrapper.Tensor with type t := t
include Torch_core.Wrapper_generated_intf.S with type t := t and type scalar := Torch_core.Wrapper.Scalar.t
val abs : t -> t
val abs_ : t -> t
val abs_out : out:t -> t -> t
val acos : t -> t
val acos_ : t -> t
val acos_out : out:t -> t -> t
val adaptive_avg_pool1d : t -> output_size:int list -> t
val adaptive_avg_pool2d : t -> output_size:int list -> t
val adaptive_avg_pool2d_out : out:t -> t -> output_size:int list -> t
val adaptive_avg_pool3d : t -> output_size:int list -> t
val adaptive_avg_pool3d_backward : grad_output:t -> t -> t
val adaptive_avg_pool3d_backward_out : grad_input:t -> grad_output:t -> t -> t
val adaptive_avg_pool3d_out : out:t -> t -> output_size:int list -> t
val adaptive_max_pool1d : t -> output_size:int list -> t * t
val adaptive_max_pool2d : t -> output_size:int list -> t * t
val adaptive_max_pool2d_backward : grad_output:t -> t -> indices:t -> t
val adaptive_max_pool2d_backward_out : grad_input:t -> grad_output:t -> t -> indices:t -> t
val adaptive_max_pool2d_out : out:t -> indices:t -> t -> output_size:int list -> t * t
val adaptive_max_pool3d : t -> output_size:int list -> t * t
val adaptive_max_pool3d_backward : grad_output:t -> t -> indices:t -> t
val adaptive_max_pool3d_backward_out : grad_input:t -> grad_output:t -> t -> indices:t -> t
val adaptive_max_pool3d_out : out:t -> indices:t -> t -> output_size:int list -> t * t
val add : t -> t -> t
val add_ : t -> t -> t
val add_out : out:t -> t -> t -> t
val addbmm : t -> batch1:t -> batch2:t -> t
val addbmm_ : t -> batch1:t -> batch2:t -> t
val addbmm_out : out:t -> t -> batch1:t -> batch2:t -> t
val addcdiv : t -> tensor1:t -> tensor2:t -> t
val addcdiv_ : t -> tensor1:t -> tensor2:t -> t
val addcdiv_out : out:t -> t -> tensor1:t -> tensor2:t -> t
val addcmul : t -> tensor1:t -> tensor2:t -> t
val addcmul_ : t -> tensor1:t -> tensor2:t -> t
val addcmul_out : out:t -> t -> tensor1:t -> tensor2:t -> t
val addmm : t -> mat1:t -> mat2:t -> t
val addmm_ : t -> mat1:t -> mat2:t -> t
val addmm_out : out:t -> t -> mat1:t -> mat2:t -> t
val addmv : t -> mat:t -> vec:t -> t
val addmv_ : t -> mat:t -> vec:t -> t
val addmv_out : out:t -> t -> mat:t -> vec:t -> t
val addr : t -> vec1:t -> vec2:t -> t
val addr_ : t -> vec1:t -> vec2:t -> t
val addr_out : out:t -> t -> vec1:t -> vec2:t -> t
val affine_grid_generator : theta:t -> size:int list -> t
val affine_grid_generator_backward : grad:t -> size:int list -> t
val alias : t -> t
val all : t -> t
val all1 : t -> dim:int -> keepdim:bool -> t
val all_out : out:t -> t -> dim:int -> keepdim:bool -> t
val alpha_dropout : t -> p:float -> train:bool -> t
val alpha_dropout_ : t -> p:float -> train:bool -> t
val any : t -> t
val any1 : t -> dim:int -> keepdim:bool -> t
val any_out : out:t -> t -> dim:int -> keepdim:bool -> t
val arange_out : out:t -> end_:Torch_core.Wrapper.Scalar.t -> t
val arange_out1 : out:t -> start:Torch_core.Wrapper.Scalar.t -> end_:Torch_core.Wrapper.Scalar.t -> t
val argmin : t -> dim:int -> keepdim:bool -> t
val argsort : t -> dim:int -> descending:bool -> t
val as_strided : t -> size:int list -> stride:int list -> storage_offset:int -> t
val as_strided_ : t -> size:int list -> stride:int list -> storage_offset:int -> t
val asin : t -> t
val asin_ : t -> t
val asin_out : out:t -> t -> t
val atan : t -> t
val atan2 : t -> t -> t
val atan2_ : t -> t -> t
val atan2_out : out:t -> t -> t -> t
val atan_ : t -> t
val atan_out : out:t -> t -> t
val avg_pool1d : t -> kernel_size:int list -> stride:int list -> padding:int list -> ceil_mode:bool -> count_include_pad:bool -> t
val avg_pool2d_backward : grad_output:t -> t -> kernel_size:int list -> stride:int list -> padding:int list -> ceil_mode:bool -> count_include_pad:bool -> t
val avg_pool2d_backward_out : grad_input:t -> grad_output:t -> t -> kernel_size:int list -> stride:int list -> padding:int list -> ceil_mode:bool -> count_include_pad:bool -> t
val avg_pool2d_out : out:t -> t -> kernel_size:int list -> stride:int list -> padding:int list -> ceil_mode:bool -> count_include_pad:bool -> t
val avg_pool3d : t -> kernel_size:int list -> stride:int list -> padding:int list -> ceil_mode:bool -> count_include_pad:bool -> t
val avg_pool3d_backward : grad_output:t -> t -> kernel_size:int list -> stride:int list -> padding:int list -> ceil_mode:bool -> count_include_pad:bool -> t
val avg_pool3d_backward_out : grad_input:t -> grad_output:t -> t -> kernel_size:int list -> stride:int list -> padding:int list -> ceil_mode:bool -> count_include_pad:bool -> t
val avg_pool3d_out : out:t -> t -> kernel_size:int list -> stride:int list -> padding:int list -> ceil_mode:bool -> count_include_pad:bool -> t
val baddbmm : t -> batch1:t -> batch2:t -> t
val baddbmm_ : t -> batch1:t -> batch2:t -> t
val baddbmm_out : out:t -> t -> batch1:t -> batch2:t -> t
val bartlett_window : window_length:int -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val bartlett_window1 : window_length:int -> periodic:bool -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val batch_norm : t -> weight:t option -> bias:t option -> running_mean:t option -> running_var:t option -> training:bool -> momentum:float -> eps:float -> cudnn_enabled:bool -> t
val batch_norm_backward_elemt : grad_out:t -> t -> mean:t -> invstd:t -> weight:t option -> mean_dy:t -> mean_dy_xmu:t -> t
val batch_norm_backward_reduce : grad_out:t -> t -> mean:t -> invstd:t -> input_g:bool -> weight_g:bool -> bias_g:bool -> t * t * t * t
val batch_norm_elemt : t -> weight:t option -> bias:t option -> mean:t -> invstd:t -> eps:float -> t
val batch_norm_gather_stats : t -> mean:t -> invstd:t -> running_mean:t option -> running_var:t option -> momentum:float -> eps:float -> count:int -> t * t
val batch_norm_stats : t -> eps:float -> t * t
val batch_norm_update_stats : t -> running_mean:t option -> running_var:t option -> momentum:float -> t * t
val bernoulli : t -> t
val bernoulli1 : t -> p:float -> t
val bernoulli_ : t -> p:t -> t
val bernoulli_1 : t -> p:float -> t
val bernoulli_out : out:t -> t -> t
val bilinear : input1:t -> input2:t -> weight:t -> bias:t option -> t
val binary_cross_entropy : t -> target:t -> weight:t option -> reduction:int -> t
val binary_cross_entropy_backward : grad_output:t -> t -> target:t -> weight:t -> reduction:int -> t
val binary_cross_entropy_backward_out : grad_input:t -> grad_output:t -> t -> target:t -> weight:t -> reduction:int -> t
val binary_cross_entropy_out : out:t -> t -> target:t -> weight:t option -> reduction:int -> t
val binary_cross_entropy_with_logits : t -> target:t -> weight:t option -> pos_weight:t option -> reduction:int -> t
val binary_cross_entropy_with_logits_backward : grad_output:t -> t -> target:t -> weight:t option -> pos_weight:t option -> reduction:int -> t
val bincount : t -> weights:t option -> minlength:int -> t
val blackman_window : window_length:int -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val blackman_window1 : window_length:int -> periodic:bool -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val bmm : t -> mat2:t -> t
val bmm_out : out:t -> t -> mat2:t -> t
val broadcast_tensors : t list -> t list
val cartesian_prod : t list -> t
val cat : t list -> dim:int -> t
val cat_out : out:t -> t list -> dim:int -> t
val cauchy_ : t -> median:float -> sigma:float -> t
val cdist : x1:t -> x2:t -> p:float -> t
val ceil : t -> t
val ceil_ : t -> t
val ceil_out : out:t -> t -> t
val celu : t -> t
val celu_ : t -> t
val chain_matmul : matrices:t list -> t
val cholesky : t -> upper:bool -> t
val cholesky_inverse : t -> upper:bool -> t
val cholesky_inverse_out : out:t -> t -> upper:bool -> t
val cholesky_out : out:t -> t -> upper:bool -> t
val cholesky_solve : t -> input2:t -> upper:bool -> t
val cholesky_solve_out : out:t -> t -> input2:t -> upper:bool -> t
val chunk : t -> chunks:int -> dim:int -> t list
val clamp_max : t -> max:Torch_core.Wrapper.Scalar.t -> t
val clamp_max_ : t -> max:Torch_core.Wrapper.Scalar.t -> t
val clamp_max_out : out:t -> t -> max:Torch_core.Wrapper.Scalar.t -> t
val clamp_min : t -> min:Torch_core.Wrapper.Scalar.t -> t
val clamp_min_ : t -> min:Torch_core.Wrapper.Scalar.t -> t
val clamp_min_out : out:t -> t -> min:Torch_core.Wrapper.Scalar.t -> t
val clamp_out : out:t -> t -> min:Torch_core.Wrapper.Scalar.t -> max:Torch_core.Wrapper.Scalar.t -> t
val clone : t -> t
val coalesce : t -> t
val combinations : t -> r:int -> with_replacement:bool -> t
val constant_pad_nd : t -> pad:int list -> t
val contiguous : t -> t
val conv1d : t -> weight:t -> bias:t option -> stride:int list -> padding:int list -> dilation:int list -> groups:int -> t
val conv3d : t -> weight:t -> bias:t option -> stride:int list -> padding:int list -> dilation:int list -> groups:int -> t
val conv_tbc : t -> weight:t -> bias:t -> pad:int -> t
val conv_tbc_backward : t -> t -> weight:t -> bias:t -> pad:int -> t * t * t
val conv_transpose1d : t -> weight:t -> bias:t option -> stride:int list -> padding:int list -> output_padding:int list -> groups:int -> dilation:int list -> t
val conv_transpose3d : t -> weight:t -> bias:t option -> stride:int list -> padding:int list -> output_padding:int list -> groups:int -> dilation:int list -> t
val convolution : t -> weight:t -> bias:t option -> stride:int list -> padding:int list -> dilation:int list -> transposed:bool -> output_padding:int list -> groups:int -> t
val copy_sparse_to_sparse_ : t -> src:t -> non_blocking:bool -> t
val cos : t -> t
val cos_ : t -> t
val cos_out : out:t -> t -> t
val cosh : t -> t
val cosh_ : t -> t
val cosh_out : out:t -> t -> t
val cosine_embedding_loss : input1:t -> input2:t -> target:t -> margin:float -> reduction:int -> t
val cosine_similarity : x1:t -> x2:t -> dim:int -> eps:float -> t
val cross : t -> t -> dim:int -> t
val cross_out : out:t -> t -> t -> dim:int -> t
val ctc_loss : log_probs:t -> targets:t -> input_lengths:int list -> target_lengths:int list -> blank:int -> reduction:int -> zero_infinity:bool -> t
val ctc_loss1 : log_probs:t -> targets:t -> input_lengths:t -> target_lengths:t -> blank:int -> reduction:int -> zero_infinity:bool -> t
val cudnn_affine_grid_generator : theta:t -> n:int -> c:int -> h:int -> w:int -> t
val cudnn_affine_grid_generator_backward : grad:t -> n:int -> c:int -> h:int -> w:int -> t
val cudnn_batch_norm : t -> weight:t -> bias:t option -> running_mean:t option -> running_var:t option -> training:bool -> exponential_average_factor:float -> epsilon:float -> t * t * t
val cudnn_batch_norm_backward : t -> grad_output:t -> weight:t -> running_mean:t option -> running_var:t option -> save_mean:t option -> save_var:t option -> epsilon:float -> t * t * t
val cudnn_convolution : t -> weight:t -> bias:t option -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
val cudnn_convolution_backward_bias : grad_output:t -> t
val cudnn_convolution_backward_input : self_size:int list -> grad_output:t -> weight:t -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
val cudnn_convolution_backward_weight : weight_size:int list -> grad_output:t -> t -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
val cudnn_convolution_transpose : t -> weight:t -> bias:t option -> padding:int list -> output_padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
val cudnn_convolution_transpose_backward_bias : grad_output:t -> t
val cudnn_convolution_transpose_backward_input : grad_output:t -> weight:t -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
val cudnn_convolution_transpose_backward_weight : weight_size:int list -> grad_output:t -> t -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
val cudnn_grid_sampler : t -> grid:t -> t
val cudnn_grid_sampler_backward : t -> grid:t -> grad_output:t -> t * t
val cumprod : t -> dim:int -> t
val cumprod1 : t -> dim:int -> dtype:Torch_core.Kind.t -> t
val cumprod_out : out:t -> t -> dim:int -> t
val cumprod_out1 : out:t -> t -> dim:int -> dtype:Torch_core.Kind.t -> t
val cumsum : t -> dim:int -> t
val cumsum1 : t -> dim:int -> dtype:Torch_core.Kind.t -> t
val cumsum_out : out:t -> t -> dim:int -> t
val cumsum_out1 : out:t -> t -> dim:int -> dtype:Torch_core.Kind.t -> t
val dequantize : t -> t
val det : t -> t
val detach : t -> t
val detach_ : t -> t
val diag : t -> diagonal:int -> t
val diag_embed : t -> offset:int -> dim1:int -> dim2:int -> t
val diag_out : out:t -> t -> diagonal:int -> t
val diagflat : t -> offset:int -> t
val diagonal : t -> offset:int -> dim1:int -> dim2:int -> t
val digamma : t -> t
val digamma_ : t -> t
val digamma_out : out:t -> t -> t
val dist : t -> t -> t
val div : t -> t -> t
val div_ : t -> t -> t
val div_out : out:t -> t -> t -> t
val dot : t -> t -> t
val dot_out : out:t -> t -> t -> t
val dropout_ : t -> p:float -> train:bool -> t
val eig : t -> eigenvectors:bool -> t * t
val eig_out : e:t -> v:t -> t -> eigenvectors:bool -> t * t
val elu : t -> t
val elu_ : t -> t
val elu_backward : grad_output:t -> alpha:Torch_core.Wrapper.Scalar.t -> scale:Torch_core.Wrapper.Scalar.t -> input_scale:Torch_core.Wrapper.Scalar.t -> output:t -> t
val elu_backward_out : grad_input:t -> grad_output:t -> alpha:Torch_core.Wrapper.Scalar.t -> scale:Torch_core.Wrapper.Scalar.t -> input_scale:Torch_core.Wrapper.Scalar.t -> output:t -> t
val elu_out : out:t -> t -> t
val embedding : weight:t -> indices:t -> padding_idx:int -> scale_grad_by_freq:bool -> sparse:bool -> t
val embedding_backward : grad:t -> indices:t -> num_weights:int -> padding_idx:int -> scale_grad_by_freq:bool -> sparse:bool -> t
val embedding_bag : weight:t -> indices:t -> offsets:t -> scale_grad_by_freq:bool -> mode:int -> sparse:bool -> per_sample_weights:t option -> t * t * t * t
val embedding_dense_backward : grad_output:t -> indices:t -> num_weights:int -> padding_idx:int -> scale_grad_by_freq:bool -> t
val embedding_renorm_ : t -> indices:t -> max_norm:float -> norm_type:float -> t
val embedding_sparse_backward : grad:t -> indices:t -> num_weights:int -> padding_idx:int -> scale_grad_by_freq:bool -> t
val empty : size:int list -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val empty_like : t -> t
val empty_like1 : t -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val empty_out : out:t -> size:int list -> t
val empty_strided : size:int list -> stride:int list -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val eq1 : t -> t -> t
val eq_1 : t -> t -> t
val eq_out : out:t -> t -> Torch_core.Wrapper.Scalar.t -> t
val eq_out1 : out:t -> t -> t -> t
val erf : t -> t
val erf_ : t -> t
val erf_out : out:t -> t -> t
val erfc : t -> t
val erfc_ : t -> t
val erfc_out : out:t -> t -> t
val erfinv : t -> t
val erfinv_ : t -> t
val erfinv_out : out:t -> t -> t
val exp : t -> t
val exp_ : t -> t
val exp_out : out:t -> t -> t
val expand : t -> size:int list -> implicit:bool -> t
val expand_as : t -> t -> t
val expm1 : t -> t
val expm1_ : t -> t
val expm1_out : out:t -> t -> t
val exponential_ : t -> lambd:float -> t
val eye : n:int -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val eye1 : n:int -> m:int -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val eye_out : out:t -> n:int -> t
val eye_out1 : out:t -> n:int -> m:int -> t
val fbgemm_linear_int8_weight : t -> weight:t -> packed:t -> col_offsets:t -> weight_scale:Torch_core.Wrapper.Scalar.t -> weight_zero_point:Torch_core.Wrapper.Scalar.t -> bias:t -> t
val fbgemm_pack_quantized_matrix : t -> k:int -> n:int -> t
val feature_alpha_dropout : t -> p:float -> train:bool -> t
val feature_alpha_dropout_ : t -> p:float -> train:bool -> t
val feature_dropout : t -> p:float -> train:bool -> t
val feature_dropout_ : t -> p:float -> train:bool -> t
val fft : t -> signal_ndim:int -> normalized:bool -> t
val fill_ : t -> value:Torch_core.Wrapper.Scalar.t -> t
val fill_1 : t -> value:t -> t
val flip : t -> dims:int list -> t
val floor : t -> t
val floor_ : t -> t
val floor_out : out:t -> t -> t
val fmod1 : t -> t -> t
val fmod_1 : t -> t -> t
val fmod_out : out:t -> t -> Torch_core.Wrapper.Scalar.t -> t
val fmod_out1 : out:t -> t -> t -> t
val frac : t -> t
val frac_ : t -> t
val frac_out : out:t -> t -> t
val fractional_max_pool2d : t -> kernel_size:int list -> output_size:int list -> random_samples:t -> t * t
val fractional_max_pool2d_backward : grad_output:t -> t -> kernel_size:int list -> output_size:int list -> indices:t -> t
val fractional_max_pool2d_backward_out : grad_input:t -> grad_output:t -> t -> kernel_size:int list -> output_size:int list -> indices:t -> t
val fractional_max_pool2d_out : output:t -> indices:t -> t -> kernel_size:int list -> output_size:int list -> random_samples:t -> t * t
val fractional_max_pool3d : t -> kernel_size:int list -> output_size:int list -> random_samples:t -> t * t
val fractional_max_pool3d_backward : grad_output:t -> t -> kernel_size:int list -> output_size:int list -> indices:t -> t
val fractional_max_pool3d_backward_out : grad_input:t -> grad_output:t -> t -> kernel_size:int list -> output_size:int list -> indices:t -> t
val fractional_max_pool3d_out : output:t -> indices:t -> t -> kernel_size:int list -> output_size:int list -> random_samples:t -> t * t
val frobenius_norm : t -> t
val frobenius_norm1 : t -> dim:int list -> keepdim:bool -> t
val frobenius_norm_out : out:t -> t -> dim:int list -> keepdim:bool -> t
val full : size:int list -> fill_value:Torch_core.Wrapper.Scalar.t -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val full_like : t -> fill_value:Torch_core.Wrapper.Scalar.t -> t
val full_like1 : t -> fill_value:Torch_core.Wrapper.Scalar.t -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val full_out : out:t -> size:int list -> fill_value:Torch_core.Wrapper.Scalar.t -> t
val gather : t -> dim:int -> index:t -> sparse_grad:bool -> t
val gather_out : out:t -> t -> dim:int -> index:t -> sparse_grad:bool -> t
val ge1 : t -> t -> t
val ge_1 : t -> t -> t
val ge_out : out:t -> t -> Torch_core.Wrapper.Scalar.t -> t
val ge_out1 : out:t -> t -> t -> t
val gels : t -> a:t -> t * t
val gels_out : x:t -> qr:t -> t -> a:t -> t * t
val geometric_ : t -> p:float -> t
val geqrf : t -> t * t
val geqrf_out : a:t -> tau:t -> t -> t * t
val ger : t -> vec2:t -> t
val ger_out : out:t -> t -> vec2:t -> t
val glu : t -> dim:int -> t
val glu_backward : grad_output:t -> t -> dim:int -> t
val glu_backward_out : grad_input:t -> grad_output:t -> t -> dim:int -> t
val glu_out : out:t -> t -> dim:int -> t
val grad : t -> t
val grid_sampler : t -> grid:t -> interpolation_mode:int -> padding_mode:int -> t
val grid_sampler_2d : t -> grid:t -> interpolation_mode:int -> padding_mode:int -> t
val grid_sampler_2d_backward : grad_output:t -> t -> grid:t -> interpolation_mode:int -> padding_mode:int -> t * t
val grid_sampler_3d : t -> grid:t -> interpolation_mode:int -> padding_mode:int -> t
val grid_sampler_3d_backward : grad_output:t -> t -> grid:t -> interpolation_mode:int -> padding_mode:int -> t * t
val group_norm : t -> num_groups:int -> weight:t option -> bias:t option -> eps:float -> cudnn_enabled:bool -> t
val gru : t -> hx:t -> params:t list -> has_biases:bool -> num_layers:int -> dropout:float -> train:bool -> bidirectional:bool -> batch_first:bool -> t * t
val gru1 : data:t -> batch_sizes:t -> hx:t -> params:t list -> has_biases:bool -> num_layers:int -> dropout:float -> train:bool -> bidirectional:bool -> t * t
val gru_cell : t -> hx:t -> w_ih:t -> w_hh:t -> b_ih:t option -> b_hh:t option -> t
val gt1 : t -> t -> t
val gt_1 : t -> t -> t
val gt_out : out:t -> t -> Torch_core.Wrapper.Scalar.t -> t
val gt_out1 : out:t -> t -> t -> t
val hamming_window : window_length:int -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val hamming_window1 : window_length:int -> periodic:bool -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val hamming_window2 : window_length:int -> periodic:bool -> alpha:float -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val hamming_window3 : window_length:int -> periodic:bool -> alpha:float -> beta:float -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val hann_window : window_length:int -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val hann_window1 : window_length:int -> periodic:bool -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
val hardshrink : t -> t
val hardshrink_backward : grad_out:t -> t -> lambd:Torch_core.Wrapper.Scalar.t -> t
val hardtanh : t -> t
val hardtanh_ : t -> t
val hardtanh_backward : grad_output:t -> t -> min_val:Torch_core.Wrapper.Scalar.t -> max_val:Torch_core.Wrapper.Scalar.t -> t
val hardtanh_backward_out : grad_input:t -> grad_output:t -> t -> min_val:Torch_core.Wrapper.Scalar.t -> max_val:Torch_core.Wrapper.Scalar.t -> t
val hardtanh_out : out:t -> t -> t
val hinge_embedding_loss : t -> target:t -> margin:float -> reduction:int -> t
val histc : t -> bins:int -> t
val histc_out : out:t -> t -> bins:int -> t
val hspmm : mat1:t -> mat2:t -> t
val hspmm_out : out:t -> mat1:t -> mat2:t -> t
val ifft : t -> signal_ndim:int -> normalized:bool -> t
val index : t -> indices:t list -> t
val index_add : t -> dim:int -> index:t -> source:t -> t
val index_add_ : t -> dim:int -> index:t -> source:t -> t
val index_copy : t -> dim:int -> index:t -> source:t -> t
val index_copy_ : t -> dim:int -> index:t -> source:t -> t
val index_fill : t -> dim:int -> index:t -> value:Torch_core.Wrapper.Scalar.t -> t
val index_fill1 : t -> dim:int -> index:t -> value:t -> t
val index_fill_ : t -> dim:int -> index:t -> value:Torch_core.Wrapper.Scalar.t -> t
val index_fill_1 : t -> dim:int -> index:t -> value:t -> t
val index_put : t -> indices:t list -> values:t -> accumulate:bool -> t
val index_put_ : t -> indices:t list -> values:t -> accumulate:bool -> t
val index_select : t -> dim:int -> index:t -> t
val index_select_out : out:t -> t -> dim:int -> index:t -> t
val indices : t -> t
val instance_norm : t -> weight:t option -> bias:t option -> running_mean:t option -> running_var:t option -> use_input_stats:bool -> momentum:float -> eps:float -> cudnn_enabled:bool -> t
val int_repr : t -> t
val inverse : t -> t
val inverse_out : out:t -> t -> t
val irfft : t -> signal_ndim:int -> normalized:bool -> onesided:bool -> signal_sizes:int list -> t
val isclose : t -> t -> rtol:float -> atol:float -> equal_nan:bool -> t
val isnan : t -> t
val kl_div : t -> target:t -> reduction:int -> t
val kl_div_backward : grad_output:t -> t -> target:t -> reduction:int -> t
val kthvalue : t -> k:int -> dim:int -> keepdim:bool -> t * t
val kthvalue_out : values:t -> indices:t -> t -> k:int -> dim:int -> keepdim:bool -> t * t
val l1_loss : t -> target:t -> reduction:int -> t
val l1_loss_backward : grad_output:t -> t -> target:t -> reduction:int -> t
val l1_loss_backward_out : grad_input:t -> grad_output:t -> t -> target:t -> reduction:int -> t
Sourceval layer_norm : t -> normalized_shape:int list -> weight:t option -> bias:t option -> eps:float -> cudnn_enable:bool -> t
Sourceval le1 : t -> t -> t
Sourceval le_1 : t -> t -> t
Sourceval le_out : out:t -> t -> Torch_core.Wrapper.Scalar.t -> t
Sourceval le_out1 : out:t -> t -> t -> t
Sourceval leaky_relu : t -> t
Sourceval leaky_relu_ : t -> t
Sourceval leaky_relu_backward : grad_output:t -> t -> negative_slope:Torch_core.Wrapper.Scalar.t -> t
Sourceval leaky_relu_backward_out : grad_input:t -> grad_output:t -> t -> negative_slope:Torch_core.Wrapper.Scalar.t -> t
Sourceval leaky_relu_out : out:t -> t -> t
Sourceval lerp : t -> end_:t -> weight:Torch_core.Wrapper.Scalar.t -> t
Sourceval lerp1 : t -> end_:t -> weight:t -> t
Sourceval lerp_ : t -> end_:t -> weight:Torch_core.Wrapper.Scalar.t -> t
Sourceval lerp_1 : t -> end_:t -> weight:t -> t
Sourceval lerp_out : out:t -> t -> end_:t -> weight:Torch_core.Wrapper.Scalar.t -> t
Sourceval lerp_out1 : out:t -> t -> end_:t -> weight:t -> t
Sourceval lgamma : t -> t
Sourceval lgamma_ : t -> t
Sourceval lgamma_out : out:t -> t -> t
Sourceval linear : t -> weight:t -> bias:t option -> t
Sourceval linspace_out : out:t -> start:Torch_core.Wrapper.Scalar.t -> end_:Torch_core.Wrapper.Scalar.t -> steps:int -> t
Sourceval log : t -> t
Sourceval log10 : t -> t
Sourceval log10_ : t -> t
Sourceval log10_out : out:t -> t -> t
Sourceval log1p : t -> t
Sourceval log1p_ : t -> t
Sourceval log1p_out : out:t -> t -> t
Sourceval log2 : t -> t
Sourceval log2_ : t -> t
Sourceval log2_out : out:t -> t -> t
Sourceval log_ : t -> t
Sourceval log_normal_ : t -> mean:float -> std:float -> t
Sourceval log_out : out:t -> t -> t
Sourceval log_sigmoid : t -> t
Sourceval log_sigmoid_backward : grad_output:t -> t -> buffer:t -> t
Sourceval log_sigmoid_backward_out : grad_input:t -> grad_output:t -> t -> buffer:t -> t
Sourceval log_sigmoid_out : out:t -> t -> t
Sourceval log_softmax : t -> dim:int -> t
Sourceval log_softmax1 : t -> dim:int -> dtype:Torch_core.Kind.t -> t
Sourceval logdet : t -> t
Sourceval logspace : start:Torch_core.Wrapper.Scalar.t -> end_:Torch_core.Wrapper.Scalar.t -> steps:int -> base:float -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval logspace_out : out:t -> start:Torch_core.Wrapper.Scalar.t -> end_:Torch_core.Wrapper.Scalar.t -> steps:int -> base:float -> t
Sourceval logsumexp : t -> dim:int list -> keepdim:bool -> t
Sourceval logsumexp_out : out:t -> t -> dim:int list -> keepdim:bool -> t
Sourceval lstm : t -> hx:t list -> params:t list -> has_biases:bool -> num_layers:int -> dropout:float -> train:bool -> bidirectional:bool -> batch_first:bool -> t * t * t
Sourceval lstm1 : data:t -> batch_sizes:t -> hx:t list -> params:t list -> has_biases:bool -> num_layers:int -> dropout:float -> train:bool -> bidirectional:bool -> t * t * t
Sourceval lstm_cell : t -> hx:t list -> w_ih:t -> w_hh:t -> b_ih:t option -> b_hh:t option -> t * t
Sourceval lt1 : t -> t -> t
Sourceval lt_1 : t -> t -> t
Sourceval lt_out : out:t -> t -> Torch_core.Wrapper.Scalar.t -> t
Sourceval lt_out1 : out:t -> t -> t -> t
Sourceval lu_solve : t -> lu_data:t -> lu_pivots:t -> t
Sourceval lu_solve_out : out:t -> t -> lu_data:t -> lu_pivots:t -> t
Sourceval margin_ranking_loss : input1:t -> input2:t -> target:t -> margin:float -> reduction:int -> t
Sourceval masked_fill : t -> mask:t -> value:Torch_core.Wrapper.Scalar.t -> t
Sourceval masked_fill1 : t -> mask:t -> value:t -> t
Sourceval masked_fill_ : t -> mask:t -> value:Torch_core.Wrapper.Scalar.t -> t
Sourceval masked_fill_1 : t -> mask:t -> value:t -> t
Sourceval masked_scatter : t -> mask:t -> source:t -> t
Sourceval masked_scatter_ : t -> mask:t -> source:t -> t
Sourceval masked_select : t -> mask:t -> t
Sourceval masked_select_out : out:t -> t -> mask:t -> t
Sourceval matmul : t -> t -> t
Sourceval matmul_out : out:t -> t -> t -> t
Sourceval matrix_power : t -> n:int -> t
Sourceval matrix_rank : t -> symmetric:bool -> t
Sourceval matrix_rank1 : t -> tol:float -> symmetric:bool -> t
Sourceval max1 : t -> t -> t
Sourceval max2 : t -> dim:int -> keepdim:bool -> t * t
Sourceval max_out : out:t -> t -> t -> t
Sourceval max_out1 : max:t -> max_values:t -> t -> dim:int -> keepdim:bool -> t * t
Sourceval max_pool1d : t -> kernel_size:int list -> stride:int list -> padding:int list -> dilation:int list -> ceil_mode:bool -> t
Sourceval max_pool1d_with_indices : t -> kernel_size:int list -> stride:int list -> padding:int list -> dilation:int list -> ceil_mode:bool -> t * t
Sourceval max_pool2d_with_indices : t -> kernel_size:int list -> stride:int list -> padding:int list -> dilation:int list -> ceil_mode:bool -> t * t
Sourceval max_pool2d_with_indices_backward : grad_output:t -> t -> kernel_size:int list -> stride:int list -> padding:int list -> dilation:int list -> ceil_mode:bool -> indices:t -> t
Sourceval max_pool2d_with_indices_backward_out : grad_input:t -> grad_output:t -> t -> kernel_size:int list -> stride:int list -> padding:int list -> dilation:int list -> ceil_mode:bool -> indices:t -> t
Sourceval max_pool2d_with_indices_out : output:t -> indices:t -> t -> kernel_size:int list -> stride:int list -> padding:int list -> dilation:int list -> ceil_mode:bool -> t * t
Sourceval max_pool3d : t -> kernel_size:int list -> stride:int list -> padding:int list -> dilation:int list -> ceil_mode:bool -> t
Sourceval max_pool3d_with_indices : t -> kernel_size:int list -> stride:int list -> padding:int list -> dilation:int list -> ceil_mode:bool -> t * t
Sourceval max_pool3d_with_indices_backward : grad_output:t -> t -> kernel_size:int list -> stride:int list -> padding:int list -> dilation:int list -> ceil_mode:bool -> indices:t -> t
Sourceval max_pool3d_with_indices_backward_out : grad_input:t -> grad_output:t -> t -> kernel_size:int list -> stride:int list -> padding:int list -> dilation:int list -> ceil_mode:bool -> indices:t -> t
Sourceval max_pool3d_with_indices_out : output:t -> indices:t -> t -> kernel_size:int list -> stride:int list -> padding:int list -> dilation:int list -> ceil_mode:bool -> t * t
Sourceval max_unpool2d : t -> indices:t -> output_size:int list -> t
Sourceval max_unpool2d_backward : grad_output:t -> t -> indices:t -> output_size:int list -> t
Sourceval max_unpool2d_backward_out : grad_input:t -> grad_output:t -> t -> indices:t -> output_size:int list -> t
Sourceval max_unpool2d_out : out:t -> t -> indices:t -> output_size:int list -> t
Sourceval max_unpool3d : t -> indices:t -> output_size:int list -> stride:int list -> padding:int list -> t
Sourceval max_unpool3d_backward : grad_output:t -> t -> indices:t -> output_size:int list -> stride:int list -> padding:int list -> t
Sourceval max_unpool3d_backward_out : grad_input:t -> grad_output:t -> t -> indices:t -> output_size:int list -> stride:int list -> padding:int list -> t
Sourceval max_unpool3d_out : out:t -> t -> indices:t -> output_size:int list -> stride:int list -> padding:int list -> t
Sourceval max_values : t -> dim:int list -> keepdim:bool -> t
Sourceval mean1 : t -> dtype:Torch_core.Kind.t -> t
Sourceval mean2 : t -> dim:int list -> keepdim:bool -> t
Sourceval mean3 : t -> dim:int list -> dtype:Torch_core.Kind.t -> t
Sourceval mean4 : t -> dim:int list -> keepdim:bool -> dtype:Torch_core.Kind.t -> t
Sourceval mean_out : out:t -> t -> dim:int list -> keepdim:bool -> t
Sourceval mean_out1 : out:t -> t -> dim:int list -> dtype:Torch_core.Kind.t -> t
Sourceval mean_out2 : out:t -> t -> dim:int list -> keepdim:bool -> dtype:Torch_core.Kind.t -> t
Sourceval median : t -> t
Sourceval median1 : t -> dim:int -> keepdim:bool -> t * t
Sourceval median_out : values:t -> indices:t -> t -> dim:int -> keepdim:bool -> t * t
Sourceval meshgrid : t list -> t list
Sourceval min1 : t -> t -> t
Sourceval min2 : t -> dim:int -> keepdim:bool -> t * t
Sourceval min_out : out:t -> t -> t -> t
Sourceval min_out1 : min:t -> min_indices:t -> t -> dim:int -> keepdim:bool -> t * t
Sourceval min_values : t -> dim:int list -> keepdim:bool -> t
Sourceval miopen_batch_norm : t -> weight:t -> bias:t option -> running_mean:t option -> running_var:t option -> training:bool -> exponential_average_factor:float -> epsilon:float -> t * t * t
Sourceval miopen_batch_norm_backward : t -> grad_output:t -> weight:t -> running_mean:t option -> running_var:t option -> save_mean:t option -> save_var:t option -> epsilon:float -> t * t * t
Sourceval miopen_convolution : t -> weight:t -> bias:t option -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
Sourceval miopen_convolution_backward_bias : grad_output:t -> t
Sourceval miopen_convolution_backward_input : self_size:int list -> grad_output:t -> weight:t -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
Sourceval miopen_convolution_backward_weight : weight_size:int list -> grad_output:t -> t -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
Sourceval miopen_convolution_transpose : t -> weight:t -> bias:t option -> padding:int list -> output_padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
Sourceval miopen_convolution_transpose_backward_input : grad_output:t -> weight:t -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
Sourceval miopen_convolution_transpose_backward_weight : weight_size:int list -> grad_output:t -> t -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
Sourceval miopen_depthwise_convolution : t -> weight:t -> bias:t option -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
Sourceval miopen_depthwise_convolution_backward_input : self_size:int list -> grad_output:t -> weight:t -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
Sourceval miopen_depthwise_convolution_backward_weight : weight_size:int list -> grad_output:t -> t -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> benchmark:bool -> deterministic:bool -> t
Sourceval mkldnn_convolution : t -> weight:t -> bias:t option -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> t
Sourceval mkldnn_convolution_backward_input : self_size:int list -> grad_output:t -> weight:t -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> bias_defined:bool -> t
Sourceval mkldnn_convolution_backward_weights : weight_size:int list -> grad_output:t -> t -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> bias_defined:bool -> t * t
Sourceval mkldnn_linear : t -> weight:t -> bias:t option -> t
Sourceval mkldnn_max_pool2d : t -> kernel_size:int list -> stride:int list -> padding:int list -> dilation:int list -> ceil_mode:bool -> t
Sourceval mkldnn_reorder_conv2d_weight : t -> padding:int list -> stride:int list -> dilation:int list -> groups:int -> t
Sourceval mkldnn_reshape : t -> shape:int list -> t
Sourceval mm_out : out:t -> t -> mat2:t -> t
Sourceval mode : t -> dim:int -> keepdim:bool -> t * t
Sourceval mode_out : values:t -> indices:t -> t -> dim:int -> keepdim:bool -> t * t
Sourceval mse_loss_backward : grad_output:t -> t -> target:t -> reduction:int -> t
Sourceval mse_loss_backward_out : grad_input:t -> grad_output:t -> t -> target:t -> reduction:int -> t
Sourceval mse_loss_out : out:t -> t -> target:t -> reduction:int -> t
Sourceval mul : t -> t -> t
Sourceval mul_ : t -> t -> t
Sourceval mul_out : out:t -> t -> t -> t
Sourceval multi_margin_loss_backward : grad_output:t -> t -> target:t -> p:Torch_core.Wrapper.Scalar.t -> margin:Torch_core.Wrapper.Scalar.t -> weight:t -> reduction:int -> t
Sourceval multi_margin_loss_backward_out : grad_input:t -> grad_output:t -> t -> target:t -> p:Torch_core.Wrapper.Scalar.t -> margin:Torch_core.Wrapper.Scalar.t -> weight:t -> reduction:int -> t
Sourceval multilabel_margin_loss : t -> target:t -> reduction:int -> t
Sourceval multilabel_margin_loss_backward : grad_output:t -> t -> target:t -> reduction:int -> is_target:t -> t
Sourceval multilabel_margin_loss_backward_out : grad_input:t -> grad_output:t -> t -> target:t -> reduction:int -> is_target:t -> t
Sourceval multilabel_margin_loss_out : out:t -> t -> target:t -> reduction:int -> t
Sourceval multinomial : t -> num_samples:int -> replacement:bool -> t
Sourceval multinomial_out : out:t -> t -> num_samples:int -> replacement:bool -> t
Sourceval mv : t -> vec:t -> t
Sourceval mv_out : out:t -> t -> vec:t -> t
Sourceval mvlgamma : t -> p:int -> t
Sourceval mvlgamma_ : t -> p:int -> t
Sourceval narrow : t -> dim:int -> start:int -> length:int -> t
Sourceval narrow_copy : t -> dim:int -> start:int -> length:int -> t
Sourceval native_batch_norm : t -> weight:t option -> bias:t option -> running_mean:t option -> running_var:t option -> training:bool -> momentum:float -> eps:float -> t * t * t
Sourceval native_norm : t -> t
Sourceval ne1 : t -> t -> t
Sourceval ne_1 : t -> t -> t
Sourceval ne_out : out:t -> t -> Torch_core.Wrapper.Scalar.t -> t
Sourceval ne_out1 : out:t -> t -> t -> t
Sourceval neg : t -> t
Sourceval neg_ : t -> t
Sourceval neg_out : out:t -> t -> t
Sourceval nll_loss2d : t -> target:t -> weight:t option -> reduction:int -> ignore_index:int -> t
Sourceval nll_loss2d_backward : grad_output:t -> t -> target:t -> weight:t option -> reduction:int -> ignore_index:int -> total_weight:t -> t
Sourceval nll_loss2d_backward_out : grad_input:t -> grad_output:t -> t -> target:t -> weight:t option -> reduction:int -> ignore_index:int -> total_weight:t -> t
Sourceval nll_loss2d_out : out:t -> t -> target:t -> weight:t option -> reduction:int -> ignore_index:int -> t
Sourceval nll_loss_backward : grad_output:t -> t -> target:t -> weight:t option -> reduction:int -> ignore_index:int -> total_weight:t -> t
Sourceval nll_loss_backward_out : grad_input:t -> grad_output:t -> t -> target:t -> weight:t option -> reduction:int -> ignore_index:int -> total_weight:t -> t
Sourceval nll_loss_out : out:t -> t -> target:t -> weight:t option -> reduction:int -> ignore_index:int -> t
Sourceval nonzero : t -> t
Sourceval nonzero_out : out:t -> t -> t
Sourceval norm : t -> t
Sourceval norm2 : t -> p:Torch_core.Wrapper.Scalar.t -> dim:int list -> keepdim:bool -> t
Sourceval norm3 : t -> p:Torch_core.Wrapper.Scalar.t -> dim:int list -> keepdim:bool -> dtype:Torch_core.Kind.t -> t
Sourceval norm_except_dim : v:t -> pow:int -> dim:int -> t
Sourceval norm_out : out:t -> t -> p:Torch_core.Wrapper.Scalar.t -> dim:int list -> keepdim:bool -> t
Sourceval norm_out1 : out:t -> t -> p:Torch_core.Wrapper.Scalar.t -> dim:int list -> keepdim:bool -> dtype:Torch_core.Kind.t -> t
Sourceval normal : mean:t -> std:float -> t
Sourceval normal1 : mean:float -> std:t -> t
Sourceval normal2 : mean:t -> std:t -> t
Sourceval normal_ : t -> mean:float -> std:float -> t
Sourceval normal_out : out:t -> mean:t -> std:float -> t
Sourceval normal_out1 : out:t -> mean:float -> std:t -> t
Sourceval normal_out2 : out:t -> mean:t -> std:t -> t
Sourceval nuclear_norm : t -> keepdim:bool -> t
Sourceval nuclear_norm_out : out:t -> t -> keepdim:bool -> t
Sourceval one_hot : t -> num_classes:int -> t
Sourceval ones_like : t -> t
Sourceval ones_like1 : t -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval ones_out : out:t -> size:int list -> t
Sourceval orgqr : t -> input2:t -> t
Sourceval orgqr_out : out:t -> t -> input2:t -> t
Sourceval ormqr : t -> input2:t -> input3:t -> left:bool -> transpose:bool -> t
Sourceval ormqr_out : out:t -> t -> input2:t -> input3:t -> left:bool -> transpose:bool -> t
Sourceval pairwise_distance : x1:t -> x2:t -> p:float -> eps:float -> keepdim:bool -> t
Sourceval pdist : t -> p:float -> t
Sourceval permute : t -> dims:int list -> t
Sourceval pin_memory : t -> t
Sourceval pinverse : t -> rcond:float -> t
Sourceval pixel_shuffle : t -> upscale_factor:int -> t
Sourceval poisson : t -> t
Sourceval polygamma : n:int -> t -> t
Sourceval polygamma_ : t -> n:int -> t
Sourceval polygamma_out : out:t -> n:int -> t -> t
Sourceval pow : t -> exponent:Torch_core.Wrapper.Scalar.t -> t
Sourceval pow1 : t -> exponent:t -> t
Sourceval pow2 : Torch_core.Wrapper.Scalar.t -> exponent:t -> t
Sourceval pow_ : t -> exponent:Torch_core.Wrapper.Scalar.t -> t
Sourceval pow_1 : t -> exponent:t -> t
Sourceval pow_out : out:t -> t -> exponent:Torch_core.Wrapper.Scalar.t -> t
Sourceval pow_out1 : out:t -> t -> exponent:t -> t
Sourceval pow_out2 : out:t -> Torch_core.Wrapper.Scalar.t -> exponent:t -> t
Sourceval prelu : t -> weight:t -> t
Sourceval prelu_backward : grad_output:t -> t -> weight:t -> t * t
Sourceval prod : t -> t
Sourceval prod1 : t -> dtype:Torch_core.Kind.t -> t
Sourceval prod2 : t -> dim:int -> keepdim:bool -> t
Sourceval prod3 : t -> dim:int -> dtype:Torch_core.Kind.t -> t
Sourceval prod4 : t -> dim:int -> keepdim:bool -> dtype:Torch_core.Kind.t -> t
Sourceval prod_out : out:t -> t -> dim:int -> keepdim:bool -> t
Sourceval prod_out1 : out:t -> t -> dim:int -> dtype:Torch_core.Kind.t -> t
Sourceval prod_out2 : out:t -> t -> dim:int -> keepdim:bool -> dtype:Torch_core.Kind.t -> t
Sourceval pstrf : t -> upper:bool -> t * t
Sourceval pstrf_out : u:t -> pivot:t -> t -> upper:bool -> t * t
Sourceval put_ : t -> index:t -> source:t -> accumulate:bool -> t
Sourceval qr : t -> t * t
Sourceval qr_out : q:t -> r:t -> t -> t * t
Sourceval quantize_linear : t -> scale:float -> zero_point:int -> t
Sourceval quantized_gru_cell : t -> hx:t -> w_ih:t -> w_hh:t -> b_ih:t -> b_hh:t -> packed_ih:t -> packed_hh:t -> col_offsets_ih:t -> col_offsets_hh:t -> scale_ih:Torch_core.Wrapper.Scalar.t -> scale_hh:Torch_core.Wrapper.Scalar.t -> zero_point_ih:Torch_core.Wrapper.Scalar.t -> zero_point_hh:Torch_core.Wrapper.Scalar.t -> t
Sourceval quantized_lstm : t -> hx:t list -> params:t list -> has_biases:bool -> num_layers:int -> dropout:float -> train:bool -> bidirectional:bool -> batch_first:bool -> t * t * t
Sourceval quantized_lstm_cell : t -> hx:t list -> w_ih:t -> w_hh:t -> b_ih:t -> b_hh:t -> packed_ih:t -> packed_hh:t -> col_offsets_ih:t -> col_offsets_hh:t -> scale_ih:Torch_core.Wrapper.Scalar.t -> scale_hh:Torch_core.Wrapper.Scalar.t -> zero_point_ih:Torch_core.Wrapper.Scalar.t -> zero_point_hh:Torch_core.Wrapper.Scalar.t -> t * t
Sourceval quantized_rnn_relu_cell : t -> hx:t -> w_ih:t -> w_hh:t -> b_ih:t -> b_hh:t -> packed_ih:t -> packed_hh:t -> col_offsets_ih:t -> col_offsets_hh:t -> scale_ih:Torch_core.Wrapper.Scalar.t -> scale_hh:Torch_core.Wrapper.Scalar.t -> zero_point_ih:Torch_core.Wrapper.Scalar.t -> zero_point_hh:Torch_core.Wrapper.Scalar.t -> t
Sourceval quantized_rnn_tanh_cell : t -> hx:t -> w_ih:t -> w_hh:t -> b_ih:t -> b_hh:t -> packed_ih:t -> packed_hh:t -> col_offsets_ih:t -> col_offsets_hh:t -> scale_ih:Torch_core.Wrapper.Scalar.t -> scale_hh:Torch_core.Wrapper.Scalar.t -> zero_point_ih:Torch_core.Wrapper.Scalar.t -> zero_point_hh:Torch_core.Wrapper.Scalar.t -> t
Sourceval rand_like : t -> t
Sourceval rand_like1 : t -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval rand_out : out:t -> size:int list -> t
Sourceval randint : high:int -> size:int list -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval randint1 : low:int -> high:int -> size:int list -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval randint_like : t -> high:int -> t
Sourceval randint_like1 : t -> low:int -> high:int -> t
Sourceval randint_like2 : t -> high:int -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval randint_like3 : t -> low:int -> high:int -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval randint_out : out:t -> high:int -> size:int list -> t
Sourceval randint_out1 : out:t -> low:int -> high:int -> size:int list -> t
Sourceval randn_like : t -> t
Sourceval randn_like1 : t -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval randn_out : out:t -> size:int list -> t
Sourceval random_ : t -> t
Sourceval random_1 : t -> to_:int -> t
Sourceval random_2 : t -> from:int -> to_:int -> t
Sourceval randperm : n:int -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval randperm_out : out:t -> n:int -> t
Sourceval range_out : out:t -> start:Torch_core.Wrapper.Scalar.t -> end_:Torch_core.Wrapper.Scalar.t -> t
Sourceval reciprocal : t -> t
Sourceval reciprocal_ : t -> t
Sourceval reciprocal_out : out:t -> t -> t
Sourceval reflection_pad1d : t -> padding:int list -> t
Sourceval reflection_pad1d_backward : grad_output:t -> t -> padding:int list -> t
Sourceval reflection_pad1d_backward_out : grad_input:t -> grad_output:t -> t -> padding:int list -> t
Sourceval reflection_pad1d_out : out:t -> t -> padding:int list -> t
Sourceval reflection_pad2d : t -> padding:int list -> t
Sourceval reflection_pad2d_backward : grad_output:t -> t -> padding:int list -> t
Sourceval reflection_pad2d_backward_out : grad_input:t -> grad_output:t -> t -> padding:int list -> t
Sourceval reflection_pad2d_out : out:t -> t -> padding:int list -> t
Sourceval relu : t -> t
Sourceval relu_ : t -> t
Sourceval remainder : t -> Torch_core.Wrapper.Scalar.t -> t
Sourceval remainder1 : t -> t -> t
Sourceval remainder_ : t -> Torch_core.Wrapper.Scalar.t -> t
Sourceval remainder_1 : t -> t -> t
Sourceval remainder_out : out:t -> t -> Torch_core.Wrapper.Scalar.t -> t
Sourceval remainder_out1 : out:t -> t -> t -> t
Sourceval renorm : t -> p:Torch_core.Wrapper.Scalar.t -> dim:int -> maxnorm:Torch_core.Wrapper.Scalar.t -> t
Sourceval renorm_ : t -> p:Torch_core.Wrapper.Scalar.t -> dim:int -> maxnorm:Torch_core.Wrapper.Scalar.t -> t
Sourceval renorm_out : out:t -> t -> p:Torch_core.Wrapper.Scalar.t -> dim:int -> maxnorm:Torch_core.Wrapper.Scalar.t -> t
Sourceval repeat : t -> repeats:int list -> t
Sourceval repeat_interleave : repeats:t -> t
Sourceval repeat_interleave1 : t -> repeats:t -> dim:int -> t
Sourceval repeat_interleave2 : t -> repeats:int -> dim:int -> t
Sourceval replication_pad1d : t -> padding:int list -> t
Sourceval replication_pad1d_backward : grad_output:t -> t -> padding:int list -> t
Sourceval replication_pad1d_backward_out : grad_input:t -> grad_output:t -> t -> padding:int list -> t
Sourceval replication_pad1d_out : out:t -> t -> padding:int list -> t
Sourceval replication_pad2d : t -> padding:int list -> t
Sourceval replication_pad2d_backward : grad_output:t -> t -> padding:int list -> t
Sourceval replication_pad2d_backward_out : grad_input:t -> grad_output:t -> t -> padding:int list -> t
Sourceval replication_pad2d_out : out:t -> t -> padding:int list -> t
Sourceval replication_pad3d : t -> padding:int list -> t
Sourceval replication_pad3d_backward : grad_output:t -> t -> padding:int list -> t
Sourceval replication_pad3d_backward_out : grad_input:t -> grad_output:t -> t -> padding:int list -> t
Sourceval replication_pad3d_out : out:t -> t -> padding:int list -> t
Sourceval reshape : t -> shape:int list -> t
Sourceval reshape_as : t -> t -> t
Sourceval resize_ : t -> size:int list -> t
Sourceval resize_as_ : t -> the_template:t -> t
Sourceval rfft : t -> signal_ndim:int -> normalized:bool -> onesided:bool -> t
Sourceval rnn_relu : t -> hx:t -> params:t list -> has_biases:bool -> num_layers:int -> dropout:float -> train:bool -> bidirectional:bool -> batch_first:bool -> t * t
Sourceval rnn_relu1 : data:t -> batch_sizes:t -> hx:t -> params:t list -> has_biases:bool -> num_layers:int -> dropout:float -> train:bool -> bidirectional:bool -> t * t
Sourceval rnn_relu_cell : t -> hx:t -> w_ih:t -> w_hh:t -> b_ih:t option -> b_hh:t option -> t
Sourceval rnn_tanh : t -> hx:t -> params:t list -> has_biases:bool -> num_layers:int -> dropout:float -> train:bool -> bidirectional:bool -> batch_first:bool -> t * t
Sourceval rnn_tanh1 : data:t -> batch_sizes:t -> hx:t -> params:t list -> has_biases:bool -> num_layers:int -> dropout:float -> train:bool -> bidirectional:bool -> t * t
Sourceval rnn_tanh_cell : t -> hx:t -> w_ih:t -> w_hh:t -> b_ih:t option -> b_hh:t option -> t
Sourceval roll : t -> shifts:int list -> dims:int list -> t
Sourceval rot90 : t -> k:int -> dims:int list -> t
Sourceval round : t -> t
Sourceval round_ : t -> t
Sourceval round_out : out:t -> t -> t
Sourceval rrelu : t -> training:bool -> t
Sourceval rrelu_ : t -> training:bool -> t
Sourceval rrelu_with_noise : t -> noise:t -> training:bool -> t
Sourceval rrelu_with_noise_ : t -> noise:t -> training:bool -> t
Sourceval rrelu_with_noise_backward : grad_output:t -> t -> noise:t -> lower:Torch_core.Wrapper.Scalar.t -> upper:Torch_core.Wrapper.Scalar.t -> training:bool -> t
Sourceval rrelu_with_noise_backward_out : grad_input:t -> grad_output:t -> t -> noise:t -> lower:Torch_core.Wrapper.Scalar.t -> upper:Torch_core.Wrapper.Scalar.t -> training:bool -> t
Sourceval rrelu_with_noise_out : out:t -> t -> noise:t -> training:bool -> t
Sourceval rsqrt : t -> t
Sourceval rsqrt_ : t -> t
Sourceval rsqrt_out : out:t -> t -> t
Sourceval rsub : t -> t -> t
Sourceval s_copy_ : t -> src:t -> non_blocking:bool -> t
Sourceval s_native_addmm : t -> mat1:t -> mat2:t -> t
Sourceval s_native_addmm_ : t -> mat1:t -> mat2:t -> t
Sourceval s_native_addmm_out : out:t -> t -> mat1:t -> mat2:t -> t
Sourceval scatter : t -> dim:int -> index:t -> src:t -> t
Sourceval scatter1 : t -> dim:int -> index:t -> value:Torch_core.Wrapper.Scalar.t -> t
Sourceval scatter_ : t -> dim:int -> index:t -> src:t -> t
Sourceval scatter_1 : t -> dim:int -> index:t -> value:Torch_core.Wrapper.Scalar.t -> t
Sourceval scatter_add : t -> dim:int -> index:t -> src:t -> t
Sourceval scatter_add_ : t -> dim:int -> index:t -> src:t -> t
Sourceval selu : t -> t
Sourceval selu_ : t -> t
Sourceval set_ : t -> t
Sourceval set_1 : t -> source:t -> t
Sourceval set_requires_grad : t -> r:bool -> t
Sourceval sigmoid : t -> t
Sourceval sigmoid_ : t -> t
Sourceval sigmoid_backward : grad_output:t -> output:t -> t
Sourceval sigmoid_backward_out : grad_input:t -> grad_output:t -> output:t -> t
Sourceval sigmoid_out : out:t -> t -> t
Sourceval sign : t -> t
Sourceval sign_ : t -> t
Sourceval sign_out : out:t -> t -> t
Sourceval sin : t -> t
Sourceval sin_ : t -> t
Sourceval sin_out : out:t -> t -> t
Sourceval sinh : t -> t
Sourceval sinh_ : t -> t
Sourceval sinh_out : out:t -> t -> t
Sourceval slice : t -> dim:int -> start:int -> end_:int -> step:int -> t
Sourceval slogdet : t -> t * t
Sourceval smm : t -> mat2:t -> t
Sourceval smooth_l1_loss : t -> target:t -> reduction:int -> t
Sourceval smooth_l1_loss_backward : grad_output:t -> t -> target:t -> reduction:int -> t
Sourceval smooth_l1_loss_backward_out : grad_input:t -> grad_output:t -> t -> target:t -> reduction:int -> t
Sourceval smooth_l1_loss_out : out:t -> t -> target:t -> reduction:int -> t
Sourceval soft_margin_loss : t -> target:t -> reduction:int -> t
Sourceval soft_margin_loss_backward : grad_output:t -> t -> target:t -> reduction:int -> t
Sourceval soft_margin_loss_backward_out : grad_input:t -> grad_output:t -> t -> target:t -> reduction:int -> t
Sourceval soft_margin_loss_out : out:t -> t -> target:t -> reduction:int -> t
Sourceval softmax : t -> dim:int -> t
Sourceval softmax1 : t -> dim:int -> dtype:Torch_core.Kind.t -> t
Sourceval softplus : t -> t
Sourceval softplus_backward : grad_output:t -> t -> beta:Torch_core.Wrapper.Scalar.t -> threshold:Torch_core.Wrapper.Scalar.t -> output:t -> t
Sourceval softplus_backward_out : grad_input:t -> grad_output:t -> t -> beta:Torch_core.Wrapper.Scalar.t -> threshold:Torch_core.Wrapper.Scalar.t -> output:t -> t
Sourceval softplus_out : out:t -> t -> t
Sourceval softshrink : t -> t
Sourceval softshrink_backward : grad_output:t -> t -> lambd:Torch_core.Wrapper.Scalar.t -> t
Sourceval softshrink_backward_out : grad_input:t -> grad_output:t -> t -> lambd:Torch_core.Wrapper.Scalar.t -> t
Sourceval softshrink_out : out:t -> t -> t
Sourceval solve : t -> a:t -> t * t
Sourceval solve_out : solution:t -> lu:t -> t -> a:t -> t * t
Sourceval sort : t -> dim:int -> descending:bool -> t * t
Sourceval sort_out : values:t -> indices:t -> t -> dim:int -> descending:bool -> t * t
Sourceval sparse_coo_tensor : size:int list -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval sparse_coo_tensor1 : indices:t -> values:t -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval sparse_coo_tensor2 : indices:t -> values:t -> size:int list -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval sparse_resize_ : t -> size:int list -> sparse_dim:int -> dense_dim:int -> t
Sourceval sparse_resize_and_clear_ : t -> size:int list -> sparse_dim:int -> dense_dim:int -> t
Sourceval split : t -> split_size:int -> dim:int -> t list
Sourceval split_with_sizes : t -> split_sizes:int list -> dim:int -> t list
Sourceval sqrt : t -> t
Sourceval sqrt_ : t -> t
Sourceval sqrt_out : out:t -> t -> t
Sourceval squeeze : t -> t
Sourceval squeeze1 : t -> dim:int -> t
Sourceval squeeze_ : t -> t
Sourceval squeeze_1 : t -> dim:int -> t
Sourceval sspaddmm : t -> mat1:t -> mat2:t -> t
Sourceval sspaddmm_out : out:t -> t -> mat1:t -> mat2:t -> t
Sourceval stack : t list -> dim:int -> t
Sourceval stack_out : out:t -> t list -> dim:int -> t
Sourceval std : t -> unbiased:bool -> t
Sourceval std1 : t -> dim:int list -> unbiased:bool -> keepdim:bool -> t
Sourceval std_out : out:t -> t -> dim:int list -> unbiased:bool -> keepdim:bool -> t
Sourceval stft : t -> n_fft:int -> hop_length:int -> win_length:int -> window:t option -> normalized:bool -> onesided:bool -> t
Sourceval sub : t -> t -> t
Sourceval sub_ : t -> t -> t
Sourceval sub_out : out:t -> t -> t -> t
Sourceval sum1 : t -> dtype:Torch_core.Kind.t -> t
Sourceval sum2 : t -> dim:int list -> keepdim:bool -> t
Sourceval sum3 : t -> dim:int list -> dtype:Torch_core.Kind.t -> t
Sourceval sum4 : t -> dim:int list -> keepdim:bool -> dtype:Torch_core.Kind.t -> t
Sourceval sum_out : out:t -> t -> dim:int list -> keepdim:bool -> t
Sourceval sum_out1 : out:t -> t -> dim:int list -> dtype:Torch_core.Kind.t -> t
Sourceval sum_out2 : out:t -> t -> dim:int list -> keepdim:bool -> dtype:Torch_core.Kind.t -> t
Sourceval sum_to_size : t -> size:int list -> t
Sourceval svd : t -> some:bool -> compute_uv:bool -> t * t * t
Sourceval svd_out : u:t -> s:t -> v:t -> t -> some:bool -> compute_uv:bool -> t * t * t
Sourceval symeig : t -> eigenvectors:bool -> upper:bool -> t * t
Sourceval symeig_out : e:t -> v:t -> t -> eigenvectors:bool -> upper:bool -> t * t
Sourceval tr : t -> t
Sourceval t_ : t -> t
Sourceval take : t -> index:t -> t
Sourceval take_out : out:t -> t -> index:t -> t
Sourceval tan : t -> t
Sourceval tan_ : t -> t
Sourceval tan_out : out:t -> t -> t
Sourceval tanh : t -> t
Sourceval tanh_ : t -> t
Sourceval tanh_backward : grad_output:t -> output:t -> t
Sourceval tanh_backward_out : grad_input:t -> grad_output:t -> output:t -> t
Sourceval tanh_out : out:t -> t -> t
Sourceval tensordot : t -> t -> dims_self:int list -> dims_other:int list -> t
Sourceval threshold : t -> threshold:Torch_core.Wrapper.Scalar.t -> value:Torch_core.Wrapper.Scalar.t -> t
Sourceval threshold_ : t -> threshold:Torch_core.Wrapper.Scalar.t -> value:Torch_core.Wrapper.Scalar.t -> t
Sourceval threshold_backward : grad_output:t -> t -> threshold:Torch_core.Wrapper.Scalar.t -> t
Sourceval threshold_out : out:t -> t -> threshold:Torch_core.Wrapper.Scalar.t -> value:Torch_core.Wrapper.Scalar.t -> t
Sourceval to_ : t -> device:Torch_core.Device.t -> t
Sourceval to1 : t -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> non_blocking:bool -> copy:bool -> t
Sourceval to2 : t -> dtype:Torch_core.Kind.t -> non_blocking:bool -> copy:bool -> t
Sourceval to3 : t -> t -> non_blocking:bool -> copy:bool -> t
Sourceval to4 : t -> device:Torch_core.Device.t -> dtype:Torch_core.Kind.t -> non_blocking:bool -> copy:bool -> t
Sourceval to_dense : t -> t
Sourceval to_dense_backward : grad:t -> t -> t
Sourceval to_mkldnn : t -> t
Sourceval to_mkldnn_backward : grad:t -> t -> t
Sourceval to_sparse : t -> t
Sourceval to_sparse1 : t -> sparse_dim:int -> t
Sourceval topk : t -> k:int -> dim:int -> largest:bool -> sorted:bool -> t * t
Sourceval topk_out : values:t -> indices:t -> t -> k:int -> dim:int -> largest:bool -> sorted:bool -> t * t
Sourceval totype : t -> scalar_type:Torch_core.Kind.t -> t
Sourceval trace : t -> t
Sourceval transpose : t -> dim0:int -> dim1:int -> t
Sourceval transpose_ : t -> dim0:int -> dim1:int -> t
Sourceval triangular_solve : t -> a:t -> upper:bool -> transpose:bool -> unitriangular:bool -> t * t
Sourceval triangular_solve_out : x:t -> m:t -> t -> a:t -> upper:bool -> transpose:bool -> unitriangular:bool -> t * t
Sourceval tril : t -> diagonal:int -> t
Sourceval tril_ : t -> diagonal:int -> t
Sourceval tril_indices : row:int -> col:int -> offset:int -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval tril_out : out:t -> t -> diagonal:int -> t
Sourceval triplet_margin_loss : anchor:t -> positive:t -> negative:t -> margin:float -> p:float -> eps:float -> swap:bool -> reduction:int -> t
Sourceval triu : t -> diagonal:int -> t
Sourceval triu_ : t -> diagonal:int -> t
Sourceval triu_indices : row:int -> col:int -> offset:int -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval triu_out : out:t -> t -> diagonal:int -> t
Sourceval trunc : t -> t
Sourceval trunc_ : t -> t
Sourceval trunc_out : out:t -> t -> t
Sourceval type_as : t -> t -> t
Sourceval unbind : t -> dim:int -> t list
Sourceval unfold : t -> dimension:int -> size:int -> step:int -> t
Sourceval uniform_ : t -> from:float -> to_:float -> t
Sourceval unique_consecutive : t -> return_inverse:bool -> return_counts:bool -> dim:int -> t * t * t
Sourceval unique_dim : t -> dim:int -> sorted:bool -> return_inverse:bool -> return_counts:bool -> t * t * t
Sourceval unique_dim_consecutive : t -> dim:int -> return_inverse:bool -> return_counts:bool -> t * t * t
Sourceval unsqueeze : t -> dim:int -> t
Sourceval unsqueeze_ : t -> dim:int -> t
Sourceval upsample_bicubic2d : t -> output_size:int list -> align_corners:bool -> t
Sourceval upsample_bicubic2d_backward : grad_output:t -> output_size:int list -> input_size:int list -> align_corners:bool -> t
Sourceval upsample_bicubic2d_backward_out : grad_input:t -> grad_output:t -> output_size:int list -> input_size:int list -> align_corners:bool -> t
Sourceval upsample_bicubic2d_out : out:t -> t -> output_size:int list -> align_corners:bool -> t
Sourceval upsample_bilinear2d : t -> output_size:int list -> align_corners:bool -> t
Sourceval upsample_bilinear2d_backward : grad_output:t -> output_size:int list -> input_size:int list -> align_corners:bool -> t
Sourceval upsample_bilinear2d_backward_out : grad_input:t -> grad_output:t -> output_size:int list -> input_size:int list -> align_corners:bool -> t
Sourceval upsample_bilinear2d_out : out:t -> t -> output_size:int list -> align_corners:bool -> t
Sourceval upsample_linear1d : t -> output_size:int list -> align_corners:bool -> t
Sourceval upsample_linear1d_backward : grad_output:t -> output_size:int list -> input_size:int list -> align_corners:bool -> t
Sourceval upsample_linear1d_backward_out : grad_input:t -> grad_output:t -> output_size:int list -> input_size:int list -> align_corners:bool -> t
Sourceval upsample_linear1d_out : out:t -> t -> output_size:int list -> align_corners:bool -> t
Sourceval upsample_nearest1d : t -> output_size:int list -> t
Sourceval upsample_nearest1d_backward : grad_output:t -> output_size:int list -> input_size:int list -> t
Sourceval upsample_nearest1d_backward_out : grad_input:t -> grad_output:t -> output_size:int list -> input_size:int list -> t
Sourceval upsample_nearest1d_out : out:t -> t -> output_size:int list -> t
Sourceval upsample_nearest2d : t -> output_size:int list -> t
Sourceval upsample_nearest2d_backward : grad_output:t -> output_size:int list -> input_size:int list -> t
Sourceval upsample_nearest2d_backward_out : grad_input:t -> grad_output:t -> output_size:int list -> input_size:int list -> t
Sourceval upsample_nearest2d_out : out:t -> t -> output_size:int list -> t
Sourceval upsample_nearest3d : t -> output_size:int list -> t
Sourceval upsample_nearest3d_backward : grad_output:t -> output_size:int list -> input_size:int list -> t
Sourceval upsample_nearest3d_backward_out : grad_input:t -> grad_output:t -> output_size:int list -> input_size:int list -> t
Sourceval upsample_nearest3d_out : out:t -> t -> output_size:int list -> t
Sourceval upsample_trilinear3d : t -> output_size:int list -> align_corners:bool -> t
Sourceval upsample_trilinear3d_backward : grad_output:t -> output_size:int list -> input_size:int list -> align_corners:bool -> t
Sourceval upsample_trilinear3d_backward_out : grad_input:t -> grad_output:t -> output_size:int list -> input_size:int list -> align_corners:bool -> t
Sourceval upsample_trilinear3d_out : out:t -> t -> output_size:int list -> align_corners:bool -> t
Sourceval values : t -> t
Sourceval var : t -> unbiased:bool -> t
Sourceval var1 : t -> dim:int list -> unbiased:bool -> keepdim:bool -> t
Sourceval var_out : out:t -> t -> dim:int list -> unbiased:bool -> keepdim:bool -> t
Sourceval view : t -> size:int list -> t
Sourceval view_as : t -> t -> t
Sourceval where : condition:t -> t -> t -> t
Sourceval zero_ : t -> t
Sourceval zeros_like : t -> t
Sourceval zeros_like1 : t -> options:(Torch_core.Kind.t * Torch_core.Device.t) -> t
Sourceval zeros_out : out:t -> size:int list -> t
val new_tensor : unit -> t
val int_vec : ?kind:[ `int | `int16 | `int64 | `int8 | `uint8 ] -> int list -> t
val shape : t -> int list
val shape1_exn : t -> int
val shape2_exn : t -> int * int
val shape3_exn : t -> int * int * int
val shape4_exn : t -> int * int * int * int
val requires_grad : t -> bool
val grad_set_enabled : bool -> bool
val get : t -> int -> t
val select : t -> dim:int -> index:int -> t
val float_value : t -> float
val int_value : t -> int
val float_get : t -> int list -> float
val int_get : t -> int list -> int
val float_set : t -> int list -> float -> unit
val int_set : t -> int list -> int -> unit
val fill_float : t -> float -> unit
val fill_int : t -> int -> unit
val backward : ?keep_graph:bool -> ?create_graph:bool -> t -> unit
val run_backward : ?keep_graph:bool -> ?create_graph:bool -> t list -> t list -> t list
val print : t -> unit
val to_string : t -> line_size:int -> string
Sourceval sum : t -> t
Sourceval mean : t -> t
Sourceval argmax : ?dim:int -> ?keepdim:bool -> t -> t
Sourceval defined : t -> bool
Sourceval copy_ : t -> src:t -> unit
Sourceval max : t -> t -> t
Sourceval min : t -> t -> t
Sourceval set_float2 : t -> int -> int -> float -> unit
Sourceval set_float1 : t -> int -> float -> unit
Sourceval set_int2 : t -> int -> int -> int -> unit
Sourceval set_int1 : t -> int -> int -> unit
Sourceval get_float2 : t -> int -> int -> float
Sourceval get_float1 : t -> int -> float
Sourceval get_int2 : t -> int -> int -> int
Sourceval get_int1 : t -> int -> int
Sourceval (.%{}) : t -> int list -> int
Sourceval (.%{}<-) : t -> int list -> int -> unit
Sourceval (.%.{}) : t -> int list -> float
Sourceval (.%.{}<-) : t -> int list -> float -> unit
Sourceval (.%[]) : t -> int -> int
Sourceval (.%[]<-) : t -> int -> int -> unit
Sourceval (.%.[]) : t -> int -> float
Sourceval (.%.[]<-) : t -> int -> float -> unit
Sourceval no_grad_ : t -> f:(t -> 'a) -> 'a
Sourceval no_grad : (unit -> 'a) -> 'a
Sourceval zero_grad : t -> unit
val (+) : t -> t -> t
val (-) : t -> t -> t
val (*) : t -> t -> t
val (/) : t -> t -> t
val (+=) : t -> t -> unit
val (-=) : t -> t -> unit
val (*=) : t -> t -> unit
val (/=) : t -> t -> unit
val (~-) : t -> t
val (=) : t -> t -> t
val mm : t -> t -> t
val f : float -> t
type create = ?requires_grad:bool -> ?kind:Torch_core.Kind.t -> ?device:Torch_core.Device.t -> ?scale:float -> int list -> t
val zeros : create
val ones : create
val rand : create
val randn : create
val float_vec : ?kind:[ `double | `float | `half ] -> ?device:Torch_core.Device.t -> float list -> t
val to_type : t -> type_:Torch_core.Kind.t -> t
val type_ : t -> Torch_core.Kind.t
val to_device : ?device:Torch_core.Device.t -> t -> t
val to_float0 : t -> float option
val to_float1 : t -> float array option
val to_float2 : t -> float array array option
val to_float3 : t -> float array array array option
val to_float0_exn : t -> float
val to_float1_exn : t -> float array
val to_float2_exn : t -> float array array
val to_float3_exn : t -> float array array array
val to_int0 : t -> int option
val to_int1 : t -> int array option
val to_int2 : t -> int array array option
val to_int3 : t -> int array array array option
val to_int0_exn : t -> int
val to_int1_exn : t -> int array
val to_int2_exn : t -> int array array
val to_int3_exn : t -> int array array array
val of_float0 : float -> t
val of_float1 : float array -> t
val of_float2 : float array array -> t
val of_float3 : float array array array -> t
val of_int0 : int -> t
val of_int1 : int array -> t
val of_int2 : int array array -> t
val of_int3 : int array array array -> t
val conv2d : ?padding:(int * int) -> ?dilation:(int * int) -> ?groups:int -> t -> t -> t option -> stride:(int * int) -> t
val conv_transpose2d : ?output_padding:(int * int) -> ?padding:(int * int) -> ?dilation:(int * int) -> ?groups:int -> t -> t -> t option -> stride:(int * int) -> t
val max_pool2d : ?padding:(int * int) -> ?dilation:(int * int) -> ?ceil_mode:bool -> ?stride:(int * int) -> t -> ksize:(int * int) -> t
val avg_pool2d : ?padding:(int * int) -> ?count_include_pad:bool -> ?ceil_mode:bool -> ?stride:(int * int) -> t -> ksize:(int * int) -> t
val const_batch_norm : ?momentum:float -> ?eps:float -> t -> t
val of_bigarray : ('a, 'b, Bigarray.c_layout) Bigarray.Genarray.t -> t
val copy_to_bigarray : t -> ('b, 'a, Bigarray.c_layout) Bigarray.Genarray.t -> unit
val to_bigarray : t -> kind:('a, 'b) Bigarray.kind -> ('a, 'b, Bigarray.c_layout) Bigarray.Genarray.t
val cross_entropy_for_logits : ?reduction:Torch_core.Reduction.t -> t -> targets:t -> t
val dropout : t -> p:float -> is_training:bool -> t
val nll_loss : ?reduction:Torch_core.Reduction.t -> t -> targets:t -> t
val bce_loss : ?reduction:Torch_core.Reduction.t -> t -> targets:t -> t
val bce_loss_with_logits : ?reduction:Torch_core.Reduction.t -> t -> targets:t -> t
val mse_loss : ?reduction:Torch_core.Reduction.t -> t -> t -> t
val huber_loss : ?reduction:Torch_core.Reduction.t -> t -> t -> t
val pp : Format.formatter -> t -> unit
val copy : t -> t
val shape_str : t -> string
val print_shape : ?name:string -> t -> unit
val minimum : t -> t
val maximum : t -> t
val flatten : t -> t
val squeeze_last : t -> t
val scale : t -> float -> t