diff --git a/.gitattributes b/.gitattributes
index 275d29b51ce6f29f3e187d99b9534c4cb73f6ed6..f0564dda72484dace94bdc9dadbba4c2b184dae3 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -64,3 +64,9 @@ phivenv/Lib/site-packages/regex/__pycache__/_regex_core.cpython-39.pyc filter=lf
 phivenv/Lib/site-packages/setuptools/_vendor/more_itertools/__pycache__/more.cpython-39.pyc filter=lfs diff=lfs merge=lfs -text
 phivenv/Lib/site-packages/safetensors/_safetensors_rust.pyd filter=lfs diff=lfs merge=lfs -text
 phivenv/Lib/site-packages/setuptools/_vendor/__pycache__/pyparsing.cpython-39.pyc filter=lfs diff=lfs merge=lfs -text
+phivenv/Lib/site-packages/sympy/core/tests/__pycache__/test_args.cpython-39.pyc filter=lfs diff=lfs merge=lfs -text
+phivenv/Lib/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-39.pyc filter=lfs diff=lfs merge=lfs -text
+phivenv/Lib/site-packages/sympy/core/__pycache__/expr.cpython-39.pyc filter=lfs diff=lfs merge=lfs -text
+phivenv/Lib/site-packages/sympy/core/__pycache__/function.cpython-39.pyc filter=lfs diff=lfs merge=lfs -text
+phivenv/Lib/site-packages/sympy/core/__pycache__/numbers.cpython-39.pyc filter=lfs diff=lfs merge=lfs -text
+phivenv/Lib/site-packages/sympy/logic/__pycache__/boolalg.cpython-39.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/phivenv/Lib/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-39.pyc b/phivenv/Lib/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5ccd4409d35caeb8ee75903d18fcec12f9fecd9
--- /dev/null
+++ b/phivenv/Lib/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-39.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53ff032305eacb7f38e97ddc7aa2c351db56d9375d2b4879b948dbdec9a5241c
+size 153296
diff --git a/phivenv/Lib/site-packages/sympy/core/__pycache__/expr.cpython-39.pyc b/phivenv/Lib/site-packages/sympy/core/__pycache__/expr.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dacaa6b3aa759e97365509d59b416045703b6fba
--- /dev/null
+++ b/phivenv/Lib/site-packages/sympy/core/__pycache__/expr.cpython-39.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:494787b1ef4e30693899a009e5d1d951970f9cd5c70179d32d5479b656ef2315
+size 117597
diff --git a/phivenv/Lib/site-packages/sympy/core/__pycache__/function.cpython-39.pyc b/phivenv/Lib/site-packages/sympy/core/__pycache__/function.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..27f4f964b2581517f63757480a922a688dc10ad1
--- /dev/null
+++ b/phivenv/Lib/site-packages/sympy/core/__pycache__/function.cpython-39.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1dcd94daf74ef3961393841e4b00de5114c5ca60dff70e79ab547b8466286d8
+size 102188
diff --git a/phivenv/Lib/site-packages/sympy/core/__pycache__/numbers.cpython-39.pyc b/phivenv/Lib/site-packages/sympy/core/__pycache__/numbers.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72e40b0e67d7bbd8294dc730bb19659790c77dde
--- /dev/null
+++ b/phivenv/Lib/site-packages/sympy/core/__pycache__/numbers.cpython-39.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:520cc03f0533b4a93af3bef55fe23099a92d3c284bb6c3e19ef2aa417c33b239
+size 123920
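Reviewer note (not part of the diff): the six .gitattributes entries above are what route the new .pyc binaries through Git LFS, so each file below is committed as a three-line pointer stub rather than its real contents. Anatomy of a pointer, annotated using the perm_groups.cpython-39.pyc hunk that precedes this note:

    version https://git-lfs.github.com/spec/v1   <- LFS pointer spec version
    oid sha256:53ff0323...                       <- content hash keying the real blob in LFS storage
    size 153296                                  <- byte size of the real file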
diff --git a/phivenv/Lib/site-packages/sympy/core/tests/__pycache__/test_args.cpython-39.pyc b/phivenv/Lib/site-packages/sympy/core/tests/__pycache__/test_args.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..245cdf906ba41ea31761c9264b6b924d64537c82
--- /dev/null
+++ b/phivenv/Lib/site-packages/sympy/core/tests/__pycache__/test_args.cpython-39.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bd5c59acd8381963c3706398112d9cd244f7b691a9199887a6f06954761446d
+size 248121
diff --git a/phivenv/Lib/site-packages/sympy/logic/__pycache__/boolalg.cpython-39.pyc b/phivenv/Lib/site-packages/sympy/logic/__pycache__/boolalg.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3aa67057d81af34be45451f613394fcec947b537
--- /dev/null
+++ b/phivenv/Lib/site-packages/sympy/logic/__pycache__/boolalg.cpython-39.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30282336ec7c1d654b8aedf482e6b3e65b637618db1ab095e4e22023027efc28
+size 103941
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_divide_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_divide_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1651f259f3e4cafb91ff94c4fd79ec8fa17d26a4
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_divide_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
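Reviewer note (not part of the diff): headers named *_<key>_dispatch.h, like floor_divide_cuda_dispatch.h above, re-declare an operator once per dispatch key, so at::cuda::floor_divide is the raw CUDA kernel entry point while plain at::floor_divide goes through the dispatcher. A minimal sketch of the distinction (assumes a CUDA build and a CUDA tensor t):

    #include <ATen/ATen.h>
    #include <ATen/ops/floor_divide_cuda_dispatch.h>

    void demo(const at::Tensor & t) {
      // Dispatcher path: picks the kernel from t's dispatch key set (CPU, CUDA, ...).
      at::Tensor a = at::floor_divide(t, t);
      // Direct path: calls the CUDA kernel unconditionally, skipping dispatch;
      // only correct when t really is a CUDA tensor.
      at::Tensor b = at::cuda::floor_divide(t, t);
    }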
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_divide_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_divide_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bbe7b0aefc072321b005a3166f5656b3ca191274
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_divide_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_divide_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_divide_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..63273ef0aa069bfa2e8fc9c58da8833014a9bf09
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_divide_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & floor_divide_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor floor_divide_sparse(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & floor_divide_out_sparse_zerodim(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & floor_divide_sparse_(at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor floor_divide(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & floor_divide_Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & floor_divide_(at::Tensor & self, const at::Scalar & other);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_divide_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_divide_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d6a0b2157dc86509182e08cacc398299f174b0d
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_divide_ops.h
@@ -0,0 +1,84 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API floor_divide {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::floor_divide";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "floor_divide(Tensor self, Tensor other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API floor_divide__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::floor_divide_";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API floor_divide_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::floor_divide";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API floor_divide_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::floor_divide";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "floor_divide.Scalar(Tensor self, Scalar other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API floor_divide__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::floor_divide_";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API floor_divide_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::floor_divide";
+  static constexpr const char* overload_name = "Scalar_out";
+  static constexpr const char* schema_str = "floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
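Reviewer note (not part of the diff): each struct in an *_ops.h header is a compile-time record of one operator overload. name, overload_name, and schema_str mirror the native_functions.yaml entry; call() enters the dispatcher from the top, while redispatch() re-enters it below the dispatch keys already handled (used by autograd and other wrapper kernels). The inline at:: functions in the Function.h-style headers later in this diff are thin forwards onto call(), roughly:

    #include <ATen/core/Tensor.h>
    #include <ATen/ops/floor_divide_ops.h>

    // What at::floor_divide(self, other) expands to: a full dispatch from the top.
    at::Tensor my_floor_divide(const at::Tensor & self, const at::Tensor & other) {
      return at::_ops::floor_divide::call(self, other);
    }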
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..2c90ee274033bf37c04b06527f8090c5e2c875ef
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_floor : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ec945ed89fe0ea9138b346d6b4cb7757e01b2a6d
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor floor(const at::Tensor & self);
+TORCH_API at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & floor_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..44a3cea218ed5ba1d7474e2001cb51dcbbc1e720
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_floor_out : public at::meta::structured_floor {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+TORCH_API at::Tensor floor_sparse(const at::Tensor & self);
+TORCH_API at::Tensor & floor_sparse_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & floor_sparse_(at::Tensor & self);
+TORCH_API at::Tensor floor_sparse_csr(const at::Tensor & self);
+TORCH_API at::Tensor & floor_sparse_csr_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & floor_sparse_csr_(at::Tensor & self);
+} // namespace native
+} // namespace at
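Reviewer note (not part of the diff): floor_meta.h and floor_native.h above are the two halves of a structured kernel. structured_floor::meta() runs shape/dtype inference and configures the TensorIterator; structured_floor_out::impl() performs the computation, and the out=, in-place, and functional variants are all derived from that one pair. In ATen kernel sources the pair is spelled with macros, roughly like this sketch (simplified from the pattern in ATen's UnaryOps.cpp):

    TORCH_META_FUNC(floor)(const at::Tensor & self) {
      build_borrowing_unary_op(maybe_get_output(), self);  // infer shape/dtype, set up iterator
    }

    TORCH_IMPL_FUNC(floor_out)(const at::Tensor & self, const at::Tensor & out) {
      floor_stub(device_type(), *this);  // run the device-specific elementwise loop
    }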
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c40cbcee3acc90ea32e68cc874ea567c570968f0
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/floor_ops.h
@@ -0,0 +1,51 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API floor {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::floor";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "floor(Tensor self) -> Tensor";
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API floor_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::floor_";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "floor_(Tensor(a!) self) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API floor_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::floor";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax.h
new file mode 100644
index 0000000000000000000000000000000000000000..20249087336bd96d1e4fce349078b08aafd3b85c
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::fmax(Tensor self, Tensor other) -> Tensor
+inline at::Tensor fmax(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::fmax::call(self, other);
+}
+
+// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmax_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::fmax_out::call(self, other, out);
+}
+// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmax_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::fmax_out::call(self, other, out);
+}
+
+}
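Reviewer note (not part of the diff): Function.h-style headers such as fmax.h emit every out= overload twice. fmax_out takes the output first (the ergonomic C++ order) and fmax_outf takes it last (the schema order), and both forward to the same at::_ops::fmax_out::call, so the two spellings are interchangeable:

    #include <ATen/ATen.h>

    void fmax_into(const at::Tensor & a, const at::Tensor & b, at::Tensor & out) {
      at::fmax_out(out, a, b);   // out-first spelling
      at::fmax_outf(a, b, out);  // schema-order spelling; same underlying call
    }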
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5c2e9ea743c25f19ff9050de8f241298d0c61a09
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor fmax(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f42ae5cdf369acffa64834555f4865e83f390c0a
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor fmax(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmax_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmax_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..11b7bcb3e51abfd9f5c81761ba1eba31ba847664
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor fmax(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmax_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmax_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..28b3ed66755d9098c1f7e4a17e7d27fa58581674
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_fmax : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0d9c1e46a0c1449a0a996dccc75fd056321de1a7
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor fmax(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmax_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmax_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a5a6575a0d7cb3d64a676dfd53203a189b837c67
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_fmax_out : public at::meta::structured_fmax {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..54e28869c2cef3250c251049776ded40238e4ca1
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmax_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fmax {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fmax";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "fmax(Tensor self, Tensor other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API fmax_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fmax";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin.h
new file mode 100644
index 0000000000000000000000000000000000000000..230cf5e7c0763e39e90a81acffc2bde84a9bd186
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::fmin(Tensor self, Tensor other) -> Tensor
+inline at::Tensor fmin(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::fmin::call(self, other);
+}
+
+// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmin_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::fmin_out::call(self, other, out);
+}
+// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmin_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::fmin_out::call(self, other, out);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0286e264723ba9bc33026441dfffdb783329fa3a
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor fmin(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fdfd52efeec5f0c44d1b272e257868a440b0e088
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor fmin(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmin_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmin_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..db82fc298073e12adcfc6f18d9cb094788d2d69a
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor fmin(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmin_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmin_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..44806742c6978ad4b26a7fa82302dfec52271a03
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_fmin : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a58e3a0ee340b543ac5fbab22c1f95c062c4ce9d
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor fmin(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmin_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmin_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..16819dd9d9191ba1d41d453be54a982dffec0731
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_fmin_out : public at::meta::structured_fmin {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..4781927e994dd6a4bdf86159589c1e15f4adc30e
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmin_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fmin {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fmin";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "fmin(Tensor self, Tensor other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API fmin_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fmin";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod.h
new file mode 100644
index 0000000000000000000000000000000000000000..f288118b2f733927c6e40b32764ccf28cf3ad874
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod.h
@@ -0,0 +1,54 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::fmod_Scalar_out::call(self, other, out);
+}
+// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmod_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::fmod_Scalar_out::call(self, other, out);
+}
+
+// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor fmod(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::fmod_Scalar::call(self, other);
+}
+
+// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::fmod_Tensor_out::call(self, other, out);
+}
+// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmod_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::fmod_Tensor_out::call(self, other, out);
+}
+
+// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor fmod(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::fmod_Tensor::call(self, other);
+}
+
+}
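Reviewer note (not part of the diff): fmod.h shows how the schema overloads fmod.Scalar and fmod.Tensor collapse onto ordinary C++ overloading of at::fmod; which at::_ops record is hit depends only on the static argument type. A small sketch (at::fmod keeps the sign of self, i.e. it is the truncated-division remainder):

    #include <ATen/ATen.h>

    void fmod_demo() {
      at::Tensor a = at::full({1}, -7.0);
      at::Tensor r1 = at::fmod(a, 2.0);                 // Scalar overload -> at::_ops::fmod_Scalar; value -1
      at::Tensor r2 = at::fmod(a, at::full({1}, 2.0));  // Tensor overload -> at::_ops::fmod_Tensor
    }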
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d216f54292bed8c32a9917516eea3a1635f19914
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor fmod(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & fmod_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & fmod_(at::Tensor & self, const at::Scalar & other);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..101c50147e6935403b9d127d25e836bde88ae2e8
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor fmod(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmod_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..68ae48441bb835bb442ca30ea8a89552c1096da2
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor fmod(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmod_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & fmod_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..98162236fef896d6a01b27b81e3b1c67b3ec6a86
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor fmod(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmod_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & fmod_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..5df223aca0711cdac0b7b806ee73b339066062c3
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_fmod_Tensor : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace native
+} // namespace at
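Reviewer note (not part of the diff): fmod_meta.h bakes the overload into the class name (structured_fmod_Tensor) because only the Tensor overload of fmod is structured; the Scalar overload stays a plain native function in fmod_native.h below. In kernel code the overload-qualified macro spells this out, roughly (a sketch of the pattern in ATen's BinaryOps.cpp):

    TORCH_META_FUNC2(fmod, Tensor)(const at::Tensor & self, const at::Tensor & other) {
      build_borrowing_binary_op(maybe_get_output(), self, other);
    }

    TORCH_IMPL_FUNC(fmod_out)(const at::Tensor & self, const at::Tensor & other,
                              const at::Tensor & out) {
      fmod_stub(device_type(), *this);
    }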
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6d2fb14df1b38cbf1eb3a743483f5777127f7b5
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor fmod(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmod_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & fmod_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..75eacd171e404a79d45f15b1f2992bde641509c5
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor fmod(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & fmod_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & fmod_(at::Tensor & self, const at::Scalar & other);
+struct TORCH_API structured_fmod_out : public at::meta::structured_fmod_Tensor {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..061b098f6b51919b0257a6f5f7db2bf69b18fec5
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fmod_ops.h
@@ -0,0 +1,84 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fmod_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fmod";
+  static constexpr const char* overload_name = "Scalar_out";
+  static constexpr const char* schema_str = "fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+struct TORCH_API fmod_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fmod";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "fmod.Scalar(Tensor self, Scalar other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API fmod__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fmod_";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API fmod_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fmod";
+  static constexpr const char* overload_name = "Tensor_out";
+  static constexpr const char* schema_str = "fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API fmod_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fmod";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "fmod.Tensor(Tensor self, Tensor other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API fmod__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fmod_";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frac.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac.h
new file mode 100644
index 0000000000000000000000000000000000000000..1fdea3b528e17b0443120eaec8073d082266f5f9
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac.h
@@ -0,0 +1,45 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::frac(Tensor self) -> Tensor
+inline at::Tensor frac(const at::Tensor & self) {
+    return at::_ops::frac::call(self);
+}
+
+// aten::frac_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & frac_(at::Tensor & self) {
+    return at::_ops::frac_::call(self);
+}
+
+// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::frac_out::call(self, out);
+}
+// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::frac_out::call(self, out);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fddb0b3ba0466f19d6014d097c16a03ace5ff841
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor frac(const at::Tensor & self);
+TORCH_API at::Tensor & frac_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e12d7132e708c35d908e5c475da85b9412df364
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor frac(const at::Tensor & self);
+TORCH_API at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & frac_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..51ec6c5ab7603e803aafb3ea317057091f2eeb1a
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor frac(const at::Tensor & self);
+TORCH_API at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & frac_(at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca9bc2290328505077395ee688bcf55a1f030b1f
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_frac : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d567c3598b681d0fe44ba7109dfbe954beba8e5
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor frac(const at::Tensor & self);
+TORCH_API at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & frac_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f69e2eda0005f3452adea139d11b751364394617
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_frac_out : public at::meta::structured_frac {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+TORCH_API at::Tensor frac_sparse(const at::Tensor & self);
+TORCH_API at::Tensor & frac_sparse_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & frac_sparse_(at::Tensor & self);
+TORCH_API at::Tensor frac_sparse_csr(const at::Tensor & self);
+TORCH_API at::Tensor & frac_sparse_csr_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & frac_sparse_csr_(at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..e4a6b6c87b188253c83adf5f340303093176cd97
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frac_ops.h
@@ -0,0 +1,51 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API frac {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::frac";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "frac(Tensor self) -> Tensor";
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API frac_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::frac_";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "frac_(Tensor(a!) self) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API frac_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::frac";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ed5415e448cc53e769f28fb20dd21f39f32d40e
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
+    return at::_ops::fractional_max_pool2d_output::call(self, kernel_size, output_size, random_samples, output, indices);
+}
+// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!)
indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple fractional_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) { + return at::_ops::fractional_max_pool2d_output::call(self, kernel_size, output_size, random_samples, output, indices); +} + +// aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor) +inline ::std::tuple fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + return at::_ops::fractional_max_pool2d::call(self, kernel_size, output_size, random_samples); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..135829dfa092c95068876508335904da5e70b897 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & fractional_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) { + return at::_ops::fractional_max_pool2d_backward_grad_input::call(grad_output, self, kernel_size, output_size, indices, grad_input); +} +// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & fractional_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::fractional_max_pool2d_backward_grad_input::call(grad_output, self, kernel_size, output_size, indices, grad_input); +} + +// aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor +inline at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) { + return at::_ops::fractional_max_pool2d_backward::call(grad_output, self, kernel_size, output_size, indices); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0b6f6c03c55f7e78c277327cdda48b8c1bb27d50 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c54bb654b78a9808eea0d7b0ee5c25a0cde65186 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
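The backward headers above follow the same out/outf convention, with the gradient written into a caller-provided grad_input. A hedged sketch of the pattern, with placeholder kernel and output sizes: a training loop that manages its own buffers can reuse one grad_input allocation across iterations.

#include <ATen/ATen.h>

void backward_into(const at::Tensor& grad_out, const at::Tensor& input,
                   const at::Tensor& indices, at::Tensor& grad_input) {
  // Writes the pooling gradient into the preallocated grad_input buffer.
  at::fractional_max_pool2d_backward_out(
      grad_input, grad_out, input,
      /*kernel_size=*/{2, 2}, /*output_size=*/{8, 8}, indices);
}
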
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..546067600b13f5430dc5cc7bcb701bdc698dbc81 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..8f5e7a3ad607584f715b104e48ce64e8b3a59e51 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_fractional_max_pool2d_backward : public at::impl::MetaBase { + + + void meta(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_meta_dispatch.h 
b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..10c2b5ab6782f57b511da69ec893f31cf1292d9c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..12a27a9789b52504b9c89fac21ed1c9a36451b64 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_fractional_max_pool2d_backward_cpu : public at::meta::structured_fractional_max_pool2d_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, const at::Tensor & grad_input); +}; +struct TORCH_API structured_fractional_max_pool2d_backward_cuda : public at::meta::structured_fractional_max_pool2d_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7b8fdf5d61089dc9404b495f07d927e2a0262bd6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fractional_max_pool2d_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::fractional_max_pool2d_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); +}; + +struct TORCH_API fractional_max_pool2d_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::fractional_max_pool2d_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..259251c9328d39edd8b81d14475157f10033d132 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
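Each struct in the _ops.h headers above records the operator's registered name, overload name, and schema string alongside typed call/redispatch entry points. A hedged sketch of what those constants line up with: the same operator can be located at runtime through the dispatcher by (name, overload_name).

#include <ATen/core/dispatch/Dispatcher.h>

bool backward_op_is_registered() {
  auto handle = c10::Dispatcher::singleton().findSchema(
      {"aten::fractional_max_pool2d_backward", /*overload_name=*/""});
  return handle.has_value();
}
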
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c1e0442a9f2e1f38f23aa05c81ea94eb4dec7fa0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool2d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d656bf3ff5d1500628bc1722299742822d6ba33e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
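The compositeexplicitautogradnonfunctional header above declares only the functional form of fractional_max_pool2d, with no out or in-place variants. A hedged illustration of why: under functionalization, a mutating call is re-expressed as the functional op plus a write-back, so only the functional form needs a kernel behind this key. The helper below mimics that rewrite for the simpler frac_ case.

#include <ATen/ATen.h>

void frac_inplace_via_functional(at::Tensor& self) {
  at::Tensor result = at::frac(self);  // functional form
  self.copy_(result);                  // write-back, as functionalization does
}
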
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool2d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..baf7f5f38a1409401f74098a5599a6d19efc23ee --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_fractional_max_pool2d : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a08d354f54cbaaec15d8dc6c7359f516b50d9c87 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
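structured_fractional_max_pool2d above separates shape inference (meta) from computation (impl, declared in the _native.h header further down). One consequence, sketched here with assumed sizes: tensors on the meta device exercise only the meta step, so output shapes can be derived without allocating storage or running any kernel.

#include <ATen/ATen.h>

void shapes_only() {
  auto opts = at::TensorOptions().device(at::kMeta).dtype(at::kFloat);
  at::Tensor x = at::empty({1, 3, 16, 16}, opts);
  at::Tensor samples = at::empty({1, 3, 2}, opts);  // (N, C, 2), as the op expects
  auto [out, idx] = at::fractional_max_pool2d(
      x, /*kernel_size=*/{2, 2}, /*output_size=*/{8, 8}, samples);
  // out.sizes() == {1, 3, 8, 8}; no data was touched or computed.
}
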
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool2d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d7b0534cadd63eefd9227debc15ed745947326d3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_fractional_max_pool2d_out_cpu : public at::meta::structured_fractional_max_pool2d { +void impl(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, const at::Tensor & output, const at::Tensor & indices); +}; +struct TORCH_API structured_fractional_max_pool2d_out_cuda : public at::meta::structured_fractional_max_pool2d { +void impl(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, const at::Tensor & output, const at::Tensor & indices); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..38a54ce58ff6f4db6f08819b5f67c083c9d530aa --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool2d_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fractional_max_pool2d_output { + using schema = ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::fractional_max_pool2d"; + static constexpr const char* overload_name = "output"; + static constexpr const char* schema_str = "fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))"; + static ::std::tuple call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); +}; + +struct TORCH_API fractional_max_pool2d { + using schema = ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::fractional_max_pool2d"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)"; + static ::std::tuple call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d.h new file mode 100644 index 0000000000000000000000000000000000000000..258223913c8a615f74645d32576f398b4e20d246 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + return at::_ops::fractional_max_pool3d_output::call(self, kernel_size, output_size, random_samples, output, indices); +} +// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple fractional_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) { + return at::_ops::fractional_max_pool3d_output::call(self, kernel_size, output_size, random_samples, output, indices); +} + +// aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor) +inline ::std::tuple fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + return at::_ops::fractional_max_pool3d::call(self, kernel_size, output_size, random_samples); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..47f5e6305c32727d21fa3ef4f3407453285ed5f5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & fractional_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) { + return at::_ops::fractional_max_pool3d_backward_grad_input::call(grad_output, self, kernel_size, output_size, indices, grad_input); +} +// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & fractional_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::fractional_max_pool3d_backward_grad_input::call(grad_output, self, kernel_size, output_size, indices, grad_input); +} + +// aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor +inline at::Tensor fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) { + return at::_ops::fractional_max_pool3d_backward::call(grad_output, self, kernel_size, output_size, indices); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f5f3a1e0339961545b04216d2dc77716c2815b37 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a130b27249dc57486d23946ff2f627ae3418f0b6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..02c1824301ee6e9e3facef74963adb51f127c11b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fractional_max_pool3d_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool3d_backward_out_cpu(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); +TORCH_API at::Tensor fractional_max_pool3d_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool3d_backward_out_cuda(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a1b85343eb5771038ce43b77446eb0f89bee1e2b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
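Unlike the structured 2d backward above, fractional_max_pool3d_backward_native.h declares plain per-backend free functions rather than structured_* classes. A hedged sketch of what that means inside backend-specific code: the at::native entry point can be invoked directly, with the caller guaranteeing device and layout (sizes below are placeholders).

#include <ATen/ATen.h>
#include <ATen/ops/fractional_max_pool3d_backward_native.h>

at::Tensor direct_cpu_backward(const at::Tensor& grad_out,
                               const at::Tensor& input,
                               const at::Tensor& indices) {
  // Calls the CPU implementation directly, bypassing dispatch.
  return at::native::fractional_max_pool3d_backward_cpu(
      grad_out, input, /*kernel_size=*/{2, 2, 2},
      /*output_size=*/{4, 4, 4}, indices);
}
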
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API fractional_max_pool3d_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::fractional_max_pool3d_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); +}; + +struct TORCH_API fractional_max_pool3d_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::fractional_max_pool3d_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1a2dc73f52b7d9448fc6847781d6a6a3b238e8c5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..eb236be24b70896f0371b30e29002707a0f46b19 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..45a0bd5ce1a20a7d4135db2cf86f997e1313d7ed --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
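The fractional_max_pool3d meta header that follows uses a compile-time "type-state" builder: precompute_out carries one bool template flag per precomputed field, every setter returns a copy of the struct with its own flag flipped to true, and a static_assert rejects setting the same field twice. A minimal self-contained sketch of that idiom (hypothetical Builder, one field instead of eleven):

#include <cstdint>

template <bool WIDTH_SET>
struct Builder {
  int64_t width = 0;

  Builder<true> set_width(int64_t value) {
    static_assert(!WIDTH_SET, "width already set");  // fires on a second set
    Builder<true> ret;
    ret.width = value;
    return ret;
  }
};

// Builder<false>{}.set_width(3);               // OK
// Builder<false>{}.set_width(3).set_width(4);  // compile-time error
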
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..8b00d8a6999ac33fe97e21d50e1170f4e1ce49f2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_meta.h @@ -0,0 +1,239 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_fractional_max_pool3d : public at::impl::MetaBase { + + template + struct TORCH_API precompute_out { + + precompute_out set_poolSizeT(int64_t value) { + static_assert(POOLSIZET == false, "poolSizeT already set"); + precompute_out ret; +ret.poolSizeT = value; +ret.poolSizeH = this->poolSizeH; +ret.poolSizeW = this->poolSizeW; +ret.outputT = this->outputT; +ret.outputH = this->outputH; +ret.outputW = this->outputW; +ret.numBatch = this->numBatch; +ret.numPlanes = this->numPlanes; +ret.inputT = this->inputT; +ret.inputH = this->inputH; +ret.inputW = this->inputW; +return ret; + } + + + precompute_out set_poolSizeH(int64_t value) { + static_assert(POOLSIZEH == false, "poolSizeH already set"); + precompute_out ret; +ret.poolSizeT = this->poolSizeT; +ret.poolSizeH = value; +ret.poolSizeW = this->poolSizeW; +ret.outputT = this->outputT; +ret.outputH = this->outputH; +ret.outputW = this->outputW; +ret.numBatch = this->numBatch; +ret.numPlanes = this->numPlanes; +ret.inputT = this->inputT; +ret.inputH = this->inputH; +ret.inputW = this->inputW; +return ret; + } + + + precompute_out set_poolSizeW(int64_t value) { + static_assert(POOLSIZEW == false, "poolSizeW already set"); + precompute_out ret; +ret.poolSizeT = this->poolSizeT; +ret.poolSizeH = this->poolSizeH; +ret.poolSizeW = value; +ret.outputT = this->outputT; +ret.outputH = this->outputH; +ret.outputW = this->outputW; +ret.numBatch = this->numBatch; +ret.numPlanes = this->numPlanes; +ret.inputT = this->inputT; +ret.inputH = this->inputH; +ret.inputW = this->inputW; +return ret; + } + + + precompute_out set_outputT(int64_t value) { + static_assert(OUTPUTT == false, "outputT already set"); + precompute_out ret; +ret.poolSizeT = this->poolSizeT; +ret.poolSizeH = this->poolSizeH; +ret.poolSizeW = this->poolSizeW; +ret.outputT = value; +ret.outputH = this->outputH; +ret.outputW = this->outputW; +ret.numBatch = this->numBatch; +ret.numPlanes = this->numPlanes; +ret.inputT = this->inputT; +ret.inputH = this->inputH; +ret.inputW = this->inputW; +return ret; + } + + + precompute_out set_outputH(int64_t value) { + static_assert(OUTPUTH == false, "outputH already set"); + precompute_out ret; +ret.poolSizeT = this->poolSizeT; +ret.poolSizeH = 
this->poolSizeH; +ret.poolSizeW = this->poolSizeW; +ret.outputT = this->outputT; +ret.outputH = value; +ret.outputW = this->outputW; +ret.numBatch = this->numBatch; +ret.numPlanes = this->numPlanes; +ret.inputT = this->inputT; +ret.inputH = this->inputH; +ret.inputW = this->inputW; +return ret; + } + + + precompute_out set_outputW(int64_t value) { + static_assert(OUTPUTW == false, "outputW already set"); + precompute_out ret; +ret.poolSizeT = this->poolSizeT; +ret.poolSizeH = this->poolSizeH; +ret.poolSizeW = this->poolSizeW; +ret.outputT = this->outputT; +ret.outputH = this->outputH; +ret.outputW = value; +ret.numBatch = this->numBatch; +ret.numPlanes = this->numPlanes; +ret.inputT = this->inputT; +ret.inputH = this->inputH; +ret.inputW = this->inputW; +return ret; + } + + + precompute_out set_numBatch(int64_t value) { + static_assert(NUMBATCH == false, "numBatch already set"); + precompute_out ret; +ret.poolSizeT = this->poolSizeT; +ret.poolSizeH = this->poolSizeH; +ret.poolSizeW = this->poolSizeW; +ret.outputT = this->outputT; +ret.outputH = this->outputH; +ret.outputW = this->outputW; +ret.numBatch = value; +ret.numPlanes = this->numPlanes; +ret.inputT = this->inputT; +ret.inputH = this->inputH; +ret.inputW = this->inputW; +return ret; + } + + + precompute_out set_numPlanes(int64_t value) { + static_assert(NUMPLANES == false, "numPlanes already set"); + precompute_out ret; +ret.poolSizeT = this->poolSizeT; +ret.poolSizeH = this->poolSizeH; +ret.poolSizeW = this->poolSizeW; +ret.outputT = this->outputT; +ret.outputH = this->outputH; +ret.outputW = this->outputW; +ret.numBatch = this->numBatch; +ret.numPlanes = value; +ret.inputT = this->inputT; +ret.inputH = this->inputH; +ret.inputW = this->inputW; +return ret; + } + + + precompute_out set_inputT(int64_t value) { + static_assert(INPUTT == false, "inputT already set"); + precompute_out ret; +ret.poolSizeT = this->poolSizeT; +ret.poolSizeH = this->poolSizeH; +ret.poolSizeW = this->poolSizeW; +ret.outputT = this->outputT; +ret.outputH = this->outputH; +ret.outputW = this->outputW; +ret.numBatch = this->numBatch; +ret.numPlanes = this->numPlanes; +ret.inputT = value; +ret.inputH = this->inputH; +ret.inputW = this->inputW; +return ret; + } + + + precompute_out set_inputH(int64_t value) { + static_assert(INPUTH == false, "inputH already set"); + precompute_out ret; +ret.poolSizeT = this->poolSizeT; +ret.poolSizeH = this->poolSizeH; +ret.poolSizeW = this->poolSizeW; +ret.outputT = this->outputT; +ret.outputH = this->outputH; +ret.outputW = this->outputW; +ret.numBatch = this->numBatch; +ret.numPlanes = this->numPlanes; +ret.inputT = this->inputT; +ret.inputH = value; +ret.inputW = this->inputW; +return ret; + } + + + precompute_out set_inputW(int64_t value) { + static_assert(INPUTW == false, "inputW already set"); + precompute_out ret; +ret.poolSizeT = this->poolSizeT; +ret.poolSizeH = this->poolSizeH; +ret.poolSizeW = this->poolSizeW; +ret.outputT = this->outputT; +ret.outputH = this->outputH; +ret.outputW = this->outputW; +ret.numBatch = this->numBatch; +ret.numPlanes = this->numPlanes; +ret.inputT = this->inputT; +ret.inputH = this->inputH; +ret.inputW = value; +return ret; + } + + int64_t poolSizeT; +int64_t poolSizeH; +int64_t poolSizeW; +int64_t outputT; +int64_t outputH; +int64_t outputW; +int64_t numBatch; +int64_t numPlanes; +int64_t inputT; +int64_t inputH; +int64_t inputW; + }; + using meta_return_ty = precompute_out ; + meta_return_ty meta(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const 
at::Tensor & random_samples); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..19a17510dba8b39fecc4d5d423d6eaaa45860460 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c806df301344540f1dc181ab006ecb8556a67103 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_fractional_max_pool3d_out_cpu : public at::meta::structured_fractional_max_pool3d { +void impl(const at::Tensor & self, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW, int64_t outputT, int64_t outputH, int64_t outputW, const at::Tensor & random_samples, int64_t numBatch, int64_t numPlanes, int64_t inputT, int64_t inputH, int64_t inputW, const at::Tensor & output, const at::Tensor & indices); +}; +struct TORCH_API structured_fractional_max_pool3d_out_cuda : public at::meta::structured_fractional_max_pool3d { +void impl(const at::Tensor & self, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW, int64_t outputT, int64_t outputH, int64_t outputW, const at::Tensor & random_samples, int64_t numBatch, int64_t numPlanes, int64_t inputT, int64_t inputH, int64_t inputW, const at::Tensor & output, const at::Tensor & indices); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..de790c389d4f07263eb18ae07d4c6116b076a051 --- /dev/null +++ 
b/phivenv/Lib/site-packages/torch/include/ATen/ops/fractional_max_pool3d_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fractional_max_pool3d_output { + using schema = ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::fractional_max_pool3d"; + static constexpr const char* overload_name = "output"; + static constexpr const char* schema_str = "fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"; + static ::std::tuple call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); +}; + +struct TORCH_API fractional_max_pool3d { + using schema = ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::fractional_max_pool3d"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)"; + static ::std::tuple call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp.h new file mode 100644 index 0000000000000000000000000000000000000000..5ea4cb750c5fe0a4f94c6df83d49c57260c69967 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent) +inline ::std::tuple frexp(const at::Tensor & self) { + return at::_ops::frexp_Tensor::call(self); +} + +// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent) +inline ::std::tuple frexp_out(at::Tensor & mantissa, at::Tensor & exponent, const at::Tensor & self) { + return at::_ops::frexp_Tensor_out::call(self, mantissa, exponent); +} +// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) 
mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent) +inline ::std::tuple frexp_outf(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) { + return at::_ops::frexp_Tensor_out::call(self, mantissa, exponent); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..44ae8c8feba959b304255da58b7c3f881c57963d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple frexp(const at::Tensor & self); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1e85876785015ddd69963187b62f08fa4723f0fb --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple frexp_out(at::Tensor & mantissa, at::Tensor & exponent, const at::Tensor & self); +TORCH_API ::std::tuple frexp_outf(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..111d64c10f0ec28a09c6c6a677cf5996659bc4e3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
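A minimal usage sketch for the frexp entry points declared above (illustrative only, not part of the generated headers; assumes a standard libtorch build, and the helper name frexp_demo is hypothetical):

#include <ATen/ATen.h>

// Sketch: decompose x into mantissa * 2^exponent via the allocating overload,
// then reuse preallocated outputs via the out overload declared in frexp.h.
void frexp_demo() {
  at::Tensor x = at::rand({4}) * 16;
  auto [mantissa, exponent] = at::frexp(x);  // allocating overload
  at::Tensor m = at::empty_like(mantissa);   // caller-owned outputs
  at::Tensor e = at::empty_like(exponent);
  at::frexp_out(m, e, x);                    // out overload writes in place
}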
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple frexp_out(at::Tensor & mantissa, at::Tensor & exponent, const at::Tensor & self); +TORCH_API ::std::tuple frexp_outf(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..233cf70c61f772fdcbda1a9033e602a0c277c2f1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple frexp(const at::Tensor & self); +TORCH_API ::std::tuple frexp_out(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cfc3a3aa073c9714845a95a08f6d395c222e1242 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frexp_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API frexp_Tensor { + using schema = ::std::tuple (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::frexp"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)"; + static ::std::tuple call(const at::Tensor & self); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API frexp_Tensor_out { + using schema = ::std::tuple (const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::frexp"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) 
exponent)"; + static ::std::tuple call(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frobenius_norm.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frobenius_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..c91240368115fab2d3bae029ea9186e1169a893e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frobenius_norm.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor +inline at::Tensor frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::frobenius_norm_dim::call(self, dim, keepdim); +} + +// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & frobenius_norm_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::frobenius_norm_out::call(self, dim, keepdim, out); +} +// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & frobenius_norm_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { + return at::_ops::frobenius_norm_out::call(self, dim, keepdim, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frobenius_norm_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frobenius_norm_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f7a4aef20d21aef9384df734c81bf6004a2262cb --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frobenius_norm_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & frobenius_norm_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & frobenius_norm_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frobenius_norm_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frobenius_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1b6e13ce21110da55e53365dc7bdee7d382f24bd --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frobenius_norm_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & frobenius_norm_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/frobenius_norm_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/frobenius_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..40cabb4a6ee4240c1a17b6b1073c74e26cc0257c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/frobenius_norm_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API frobenius_norm_dim { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::frobenius_norm"; + static constexpr const char* overload_name = "dim"; + static constexpr const char* schema_str = "frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim); +}; + +struct TORCH_API frobenius_norm_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::frobenius_norm"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/from_blob.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/from_blob.h new file mode 100644 index 0000000000000000000000000000000000000000..5188c6f88e0541c18f1cf67be643a2604192c7e0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/from_blob.h @@ -0,0 +1,167 @@ +#pragma once +#include + +namespace at { + +namespace detail { + +TORCH_API inline void noopDelete(void*) {} + +} // namespace detail + +/// Provides a fluent API to construct tensors from external data. +/// +/// The fluent API can be used instead of `from_blob` functions in case the +/// required set of parameters does not align with the existing overloads. +/// +/// at::Tensor tensor = at::for_blob(data, sizes) +/// .strides(strides) +/// .context(context, [](void *ctx) { delete static_cast(ctx); +/// }) .options(...) .make_tensor(); +/// +class TORCH_API TensorMaker { + friend TensorMaker for_blob(void* data, IntArrayRef sizes) noexcept; + + public: + using ContextDeleter = DeleterFnPtr; + + TensorMaker& strides(OptionalIntArrayRef value) noexcept { + strides_ = value; + + return *this; + } + + TensorMaker& storage_offset(std::optional value) noexcept { + storage_offset_ = value; + + return *this; + } + + TensorMaker& deleter(std::function value) noexcept { + deleter_ = std::move(value); + + return *this; + } + + TensorMaker& context(void* value, ContextDeleter deleter = nullptr) noexcept { + ctx_ = std::unique_ptr{ + value, deleter != nullptr ? 
deleter : detail::noopDelete}; + + return *this; + } + + TensorMaker& target_device(std::optional value) noexcept { + device_ = value; + + return *this; + } + + TensorMaker& options(TensorOptions value) noexcept { + opts_ = value; + + return *this; + } + + TensorMaker& resizeable_storage() noexcept { + resizeable_ = true; + + return *this; + } + + TensorMaker& allocator(c10::Allocator* allocator) noexcept { + allocator_ = allocator; + + return *this; + } + + Tensor make_tensor(); + + private: + explicit TensorMaker(void* data, IntArrayRef sizes) noexcept + : data_{data}, sizes_{sizes} {} + + std::size_t computeStorageSize() const noexcept; + + DataPtr makeDataPtrFromDeleter() noexcept; + + DataPtr makeDataPtrFromContext() noexcept; + + IntArrayRef makeTempSizes() const noexcept; + + void* data_; + IntArrayRef sizes_; + OptionalIntArrayRef strides_{}; + std::optional storage_offset_{}; + std::function deleter_{}; + std::unique_ptr ctx_{nullptr, detail::noopDelete}; + std::optional device_{}; + TensorOptions opts_{}; + bool resizeable_{}; + c10::Allocator* allocator_{}; +}; + +inline TensorMaker for_blob(void* data, IntArrayRef sizes) noexcept { + return TensorMaker{data, sizes}; +} + +inline Tensor from_blob( + void* data, + IntArrayRef sizes, + IntArrayRef strides, + const std::function& deleter, + const TensorOptions& options = {}, + const std::optional target_device = std::nullopt) { + return for_blob(data, sizes) + .strides(strides) + .deleter(deleter) + .options(options) + .target_device(target_device) + .make_tensor(); +} + +inline Tensor from_blob( + void* data, + IntArrayRef sizes, + IntArrayRef strides, + int64_t storage_offset, + const std::function& deleter, + const TensorOptions& options = {}, + const std::optional target_device = std::nullopt) { + return for_blob(data, sizes) + .strides(strides) + .storage_offset(storage_offset) + .deleter(deleter) + .options(options) + .target_device(target_device) + .make_tensor(); +} + +inline Tensor from_blob( + void* data, + IntArrayRef sizes, + std::function deleter, + const TensorOptions& options = {}, + const std::optional target_device = std::nullopt) { + return for_blob(data, sizes) + .deleter(std::move(deleter)) + .options(options) + .target_device(target_device) + .make_tensor(); +} + +inline Tensor from_blob( + void* data, + IntArrayRef sizes, + IntArrayRef strides, + const TensorOptions& options = {}) { + return for_blob(data, sizes).strides(strides).options(options).make_tensor(); +} + +inline Tensor from_blob( + void* data, + IntArrayRef sizes, + const TensorOptions& options = {}) { + return for_blob(data, sizes).options(options).make_tensor(); +} + +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file.h new file mode 100644 index 0000000000000000000000000000000000000000..7837e2b5ac71c094bfb16e365f48670cf4e635bd --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor from_file(c10::string_view filename, ::std::optional shared=::std::nullopt, ::std::optional size=0, at::TensorOptions options={}) { + return at::_ops::from_file::call(filename, shared, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor from_file(c10::string_view filename, ::std::optional shared, ::std::optional size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::from_file::call(filename, shared, size, dtype, layout, device, pin_memory); +} + +// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & from_file_out(at::Tensor & out, c10::string_view filename, ::std::optional shared=::std::nullopt, ::std::optional size=0) { + return at::_ops::from_file_out::call(filename, shared, size, out); +} +// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & from_file_outf(c10::string_view filename, ::std::optional shared, ::std::optional size, at::Tensor & out) { + return at::_ops::from_file_out::call(filename, shared, size, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1957767e65f43090784e160f2e72dda3e3967eaa --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & from_file_out(at::Tensor & out, c10::string_view filename, ::std::optional shared=::std::nullopt, ::std::optional size=0); +TORCH_API at::Tensor & from_file_outf(c10::string_view filename, ::std::optional shared, ::std::optional size, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7cabdcd77c8ef345b481f8e7748b397203be14f9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor from_file(c10::string_view filename, ::std::optional shared=::std::nullopt, ::std::optional size=0, at::TensorOptions options={}); +TORCH_API at::Tensor from_file(c10::string_view filename, ::std::optional shared, ::std::optional size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file_native.h new file mode 100644 index 0000000000000000000000000000000000000000..54030f889f4ba5e38381ddb10d20ee4554358540 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & from_file_out(c10::string_view filename, ::std::optional shared, ::std::optional size, at::Tensor & out); +TORCH_API at::Tensor from_file(c10::string_view filename, ::std::optional shared=::std::nullopt, ::std::optional size=0, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..beb767fab90aa419555144da5bf0998de999f7b5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/from_file_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API from_file { + using schema = at::Tensor (c10::string_view, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::from_file"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"; + static at::Tensor call(c10::string_view filename, ::std::optional shared, ::std::optional size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, ::std::optional shared, ::std::optional size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API from_file_out { + using schema = at::Tensor & (c10::string_view, ::std::optional, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::from_file"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(c10::string_view filename, ::std::optional shared, ::std::optional size, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, ::std::optional shared, ::std::optional size, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/full.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/full.h new file mode 100644 index 0000000000000000000000000000000000000000..06f2e5d6c8abde6251459eb1972272476b0cc526 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/full.h @@ -0,0 +1,132 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::full_names::call(size, fill_value, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::full_names::call(size, fill_value, names, dtype, layout, device, pin_memory); +} + +// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) { + return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template >> + at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) { + return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory); +} +namespace symint { + template >> + at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory); + } +} + +// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) { + return at::_ops::full::call(size, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template >> + at::Tensor full(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) { + return at::_ops::full::call(size, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::full::call(size, fill_value, dtype, layout, device, pin_memory); +} +namespace symint { + template >> + at::Tensor full(c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::full::call(size, fill_value, dtype, layout, device, pin_memory); + } +} + +// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value) { + return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out); +} +namespace symint { + template >> + at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value) { + return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out); + } +} + +// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) { + return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out); +} +namespace symint { + template >> + at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) { + return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out); + } +} + +// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & full_symint_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value) { + return at::_ops::full_out::call(size, fill_value, out); +} +namespace symint { + template >> + at::Tensor & full_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value) { + return at::_ops::full_out::call(size, fill_value, out); + } +} + +// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & full_symint_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) { + return at::_ops::full_out::call(size, fill_value, out); +} +namespace symint { + template >> + at::Tensor & full_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) { + return at::_ops::full_out::call(size, fill_value, out); + } +} + +// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names) { + return at::_ops::full_names_out::call(size, fill_value, names, out); +} +// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, at::Tensor & out) { + return at::_ops::full_names_out::call(size, fill_value, names, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/full_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c3386d58f0d5fb8a17fdb1b5827893b8e10244dc --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_compositeexplicitautograd_dispatch.h @@ -0,0 +1,34 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, at::TensorOptions options={}); +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names); +TORCH_API at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, at::Tensor & out); +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}); +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}); +TORCH_API at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value); +TORCH_API at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); +TORCH_API at::Tensor & full_symint_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value); +TORCH_API at::Tensor & full_symint_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/full_like.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_like.h new file mode 100644 index 0000000000000000000000000000000000000000..d73ec7828287ed87167b7163893b4c5e202e9192 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_like.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options={}, ::std::optional memory_format=::std::nullopt) { + return at::_ops::full_like::call(self, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); +} +// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor +inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format) { + return at::_ops::full_like::call(self, fill_value, dtype, layout, device, pin_memory, memory_format); +} + +// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & full_like_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & fill_value, ::std::optional memory_format=::std::nullopt) { + return at::_ops::full_like_out::call(self, fill_value, memory_format, out); +} +// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & full_like_outf(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional memory_format, at::Tensor & out) { + return at::_ops::full_like_out::call(self, fill_value, memory_format, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/full_like_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_like_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6e88893e87887b3deccd597251cf49b438b9f144 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_like_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
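A minimal sketch of the full_like wrappers declared above (illustrative only; assumes a standard libtorch build, and the helper name full_like_demo is hypothetical):

#include <ATen/ATen.h>

// Sketch: full_like mirrors self's dtype/layout/device unless the options
// argument overrides them.
void full_like_demo() {
  at::Tensor src = at::rand({4, 4});
  at::Tensor a = at::full_like(src, 3.5);  // float, like src
  at::Tensor b = at::full_like(src, 1, at::TensorOptions().dtype(at::kInt));
}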
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options={}, ::std::optional memory_format=::std::nullopt); +TORCH_API at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format); +TORCH_API at::Tensor & full_like_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & fill_value, ::std::optional memory_format=::std::nullopt); +TORCH_API at::Tensor & full_like_outf(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional memory_format, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/full_like_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_like_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0c77cba1e7f023137d2a4926bd0b303e85d7a91b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_like_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}, ::std::optional memory_format=::std::nullopt); +TORCH_API at::Tensor & full_like_out(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional memory_format, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/full_like_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_like_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c0ba4ea98cf0749ffd79b947cfa6836bbc7d282a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_like_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API full_like { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::full_like"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format); +}; + +struct TORCH_API full_like_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::full_like"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional memory_format, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, ::std::optional memory_format, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/full_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_native.h new file mode 100644 index 0000000000000000000000000000000000000000..11477b039e6fd9ac3cf5cae6df3f9f24ce6b5f9f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & full_names_out(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, at::Tensor & out); +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & full_out(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/full_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..49b26a9ca26c801f291201d76fa7af44adc9627e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/full_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
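A sketch of how the _ops entry points relate to the at:: wrappers (illustrative only; the wrappers above forward to ::call exactly as shown in full.h, and the helper name full_via_ops is hypothetical):

#include <ATen/ATen.h>
#include <ATen/ops/full_ops.h>
#include <optional>

// Sketch: ::call enters the dispatcher from scratch; ::redispatch continues
// with an explicit DispatchKeySet (used by kernels that must skip keys).
at::Tensor full_via_ops() {
  return at::_ops::full::call(
      c10::fromIntArrayRefSlow({2, 2}), /*fill_value=*/0,
      /*dtype=*/::std::nullopt, /*layout=*/::std::nullopt,
      /*device=*/::std::nullopt, /*pin_memory=*/::std::nullopt);
}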
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API full_names { + using schema = at::Tensor (at::IntArrayRef, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::full"; + static constexpr const char* overload_name = "names"; + static constexpr const char* schema_str = "full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API full { + using schema = at::Tensor (c10::SymIntArrayRef, const at::Scalar &, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::full"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API full_out { + using schema = at::Tensor & (c10::SymIntArrayRef, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::full"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); +}; + +struct TORCH_API full_names_out { + using schema = at::Tensor & (at::IntArrayRef, const at::Scalar &, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::full"; + static constexpr const char* overload_name = "names_out"; + static constexpr const char* schema_str = "full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant.h new file mode 100644 index 0000000000000000000000000000000000000000..f00ba647212cb3665f7ed628943ead5e54091769 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor +inline at::Tensor fused_moving_avg_obs_fake_quant(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) { + return at::_ops::fused_moving_avg_obs_fake_quant::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..80a198d5ab639f3862479326a18a015585574d0e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor fused_moving_avg_obs_fake_quant(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d1d765cfd716dc3404c8302061497e85e62fda6a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fused_moving_avg_obs_fake_quant(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..723e4a5312bcbb5e22cac94f45db54e8ba91bf2a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fused_moving_avg_obs_fake_quant { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, double, int64_t, int64_t, int64_t, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::fused_moving_avg_obs_fake_quant"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) 
zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gather.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather.h new file mode 100644 index 0000000000000000000000000000000000000000..394c0c14b58658312b713535f023ecee3d53ff4d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather_out::call(self, dim, index, sparse_grad, out); +} +// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gather_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) { + return at::_ops::gather_out::call(self, dim, index, sparse_grad, out); +} + +// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor +inline at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather::call(self, dim, index, sparse_grad); +} + +// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather_dimname_out::call(self, dim, index, sparse_grad, out); +} +// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & gather_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) { + return at::_ops::gather_dimname_out::call(self, dim, index, sparse_grad, out); +} + +// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor +inline at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather_dimname::call(self, dim, index, sparse_grad); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..5f0111810878f6df53ee659bbcbd24d0985d57e5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_backward.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor +inline at::Tensor gather_backward(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) { + return at::_ops::gather_backward::call(grad, self, dim, index, sparse_grad); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ad02fa68cbcf8a26f7f9e8b0f9b40ac35f3b750f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor gather_backward(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..11963adbb07e65a865bb6ba23166166c0e1f4086 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor gather_backward(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2d3d3838edc8bb3bcbaad7b585433844408f2bf8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_backward_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API gather_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gather_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor"; + static at::Tensor call(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6ba6c74e72381b077d006ed4c364459e58f20c9c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fd45b2a578182295615b37a80d1f5ca762e01d67 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b003dc68d0ebb5657e4d253273c04d123a6af51f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
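// [Editor's sketch -- not part of the generated header.] Minimal use of the gather
// overloads declared earlier in this diff. Note the two out-variant spellings the
// generated API always provides: *_out takes the destination first with defaulted
// trailing arguments, *_outf takes every argument explicitly, destination last.
inline void example_gather() {
  at::Tensor t   = at::arange(6).reshape({2, 3});
  at::Tensor idx = at::tensor({0, 2, 1, 0}, at::kLong).reshape({2, 2});
  at::Tensor g   = at::gather(t, /*dim=*/1, idx);  // g[i][j] = t[i][idx[i][j]]
  at::Tensor out = at::empty({2, 2}, at::kLong);
  at::gather_out(out, t, /*dim=*/1, idx);                          // out-first spelling
  at::gather_outf(t, /*dim=*/1, idx, /*sparse_grad=*/false, out);  // out-last spelling
}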
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4a05784f2aa397dd2b24eba87bdeb3acc09ee39d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..dbb543e85b4c96456953cb206c2fd1ff249b34b5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_gather : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4940f4aa677329d2a3d25b3a4bc1599685b163ef --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
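// [Editor's note -- assumption-laden sketch.] The meta:: entries declared below run
// only the shape/dtype computation of the structured kernel, so operators can be
// traced without real data. This assumes the standard at::kMeta device constant:
inline void example_gather_meta() {
  at::Tensor mt = at::empty({2, 3}, at::TensorOptions().device(at::kMeta));
  at::Tensor mi = at::empty({2, 2}, at::TensorOptions().device(at::kMeta).dtype(at::kLong));
  at::Tensor mo = at::gather(mt, /*dim=*/1, mi);  // no storage allocated; mo.sizes() == {2, 2}
}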
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_native.h new file mode 100644 index 0000000000000000000000000000000000000000..95d12d4f68d4a66495d73cf615ac816d7bacb4c0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_gather_out : public at::meta::structured_gather { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, const at::Tensor & out); +}; +TORCH_API at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_out(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..719acf0a4c3b86c068e1c28d9dc8c84a1a15da7f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gather_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API gather_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gather"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); +}; + +struct TORCH_API gather { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gather"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad); +}; + +struct TORCH_API gather_dimname_out { + using schema = at::Tensor & (const at::Tensor &, at::Dimname, const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gather"; + static constexpr const char* overload_name = "dimname_out"; + static constexpr const char* schema_str = "gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); +}; + +struct TORCH_API gather_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gather"; + static constexpr const char* overload_name = "dimname"; + static constexpr const char* schema_str = "gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd.h new file mode 100644 index 0000000000000000000000000000000000000000..3a83d74ed0133dee430cd90302a729e886130897 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gcd_out::call(self, other, out); +} +// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::gcd_out::call(self, other, out); +} + +// aten::gcd(Tensor self, Tensor other) -> Tensor +inline at::Tensor gcd(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gcd::call(self, other); +} + +// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other) { + return at::_ops::gcd_::call(self, other); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2ad6eaec7684b78a7e52e76d11b96044132bd2b9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor gcd(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5664b1e44c2559e118addb54ebd0f389cfe85412 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor gcd(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..de82114477ee8c831259d5483fa4193b1eaa3e18 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor gcd(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..7197412c03e137fcf9eab92ca1c0174314432559 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_gcd : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ca353120d58264cea71f6d6d49d533750b0bec39 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
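// [Editor's note.] structured_gcd above derives from TensorIteratorBase: its meta()
// performs the broadcasting and dtype checks and allocates the output, while the
// per-device impl() declared in gcd_native.h only fills in values; that split is
// what lets CPU, CUDA and meta share one shape-checking path. A hedged
// illustration of the broadcasting this buys:
inline void example_gcd_broadcast() {
  at::Tensor lhs = at::tensor({6, 9}, at::kLong).reshape({2, 1});
  at::Tensor rhs = at::tensor({4, 6, 8}, at::kLong);
  at::Tensor r   = at::gcd(lhs, rhs);  // broadcasts to shape {2, 3}
}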
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor gcd(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a28c67d7e5b054622d58cc22b66f41bb10d67f40 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_gcd_out : public at::meta::structured_gcd { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..35bbd4af2d718d6ef86a410528861dd9f6c9cdab --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gcd_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API gcd_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gcd"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "gcd.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API gcd { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gcd"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "gcd(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API gcd_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gcd_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ge.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge.h new file mode 100644 index 0000000000000000000000000000000000000000..c5e089fdaaa360659216a4778b4883698d8fbc69 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ge_Scalar_out::call(self, other, out); +} +// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::ge_Scalar_out::call(self, other, out); +} + +// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor ge(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ge_Scalar::call(self, other); +} + +// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ge_Tensor_out::call(self, other, out); +} +// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::ge_Tensor_out::call(self, other, out); +} + +// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor ge(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ge_Tensor::call(self, other); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ec039ef1c3b72893d0c07db953c42e47b2a5ad74 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0e996717d75b0737122a2d932cc252c034d3c537 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_cpu_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f48249e5e3e31c429033f84dfd02c55219eacbba --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_cuda_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..a5e0e2a6cf7193a3f45ea7f4701ae6987250c3a3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_meta.h @@ -0,0 +1,32 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_ge_Scalar : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Scalar & other); +}; +struct TORCH_API structured_ge_Tensor : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_meta_dispatch.h new file mode 100644 index 
0000000000000000000000000000000000000000..43b825f29078492681715ae6fc0319ea9eedb0ca --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_meta_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a5e2702ab0411ad9dd7d52f1c6405544c1a0ed3d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_native.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_ge_Scalar_out : public at::meta::structured_ge_Scalar { +void impl(const at::Tensor & self, const at::Scalar & other, const at::Tensor & out); +}; +TORCH_API at::Tensor ge_scalar_nested(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor ge_quantized_cpu(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_out_quantized_cpu(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +struct TORCH_API structured_ge_Tensor_out : public at::meta::structured_ge_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor ge_quantized_cpu(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_out_quantized_cpu(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f202d0f4e165d142219276a1738102ccfc0e651a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ge_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API ge_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::ge"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API ge_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::ge"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "ge.Scalar(Tensor self, Scalar other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API ge_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::ge"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API ge_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::ge"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "ge.Tensor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API ge__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::ge_"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "ge_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API ge__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::ge_"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu.h new file mode 100644 index 0000000000000000000000000000000000000000..a0a114ece813670387d5137afaa8d3e2fc7f13e1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gelu_out(at::Tensor & out, const at::Tensor & self, c10::string_view approximate="none") { + return at::_ops::gelu_out::call(self, approximate, out); +} +// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gelu_outf(const at::Tensor & self, c10::string_view approximate, at::Tensor & out) { + return at::_ops::gelu_out::call(self, approximate, out); +} + +// aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!) +inline at::Tensor & gelu_(at::Tensor & self, c10::string_view approximate="none") { + return at::_ops::gelu_::call(self, approximate); +} + +// aten::gelu(Tensor self, *, str approximate='none') -> Tensor +inline at::Tensor gelu(const at::Tensor & self, c10::string_view approximate="none") { + return at::_ops::gelu::call(self, approximate); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..eac1b9b12ba7ec54985d2f4a450d73baf32a2995 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none") { + return at::_ops::gelu_backward_grad_input::call(grad_output, self, approximate, grad_input); +} +// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input) { + return at::_ops::gelu_backward_grad_input::call(grad_output, self, approximate, grad_input); +} + +// aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor +inline at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none") { + return at::_ops::gelu_backward::call(grad_output, self, approximate); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2d1c2f05e5caf854c7f280081584a024a5c33514 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..df8ef33d9bc8a766bc869a590b14257cbdbe5dca --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7f1dccda086b3ed2d4544738bbe202097185af9c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..919522911d47f6843004a315d6af5ad61c2bf50c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_gelu_backward : public TensorIteratorBase { + + + void meta(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ec5755cac3adad0aa04b536649f58ecf0dced09a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8c718e2cc9c3349c3c438d4cd1013d0ce95343c0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_gelu_backward_out_cpu : public at::meta::structured_gelu_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, const at::Tensor & grad_input); +}; +struct TORCH_API structured_gelu_backward_out_cuda : public at::meta::structured_gelu_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, const at::Tensor & grad_input); +}; +TORCH_API at::Tensor gelu_backwards_nested(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor mkldnn_gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8b7bbf23159789c4da600a2803f8563f859688ca --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API gelu_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::string_view, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gelu_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) 
grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); +}; + +struct TORCH_API gelu_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gelu_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d5efa7bbcef94d13d0f9290444584f959085c653 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor gelu(const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_(at::Tensor & self, c10::string_view approximate="none"); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..343b0173d9ffb7a439beea73776bf77a56c1c035 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor gelu(const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_out(at::Tensor & out, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_outf(const at::Tensor & self, c10::string_view approximate, at::Tensor & out); +TORCH_API at::Tensor & gelu_(at::Tensor & self, c10::string_view approximate="none"); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6e35c0afbd133b2f1d6059c454d072ef9075cfde --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor gelu(const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_out(at::Tensor & out, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_outf(const at::Tensor & self, c10::string_view approximate, at::Tensor & out); +TORCH_API at::Tensor & gelu_(at::Tensor & self, c10::string_view approximate="none"); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..2c71d097e4972ae1de250316db074afcfadc7bc7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_gelu : public TensorIteratorBase { + + + void meta(const at::Tensor & self, c10::string_view approximate); +}; + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..006b23dd3f545579b218d8fcc45e2da437037bc2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
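
As an illustrative aside (not part of the diff), the per-dispatch-key namespaces above expose the same operator at different levels: at::gelu routes through the dispatcher (autograd, device selection), while at::cpu::gelu binds the registered CPU kernel directly and skips dispatch. A minimal sketch, assuming a linked libtorch:

#include <ATen/ATen.h>
#include <ATen/ops/gelu_cpu_dispatch.h>

void gelu_dispatch_sketch() {
  at::Tensor x = at::randn({8});
  at::Tensor a = at::gelu(x, "tanh");      // full dispatcher path
  at::Tensor b = at::cpu::gelu(x, "tanh"); // direct CPU kernel, bypasses dispatch
  TORCH_CHECK(at::allclose(a, b));
}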
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor gelu(const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_out(at::Tensor & out, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_outf(const at::Tensor & self, c10::string_view approximate, at::Tensor & out); +TORCH_API at::Tensor & gelu_(at::Tensor & self, c10::string_view approximate="none"); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_native.h new file mode 100644 index 0000000000000000000000000000000000000000..35270b3ae07696307e362d9d1785af81f553d9ad --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_native.h @@ -0,0 +1,32 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_gelu_out_cpu : public at::meta::structured_gelu { +void impl(const at::Tensor & self, c10::string_view approximate, const at::Tensor & out); +}; +struct TORCH_API structured_gelu_out_cuda : public at::meta::structured_gelu { +void impl(const at::Tensor & self, c10::string_view approximate, const at::Tensor & out); +}; +TORCH_API at::Tensor NestedTensor_gelu(const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & NestedTensor_gelu_(at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor mkldnn_gelu(const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor gelu_quantized_cpu(const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_quantized_cpu_(at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor gelu_quantized_cuda(const at::Tensor & self, c10::string_view approximate="none"); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2448bc835b550762ca0e0acb5b23e05ad99332da --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gelu_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API gelu_out { + using schema = at::Tensor & (const at::Tensor &, c10::string_view, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gelu"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "gelu.out(Tensor self, *, str approximate='none', Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, c10::string_view approximate, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate, at::Tensor & out); +}; + +struct TORCH_API gelu_ { + using schema = at::Tensor & (at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gelu_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, c10::string_view approximate); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::string_view approximate); +}; + +struct TORCH_API gelu { + using schema = at::Tensor (const at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gelu"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "gelu(Tensor self, *, str approximate='none') -> Tensor"; + static at::Tensor call(const at::Tensor & self, c10::string_view approximate); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric.h new file mode 100644 index 0000000000000000000000000000000000000000..a1bdf2614809c98c48c8c12bfe8d6dee15799ce4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & geometric_out(at::Tensor & out, const at::Tensor & self, double p, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::geometric_out::call(self, p, generator, out); +} +// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & geometric_outf(const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::geometric_out::call(self, p, generator, out); +} + +// aten::geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor +inline at::Tensor geometric(const at::Tensor & self, double p, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::geometric::call(self, p, generator); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1754b02b27be80428eb8eae45849dc3a93b98092 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor geometric(const at::Tensor & self, double p, ::std::optional<at::Generator> generator=::std::nullopt); +TORCH_API at::Tensor & geometric_out(at::Tensor & out, const at::Tensor & self, double p, ::std::optional<at::Generator> generator=::std::nullopt); +TORCH_API at::Tensor & geometric_outf(const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2db7b3088f6fe6019c18f21f13f450b491e22722 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, ::std::optional<at::Generator> generator=::std::nullopt); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d2514a0748077cbeb5de29e42a213655a66d5128 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies.
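
As an illustrative aside (not part of the diff), geometric_ fills a tensor in place with samples from a geometric distribution with success probability p; the trailing ::std::optional<at::Generator> argument selects the RNG and defaults to the global one. A minimal sketch, assuming CPU tensors and that at::detail::createCPUGenerator is available in this build:

#include <ATen/ATen.h>
#include <ATen/CPUGeneratorImpl.h>

void geometric_sketch() {
  at::Tensor t = at::empty({5});
  t.geometric_(0.25);                                      // default generator
  at::Generator gen = at::detail::createCPUGenerator(42);  // seeded generator
  t.geometric_(0.25, gen);                                 // reproducible draws
}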
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, ::std::optional<at::Generator> generator=::std::nullopt); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0e4a2158107028a353c21c6d6338aeadd5de3293 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, ::std::optional<at::Generator> generator=::std::nullopt); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_native.h new file mode 100644 index 0000000000000000000000000000000000000000..69a950187bcb749832bdd8965f8652ed738d2341 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor geometric(const at::Tensor & self, double p, ::std::optional<at::Generator> generator=::std::nullopt); +TORCH_API at::Tensor & geometric_out(const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out); +TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, ::std::optional<at::Generator> generator=::std::nullopt); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..da3caf48ebe45ca786f15c298705940c732b7666 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/geometric_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API geometric_ { + using schema = at::Tensor & (at::Tensor &, double, ::std::optional<at::Generator>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::geometric_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, double p, ::std::optional<at::Generator> generator); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, ::std::optional<at::Generator> generator); +}; + +struct TORCH_API geometric_out { + using schema = at::Tensor & (const at::Tensor &, double, ::std::optional<at::Generator>, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::geometric"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out); +}; + +struct TORCH_API geometric { + using schema = at::Tensor (const at::Tensor &, double, ::std::optional<at::Generator>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::geometric"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf.h new file mode 100644 index 0000000000000000000000000000000000000000..d8a5f57f5c22436133f57f7b9bf28767554d13d5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) +inline ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out(at::Tensor & a, at::Tensor & tau, const at::Tensor & self) { + return at::_ops::geqrf_a::call(self, a, tau); +} +// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) +inline ::std::tuple<at::Tensor &,at::Tensor &> geqrf_outf(const at::Tensor & self, at::Tensor & a, at::Tensor & tau) { + return at::_ops::geqrf_a::call(self, a, tau); +} + +// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau) +inline ::std::tuple<at::Tensor,at::Tensor> geqrf(const at::Tensor & self) { + return at::_ops::geqrf::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f0ff7b256e93647d7cf1ab153f8b21632d4b9509 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> geqrf(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out(at::Tensor & a, at::Tensor & tau, const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> geqrf_outf(const at::Tensor & self, at::Tensor & a, at::Tensor & tau); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..748245bd3ab46da03e13ec1fe50519db68f0065a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
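
As an illustrative aside (not part of the diff), geqrf returns the QR factorization in compact LAPACK form: `a` holds R in its upper triangle with the Householder vectors stored below it, and `tau` holds the reflector coefficients. A minimal sketch of recovering Q and R, assuming at::linalg_householder_product is available:

#include <ATen/ATen.h>

void geqrf_sketch() {
  at::Tensor m = at::randn({3, 3});
  auto [a, tau] = at::geqrf(m);                          // compact form
  at::Tensor q = at::linalg_householder_product(a, tau); // materialize Q
  at::Tensor r = a.triu();                               // R = upper triangle
  TORCH_CHECK(at::allclose(q.matmul(r), m, 1e-4, 1e-5));
}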
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> geqrf(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out(at::Tensor & a, at::Tensor & tau, const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> geqrf_outf(const at::Tensor & self, at::Tensor & a, at::Tensor & tau); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b2ffcb035b8c437c0f4d0852f29f908194d1b657 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> geqrf(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out(const at::Tensor & self, at::Tensor & a, at::Tensor & tau); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f095acf403cee0d93dfb9124b02d2873fec716ed --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/geqrf_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API geqrf_a { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::geqrf"; + static constexpr const char* overload_name = "a"; + static constexpr const char* schema_str = "geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::Tensor & a, at::Tensor & tau); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & a, at::Tensor & tau); +}; + +struct TORCH_API geqrf { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::geqrf"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "geqrf(Tensor self) -> (Tensor a, Tensor tau)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ger.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ger.h new file mode 100644 index 0000000000000000000000000000000000000000..3caa265141fd9b643869b97dc6517ff166bf7cb8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ger.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::ger(Tensor self, Tensor vec2) -> Tensor +inline at::Tensor ger(const at::Tensor & self, const at::Tensor & vec2) { + return at::_ops::ger::call(self, vec2); +} + +// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ger_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2) { + return at::_ops::ger_out::call(self, vec2, out); +} +// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ger_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) { + return at::_ops::ger_out::call(self, vec2, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ger_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ger_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4b62979f2cc4756ea613ffbe25b598ce9878e9e6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ger_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
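
As an illustrative aside (not part of the diff), aten::ger is the legacy BLAS-style name for the vector outer product; its CompositeImplicitAutograd registration means it decomposes into other ops, and in current releases it simply forwards to at::outer. A minimal sketch:

#include <ATen/ATen.h>

void ger_sketch() {
  at::Tensor u = at::randn({3});
  at::Tensor v = at::randn({4});
  at::Tensor o1 = at::ger(u, v);   // shape {3, 4}
  at::Tensor o2 = at::outer(u, v); // preferred modern spelling
  TORCH_CHECK(at::equal(o1, o2));
}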
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor ger(const at::Tensor & self, const at::Tensor & vec2); +TORCH_API at::Tensor & ger_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2); +TORCH_API at::Tensor & ger_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ger_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ger_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9170244cdd635ecf99a8c3d632236fba47d626d9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ger_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor ger(const at::Tensor & self, const at::Tensor & vec2); +TORCH_API at::Tensor & ger_out(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ger_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ger_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5986c529375c2e64bc0b5e4f1ea4c0950815711e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ger_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API ger { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::ger"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "ger(Tensor self, Tensor vec2) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & vec2); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2); +}; + +struct TORCH_API ger_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::ger"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "ger.out(Tensor self, Tensor vec2, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu.h new file mode 100644 index 0000000000000000000000000000000000000000..7e2577f2c67735a222acae58e353fbfe84c6c379 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & glu_out(at::Tensor & out, const at::Tensor & self, int64_t dim=-1) { + return at::_ops::glu_out::call(self, dim, out); +} +// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & glu_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) { + return at::_ops::glu_out::call(self, dim, out); +} + +// aten::glu(Tensor self, int dim=-1) -> Tensor +inline at::Tensor glu(const at::Tensor & self, int64_t dim=-1) { + return at::_ops::glu::call(self, dim); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..d2b6adbfdc462dd7da4eab38ef80dc89eb9fd990 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & glu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) { + return at::_ops::glu_backward_grad_input::call(grad_output, self, dim, grad_input); +} +// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!) 
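
As an illustrative aside (not part of the diff), each struct in at::_ops, such as ger above, carries the operator's name, overload_name, and exact schema type, which is enough to resolve the operator through the dispatcher at runtime; the generated call() wrappers do essentially this. A minimal sketch:

#include <ATen/ATen.h>
#include <ATen/core/dispatch/Dispatcher.h>

at::Tensor dispatcher_lookup_sketch(const at::Tensor& self, const at::Tensor& vec2) {
  auto handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(at::_ops::ger::name, at::_ops::ger::overload_name);
  // `schema` is the exact C++ signature, so the typed handle is compile-time checked.
  return handle.typed<at::_ops::ger::schema>().call(self, vec2);
}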
+inline at::Tensor & glu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) { + return at::_ops::glu_backward_grad_input::call(grad_output, self, dim, grad_input); +} + +// aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor +inline at::Tensor glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) { + return at::_ops::glu_backward::call(grad_output, self, dim); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e2e1edf478aed0ef12a6ac9fc1d05f75bc10862a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor & glu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor & glu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9bff6f91ef64bccc38cab062686b3ef86db5e8b3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor & glu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor & glu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp.h new file mode 100644 index 0000000000000000000000000000000000000000..459367649f4864aa52c0d3b5d5f21bdaf71234b6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor +inline at::Tensor glu_backward_jvp(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) { + return at::_ops::glu_backward_jvp::call(grad_x, grad_glu, x, dgrad_glu, dx, dim); +} + +// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & glu_backward_jvp_out(at::Tensor & out, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) { + return at::_ops::glu_backward_jvp_out::call(grad_x, grad_glu, x, dgrad_glu, dx, dim, out); +} +// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & glu_backward_jvp_outf(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out) { + return at::_ops::glu_backward_jvp_out::call(grad_x, grad_glu, x, dgrad_glu, dx, dim, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6000c6ba37ffd9523ec9f4dae077c8c24a7d3e73 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
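
As an illustrative aside (not part of the diff), every out= overload is generated in two spellings: the *_out form takes the output tensor first, while *_outf keeps the schema's argument order with the output last; both forward to the same at::_ops entry. A minimal sketch with glu_backward:

#include <ATen/ATen.h>

void glu_backward_out_sketch(const at::Tensor& grad_output, const at::Tensor& x) {
  at::Tensor grad_input = at::empty_like(x);
  at::glu_backward_out(grad_input, grad_output, x, /*dim=*/-1);
  at::glu_backward_outf(grad_output, x, /*dim=*/-1, grad_input); // same kernel
}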
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & glu_backward_jvp_out(at::Tensor & out, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim); +TORCH_API at::Tensor & glu_backward_jvp_outf(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2d0fbdf26181413f73fd1a5ce6b9985b5f4d93f7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor glu_backward_jvp(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b343319b1f1fda7dbd102b8dd14abfaf637b5e68 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor glu_backward_jvp(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0251bc57dcfcae16707e1f0a9813e1b926859219 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & glu_backward_jvp_out(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out); +TORCH_API at::Tensor glu_backward_jvp(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7b041044651907f8845b02fdca121975b3221fa3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_jvp_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
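
As an illustrative aside (not part of the diff), glu splits its input into halves [a, b] along dim and computes a * sigmoid(b); glu_backward_jvp above is the forward-mode (JVP) derivative of glu_backward used by torch.func/functorch. A minimal sketch of the primal decomposition:

#include <ATen/ATen.h>

void glu_decomposition_sketch() {
  at::Tensor x = at::randn({2, 6});
  auto halves = x.chunk(2, /*dim=*/1); // halves[0] = a, halves[1] = b
  at::Tensor manual = halves[0] * at::sigmoid(halves[1]);
  TORCH_CHECK(at::allclose(manual, at::glu(x, /*dim=*/1)));
}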
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API glu_backward_jvp { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::glu_backward_jvp"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim); +}; + +struct TORCH_API glu_backward_jvp_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::glu_backward_jvp"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5eece93ad2b221951ab75d8cee0875a6b28388e4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor glu_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor & glu_backward_cpu_out(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input); +TORCH_API at::Tensor glu_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor & glu_backward_cuda_out(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..abb9b077b4064fda14c1056fbee65a6443d08c2f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by 
torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API glu_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::glu_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input); +}; + +struct TORCH_API glu_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::glu_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e3561d9dad682f9ae9ef35cf6fa3a8aaf68bfb48 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor glu(const at::Tensor & self, int64_t dim=-1); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8c3b6d23ac46900711c0ec63c7ba66309abb9f5a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor glu(const at::Tensor & self, int64_t dim=-1); +TORCH_API at::Tensor & glu_out(at::Tensor & out, const at::Tensor & self, int64_t dim=-1); +TORCH_API at::Tensor & glu_outf(const at::Tensor & self, int64_t dim, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7d92e3a27808193e6829ba894baf1bfc8c340a5f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor glu(const at::Tensor & self, int64_t dim=-1); +TORCH_API at::Tensor & glu_out(at::Tensor & out, const at::Tensor & self, int64_t dim=-1); +TORCH_API at::Tensor & glu_outf(const at::Tensor & self, int64_t dim, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp.h new file mode 100644 index 0000000000000000000000000000000000000000..a8c1779ae561fddb0a43aa3bbdc531b64d9d9f64 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor +inline at::Tensor glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) { + return at::_ops::glu_jvp::call(glu, x, dx, dim); +} + +// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) 
out) -> Tensor(a!) +inline at::Tensor & glu_jvp_out(at::Tensor & out, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) { + return at::_ops::glu_jvp_out::call(glu, x, dx, dim, out); +} +// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & glu_jvp_outf(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) { + return at::_ops::glu_jvp_out::call(glu, x, dx, dim, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e1833a3415fdf495c7b2f8ce6e0d1de642f534b3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & glu_jvp_out(at::Tensor & out, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim); +TORCH_API at::Tensor & glu_jvp_outf(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5698fd8eba677db36d182d20cf889915c1784278 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5eaded41dd0efb6764bf3da71b856fd31c65101d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..af2cb8f65a05f9a227d828225aebb4eaf5a50483 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & glu_jvp_out(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out); +TORCH_API at::Tensor glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..50263c753cb63a39d1336821c45b63fc112c7418 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_jvp_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
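
The *_cpu_dispatch.h and *_cuda_dispatch.h headers interleaved above declare the same signatures inside at::cpu and at::cuda. Calling one of these goes straight to the kernel registered for that dispatch key, so the caller has to uphold the device invariant itself. A brief sketch (the helper function is hypothetical):

#include <ATen/ATen.h>
#include <ATen/ops/glu_cpu_dispatch.h>

// Hypothetical helper: at::cpu::glu bypasses dynamic dispatch, so we assert
// the device invariant the dispatcher would otherwise guarantee.
at::Tensor glu_direct_cpu(const at::Tensor& self) {
  TORCH_CHECK(self.device().is_cpu(), "glu_direct_cpu expects a CPU tensor");
  return at::cpu::glu(self, /*dim=*/-1);
}
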
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API glu_jvp {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::glu_jvp";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor";
+ static at::Tensor call(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim);
+};
+
+struct TORCH_API glu_jvp_out {
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::glu_jvp";
+ static constexpr const char* overload_name = "out";
+ static constexpr const char* schema_str = "glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..80d0320c4ae4b40f3d765fefe2015b77913084ce
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_glu : public TensorIteratorBase {
+
+
+ void meta(const at::Tensor & self, int64_t dim);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c74ecdf0d8d51339316f11c26b466c14141c2a50
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
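
Each at::_ops struct above bundles the operator's identity (name, overload_name, and the full schema_str) together with typed call/redispatch entry points. Those static members are enough to locate the operator record in the dispatcher at runtime; a small sketch:

#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/ops/glu_jvp_ops.h>

// Sketch: resolve the dispatcher record for aten::glu_jvp from the static
// metadata carried by the generated at::_ops struct.
c10::OperatorHandle lookup_glu_jvp() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(
      at::_ops::glu_jvp::name,            // "aten::glu_jvp"
      at::_ops::glu_jvp::overload_name);  // ""
}
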
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor glu(const at::Tensor & self, int64_t dim=-1); +TORCH_API at::Tensor & glu_out(at::Tensor & out, const at::Tensor & self, int64_t dim=-1); +TORCH_API at::Tensor & glu_outf(const at::Tensor & self, int64_t dim, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ca78f37872ddbca9ffeda2756f77c363b26f200c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_glu_out : public at::meta::structured_glu { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f443f3c55f9653da8d8fb0b68af2e09636191d75 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/glu_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API glu_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::glu"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "glu.out(Tensor self, int dim=-1, *, Tensor(a!) 
out) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & self, int64_t dim, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out);
+};
+
+struct TORCH_API glu {
+ using schema = at::Tensor (const at::Tensor &, int64_t);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::glu";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "glu(Tensor self, int dim=-1) -> Tensor";
+ static at::Tensor call(const at::Tensor & self, int64_t dim);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gradient.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gradient.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f0f2a204947814b181721023c815443889a6862
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gradient.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]
+inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, const ::std::optional<at::Scalar> & spacing=::std::nullopt, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1) {
+ return at::_ops::gradient_scalarint::call(self, spacing, dim, edge_order);
+}
+
+// aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]
+inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order=1) {
+ return at::_ops::gradient_scalararray::call(self, spacing, dim, edge_order);
+}
+
+// aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]
+inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order=1) {
+ return at::_ops::gradient_array::call(self, dim, edge_order);
+}
+
+// aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
+inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1) {
+ return at::_ops::gradient_scalarrayint::call(self, spacing, dim, edge_order);
+}
+
+// aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]
+inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order=1) {
+ return at::_ops::gradient_scalarrayarray::call(self, spacing, dim, edge_order);
+}
+
+// aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
+inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1) {
+ return at::_ops::gradient_tensorarrayint::call(self, spacing, dim, edge_order);
+}
+
+// aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]
+inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order=1) {
+ return at::_ops::gradient_tensorarray::call(self, spacing, dim, edge_order);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gradient_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gradient_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2c716e05c3bcdbfe1ea74d8f9f9ded64a3a506ee
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gradient_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,29 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, const ::std::optional<at::Scalar> & spacing=::std::nullopt, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1);
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order=1);
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order=1);
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1);
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order=1);
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1);
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order=1);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gradient_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gradient_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f16b0f902b8b10fafb6bf59d8afd8e3bb2902978
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gradient_native.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, const ::std::optional<at::Scalar> & spacing=::std::nullopt, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1);
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order=1);
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order=1);
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1);
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order=1);
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1);
+TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order=1);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gradient_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gradient_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..052350cfd88309855660231454e1bf21778cc2aa
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gradient_ops.h
@@ -0,0 +1,95 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API gradient_scalarint {
+ using schema = ::std::vector<at::Tensor> (const at::Tensor &, const ::std::optional<at::Scalar> &, ::std::optional<int64_t>, int64_t);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::gradient";
+ static constexpr const char* overload_name = "scalarint";
+ static constexpr const char* schema_str = "gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]";
+ static ::std::vector<at::Tensor> call(const at::Tensor & self, const ::std::optional<at::Scalar> & spacing, ::std::optional<int64_t> dim, int64_t edge_order);
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & spacing, ::std::optional<int64_t> dim, int64_t edge_order);
+};
+
+struct TORCH_API gradient_scalararray {
+ using schema = ::std::vector<at::Tensor> (const at::Tensor &, const at::Scalar &, at::IntArrayRef, int64_t);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::gradient";
+ static constexpr const char* overload_name = "scalararray";
+ static constexpr const char* schema_str = "gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]";
+ static ::std::vector<at::Tensor> call(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order);
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order);
+};
+
+struct TORCH_API gradient_array {
+ using schema = ::std::vector<at::Tensor> (const at::Tensor &, at::IntArrayRef, int64_t);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::gradient";
+ static constexpr const char* overload_name = "array";
+ static constexpr const char* schema_str = "gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]";
+ static ::std::vector<at::Tensor> call(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order);
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order);
+};
+
+struct TORCH_API gradient_scalarrayint {
+ using schema = ::std::vector<at::Tensor> (const at::Tensor &, at::ArrayRef<at::Scalar>, ::std::optional<int64_t>, int64_t);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::gradient";
+ static constexpr const char* overload_name = "scalarrayint";
+ static constexpr const char* schema_str = "gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]";
+ static ::std::vector<at::Tensor> call(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, ::std::optional<int64_t> dim, int64_t edge_order);
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, ::std::optional<int64_t> dim, int64_t edge_order);
+};
+
+struct TORCH_API gradient_scalarrayarray {
+ using schema = ::std::vector<at::Tensor> (const at::Tensor &, at::ArrayRef<at::Scalar>, at::IntArrayRef, int64_t);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::gradient";
+ static constexpr const char* overload_name = "scalarrayarray";
+ static constexpr const char* schema_str = "gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]";
+ static ::std::vector<at::Tensor> call(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order);
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order);
+};
+
+struct TORCH_API gradient_tensorarrayint {
+ using schema = ::std::vector<at::Tensor> (const at::Tensor &, at::TensorList, ::std::optional<int64_t>, int64_t);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::gradient";
+ static constexpr const char* overload_name = "tensorarrayint";
+ static constexpr const char* schema_str = "gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]";
+ static ::std::vector<at::Tensor> call(const at::Tensor & self, at::TensorList spacing, ::std::optional<int64_t> dim, int64_t edge_order);
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList spacing, ::std::optional<int64_t> dim, int64_t edge_order);
+};
+
+struct TORCH_API gradient_tensorarray {
+ using schema = ::std::vector<at::Tensor> (const at::Tensor &, at::TensorList, at::IntArrayRef, int64_t);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::gradient";
+ static constexpr const char* overload_name = "tensorarray";
+ static constexpr const char* schema_str = "gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]";
+ static ::std::vector<at::Tensor> call(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order);
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/greater.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater.h
new file mode 100644
index 0000000000000000000000000000000000000000..612b3be4d7b949db7fc921c8b24b56c2101cecf5
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater.h
@@ -0,0 +1,54 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & greater_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_Scalar_out::call(self, other, out); +} +// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & greater_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::greater_Scalar_out::call(self, other, out); +} + +// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor greater(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_Scalar::call(self, other); +} + +// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & greater_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_Tensor_out::call(self, other, out); +} +// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & greater_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::greater_Tensor_out::call(self, other, out); +} + +// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor greater(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_Tensor::call(self, other); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f1984151d6201b5b15520b743eefb3cf914bd7dc --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_compositeimplicitautograd_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
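
greater.h follows the standard comparison-op layout: Scalar and Tensor overloads, each with a functional form and an out/outf pair. In ATen, aten::greater is the NumPy-style alias of aten::gt, so the masks below could equally be spelled with at::gt. A short sketch:

#include <ATen/ATen.h>

// Sketch of the overload set: Scalar and Tensor right-hand sides, plus the
// out-variant writing into a preallocated bool tensor.
void greater_examples() {
  at::Tensor a = at::arange(3);                  // {0, 1, 2}
  at::Tensor m1 = at::greater(a, 1);             // Scalar overload -> {false, false, true}
  at::Tensor m2 = at::greater(a, at::ones({3})); // Tensor overload, broadcasting applies
  at::Tensor out = at::empty({3}, at::kBool);
  at::greater_out(out, a, 1);                    // same mask, written into `out`
}
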
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor greater(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & greater_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & greater_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & greater_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor greater(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & greater_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & greater_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & greater_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_equal.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_equal.h new file mode 100644 index 0000000000000000000000000000000000000000..125442074f6a7bcc5aa3ef5fd52c52749e595859 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_equal.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_equal_Scalar_out::call(self, other, out); +} +// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::greater_equal_Scalar_out::call(self, other, out); +} + +// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor greater_equal(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_equal_Scalar::call(self, other); +} + +// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_equal_Tensor_out::call(self, other, out); +} +// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::greater_equal_Tensor_out::call(self, other, out); +} + +// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor greater_equal(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_equal_Tensor::call(self, other); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_equal_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_equal_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..78a53cb0f6770c55bbf0518337eee658ac4601b7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_equal_compositeimplicitautograd_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor greater_equal(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & greater_equal_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor greater_equal(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & greater_equal_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_equal_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_equal_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7c8ce73045be56c5a7709afab67d5921263003a9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_equal_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor greater_equal(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & greater_equal_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & greater_equal_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor greater_equal(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & greater_equal_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & greater_equal_(at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git 
a/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_equal_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_equal_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..350fc668b24fadfbe9b5168d23a5dd3a612d9b6a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_equal_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API greater_equal_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::greater_equal"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API greater_equal_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::greater_equal"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "greater_equal.Scalar(Tensor self, Scalar other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API greater_equal_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::greater_equal"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API greater_equal_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::greater_equal"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "greater_equal.Tensor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API greater_equal__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::greater_equal_"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API greater_equal__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::greater_equal_"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "greater_equal_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_native.h new file mode 100644 index 0000000000000000000000000000000000000000..677877b4e7397a343f238cd1901dd4ab35e4edeb --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor greater(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & greater_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & greater_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor greater(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & greater_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & greater_(at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1b046dda847890a41ee2fbc662716c2e74648462 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/greater_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API greater_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::greater"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API greater_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::greater"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "greater.Scalar(Tensor self, Scalar other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API greater_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::greater"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API greater_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::greater"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "greater.Tensor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API greater__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::greater_"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API greater__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::greater_"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "greater_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler.h new file mode 100644 index 0000000000000000000000000000000000000000..74c1117f31e80f1f944f91fa80ae3c0c3b80fb9f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor +inline at::Tensor grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::grid_sampler::call(input, grid, interpolation_mode, padding_mode, align_corners); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d.h new file mode 100644 index 0000000000000000000000000000000000000000..f66358be6c4d804c0ea2516045d5735824327029 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor +inline at::Tensor grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::grid_sampler_2d::call(input, grid, interpolation_mode, padding_mode, align_corners); +} + +// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & grid_sampler_2d_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::grid_sampler_2d_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out); +} +// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & grid_sampler_2d_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
+ return at::_ops::grid_sampler_2d_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e00c1f947f48fe084efe79abab5a2088dc1a7c8
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
+ return at::_ops::grid_sampler_2d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
+}
+
+// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
+ return at::_ops::grid_sampler_2d_backward_out::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
+}
+// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
+ return at::_ops::grid_sampler_2d_backward_out::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf05710c1fe6dad5fbd03f6796e500345a2051c7
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c16ddff24eb71e4a3d681d38430f1c68d0e69e13
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
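
The bool[2] output_mask parameter running through these backward declarations selects which of the two gradients, (grad_input, grad_grid), the kernel actually materializes; an unrequested slot may come back as an undefined tensor. A hedged sketch of calling the op directly (normally autograd does this; the interpolation/padding integers are assumed to follow the usual 0 = bilinear, 0 = zeros encoding):

#include <ATen/ATen.h>
#include <tuple>

// Sketch: ask only for grad_input by masking out the grid gradient.
void grid_sampler_2d_backward_example() {
  at::Tensor input = at::randn({1, 3, 8, 8});        // N, C, H, W
  at::Tensor grid  = at::rand({1, 4, 4, 2}) * 2 - 1; // N, H_out, W_out, 2 in [-1, 1]
  at::Tensor out   = at::grid_sampler_2d(input, grid, /*interpolation_mode=*/0,
                                         /*padding_mode=*/0, /*align_corners=*/false);
  auto grads = at::grid_sampler_2d_backward(
      at::ones_like(out), input, grid, 0, 0, false,
      /*output_mask=*/{true, false});                // request grad_input only
  at::Tensor grad_input = std::get<0>(grads);        // grad_grid slot left unused
}
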
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
+
+} // namespace cpu
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9cb06581f93850f0d7217b82ea2cf1334a2fc7dc
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
+
+} // namespace cuda
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3191f87473f6ceddbc4017a5f4a4702f30850ec3
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward_cpu(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward_cuda(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..68d6cf96469587514ec9a7c460188bc956683412
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API grid_sampler_2d_backward {
+ using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool, ::std::array<bool,2>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::grid_sampler_2d_backward";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)";
+ static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
+ static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
+};
+
+struct TORCH_API grid_sampler_2d_backward_out {
+ using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool, ::std::array<bool,2>, at::Tensor &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::grid_sampler_2d_backward";
+ static constexpr const char* overload_name = "out";
+ static constexpr const char* schema_str = "grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))";
+ static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1);
+ static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fcb9af51f8898503f3497ab9d3bcc21a34587e9f
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & grid_sampler_2d_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +TORCH_API at::Tensor & grid_sampler_2d_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..00c72b6a8ff14f4513d0d5147987cad7f7e4ce09 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4cf76dde82879d9d0b721b83ce7d97a17046e166 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
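
For the forward path, the composite aten::grid_sampler (earlier in this diff) routes to grid_sampler_2d for 4-D input and grid_sampler_3d for 5-D input, with the per-backend kernels declared in these dispatch headers. A minimal sketch (again assuming the 0 = bilinear / 0 = zeros integer encodings):

#include <ATen/ATen.h>

// Sketch: the convenience entry point accepts 4-D (N,C,H,W) or 5-D
// (N,C,D,H,W) input and picks the 2d or 3d kernel accordingly.
at::Tensor sample(const at::Tensor& input, const at::Tensor& grid) {
  TORCH_CHECK(input.dim() == 4 || input.dim() == 5,
              "grid_sampler expects 4-D or 5-D input");
  return at::grid_sampler(input, grid, /*interpolation_mode=*/0,
                          /*padding_mode=*/0, /*align_corners=*/false);
}
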
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d3d05b2345b852a995e16ce84169732dee89f777 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & grid_sampler_2d_out(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out); +TORCH_API at::Tensor grid_sampler_2d_cpu(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +TORCH_API at::Tensor grid_sampler_2d_cuda(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d2aa36a3980f4c0121055c231309a607ef49a4fd --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_2d_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API grid_sampler_2d { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::grid_sampler_2d"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor"; + static at::Tensor call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +}; + +struct TORCH_API grid_sampler_2d_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::grid_sampler_2d"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d.h new file mode 100644 index 0000000000000000000000000000000000000000..ea377b095ce7ab11ef1aac02b2f96ef29a31318b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor +inline at::Tensor grid_sampler_3d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::grid_sampler_3d::call(input, grid, interpolation_mode, padding_mode, align_corners); +} + +// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & grid_sampler_3d_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::grid_sampler_3d_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out); +} +// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & grid_sampler_3d_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) { + return at::_ops::grid_sampler_3d_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..3fbad01606e08b75fb97f2a36f9d58c4e792ee4f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) { + return at::_ops::grid_sampler_3d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask); +} + +// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) { + return at::_ops::grid_sampler_3d_backward_out::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1); +} +// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!)
out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::grid_sampler_3d_backward_out::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..15d71282be0ba025752b3c83dcbdf1062b853815 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..879e18d6965fd85da7997e1647e394bd235a85cf --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
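Note the two spellings generated for every out variant, visible again just above: `*_out` takes the output tensor(s) as leading arguments, while `*_outf` takes the identical list with the outputs moved to the end, matching the schema order; both forward to the same `_ops::..._out::call`. A small sketch (shapes are illustrative; the out wrapper resizes the destinations):

#include <ATen/ATen.h>

int main() {
  at::Tensor input = at::randn({1, 1, 4, 4, 4});          // N, C, D, H, W
  at::Tensor grid  = at::rand({1, 2, 2, 2, 3}) * 2 - 1;   // N, D_out, H_out, W_out, 3
  at::Tensor grad  = at::ones({1, 1, 2, 2, 2});           // matches the forward output
  at::Tensor out0 = at::empty({0}), out1 = at::empty({0});
  // Outputs first:
  at::grid_sampler_3d_backward_out(out0, out1, grad, input, grid, 0, 0, false, {true, true});
  // Outputs last (same underlying call):
  at::grid_sampler_3d_backward_outf(grad, input, grid, 0, 0, false, {true, true}, out0, out1);
}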
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ed2ce2cd9f0f4214b299faee29360c7d05c68b70 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c767895d140c1c34db1a4d8d432491cefc5b647d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward_cpu(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward_cuda(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5b1efe340412e166f48c9666033a0a7267de80af --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator
signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API grid_sampler_3d_backward { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool, ::std::array<bool,2>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::grid_sampler_3d_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask); +}; + +struct TORCH_API grid_sampler_3d_backward_out { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool, ::std::array<bool,2>, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::grid_sampler_3d_backward"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f39d92b02c844000aa36bb041e57888979f83128 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & grid_sampler_3d_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +TORCH_API at::Tensor & grid_sampler_3d_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2fdcbb5b0fa3fe6a11d6e486f799c0b49e62a8d5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor grid_sampler_3d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..df2d70dabd96f666ab23e678fb43f62f2a508b37 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
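Besides the dispatcher-routed `at::` functions, torchgen emits per-backend headers like the `at::cpu::` and `at::cuda::` declarations above; these call the kernel registered for that backend directly, skipping dispatch (and therefore autograd), which is occasionally useful inside other kernels. A sketch, assuming CPU tensors and a LibTorch include path:

#include <ATen/ATen.h>
#include <ATen/ops/grid_sampler_3d_cpu_dispatch.h>

int main() {
  at::Tensor input = at::randn({1, 1, 4, 4, 4});
  at::Tensor grid  = at::rand({1, 2, 2, 2, 3}) * 2 - 1;
  at::Tensor a = at::grid_sampler_3d(input, grid, 0, 0, false);       // via the dispatcher
  at::Tensor b = at::cpu::grid_sampler_3d(input, grid, 0, 0, false);  // CPU kernel directly
}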
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor grid_sampler_3d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..80d99011654e31130c990c717748061c88a1a770 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & grid_sampler_3d_out(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out); +TORCH_API at::Tensor grid_sampler_3d_cpu(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +TORCH_API at::Tensor grid_sampler_3d_cuda(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5ea654fc5e0e170050dee5fa1dd4e2050a55e354 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_3d_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API grid_sampler_3d { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::grid_sampler_3d"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor"; + static at::Tensor call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +}; + +struct TORCH_API grid_sampler_3d_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::grid_sampler_3d"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cf92f1de3b1bf21a8768e2d1799f0d415fc225d2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_native.h new file mode 100644 index 0000000000000000000000000000000000000000..abe50cc618048fccc6b6f78dea92b638f80b8ba2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..56a9b46c0486b4061b31df52848f27b1e5b72658 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/grid_sampler_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API grid_sampler { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::grid_sampler"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor"; + static at::Tensor call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/group_norm.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/group_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..5de29b60ee3b1a458c2778aa5e26d73f07eb2ff6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/group_norm.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor +inline at::Tensor group_norm(const at::Tensor & input, int64_t num_groups, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enabled=true) { + return at::_ops::group_norm::call(input, num_groups, weight, bias, eps, cudnn_enabled); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/group_norm_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/group_norm_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5c730e21d0f6dd37911027bec24b7512b14d10bb --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/group_norm_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
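`group_norm` is a CompositeImplicitAutograd op: it has no backend kernel of its own and decomposes into other ops, which is why only `compositeimplicitautograd` and `native` headers are generated for it. The affine `weight`/`bias` are optional and `cudnn_enabled` is a legacy toggle. A usage sketch (channel count must be divisible by `num_groups`):

#include <ATen/ATen.h>

int main() {
  at::Tensor x  = at::randn({2, 6, 4, 4});               // N, C, H, W
  at::Tensor y0 = at::group_norm(x, /*num_groups=*/3);   // no affine parameters
  at::Tensor y1 = at::group_norm(x, 3, at::ones({6}), at::zeros({6}));  // per-channel affine
}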
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor group_norm(const at::Tensor & input, int64_t num_groups, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enabled=true); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/group_norm_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/group_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..411ae5c70a5b7738c5c2f3e695ff0b6ef8af6545 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/group_norm_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor group_norm(const at::Tensor & input, int64_t num_groups, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enabled=true); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/group_norm_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/group_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..04b5ab5bce5f6d4a9cd990d20b97d3cbd0c70b46 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/group_norm_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API group_norm { + using schema = at::Tensor (const at::Tensor &, int64_t, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, double, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::group_norm"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor?
bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor"; + static at::Tensor call(const at::Tensor & input, int64_t num_groups, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enabled); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t num_groups, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enabled); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gru.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru.h new file mode 100644 index 0000000000000000000000000000000000000000..fff2a62da0cebb6c1a0fbe436f51f7bfb01a179a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru.h @@ -0,0 +1,36 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::gru_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); +} + +// aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + return at::_ops::gru_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_cell.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_cell.h new file mode 100644 index 0000000000000000000000000000000000000000..0224a4c10ea579079125b3f4572dd6428d4d9112 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_cell.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor?
b_hh=None) -> Tensor +inline at::Tensor gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih={}, const ::std::optional<at::Tensor> & b_hh={}) { + return at::_ops::gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_cell_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_cell_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2f6d401480c9599c8ca9983c8b0e2ce2aa4dd1b9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_cell_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih={}, const ::std::optional<at::Tensor> & b_hh={}); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_cell_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_cell_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9321ca3553970218b76e546843fbd9eeee283380 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_cell_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih={}, const ::std::optional<at::Tensor> & b_hh={}); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_cell_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_cell_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..84627ebb6aeecd4b7f9a90ce1d9cd69ab6425d13 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_cell_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
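`gru_cell` computes a single GRU time step; the three gates (reset, update, new) are stacked along dim 0 of the weight matrices, and both bias tensors are optional. A sketch with illustrative sizes:

#include <ATen/ATen.h>

int main() {
  const int64_t batch = 2, input_size = 4, hidden = 8;
  at::Tensor x    = at::randn({batch, input_size});
  at::Tensor hx   = at::zeros({batch, hidden});
  at::Tensor w_ih = at::randn({3 * hidden, input_size});  // gates stacked 3x along dim 0
  at::Tensor w_hh = at::randn({3 * hidden, hidden});
  at::Tensor h1 = at::gru_cell(x, hx, w_ih, w_hh);        // b_ih, b_hh default to None
  at::Tensor h2 = at::gru_cell(x, hx, w_ih, w_hh,
                               at::zeros({3 * hidden}), at::zeros({3 * hidden}));
}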
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API gru_cell { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gru_cell"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor"; + static at::Tensor call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e384d7e883b60ad757af797fc5cd6b9765f06c3b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_native.h new file mode 100644 index 0000000000000000000000000000000000000000..660c224bc1f78f8856e3c38d3f85efc288d4bcd6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..725e7357f9bf82b4df0533b4b91d5f69aa6d3b80 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gru_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
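The two `gru` declarations above are the `input` and `data` overloads named by `overload_name` in the `_ops` structs that follow: the first takes a padded `(seq, batch, feature)` tensor, the second a packed sequence as `data` plus `batch_sizes`. A sketch of the padded form, one layer without biases (shapes are illustrative):

#include <ATen/ATen.h>
#include <vector>

int main() {
  const int64_t seq = 5, batch = 2, input_size = 4, hidden = 8;
  at::Tensor x  = at::randn({seq, batch, input_size});   // batch_first = false
  at::Tensor hx = at::zeros({1, batch, hidden});         // num_layers * num_directions
  // Per layer, without biases: params = {w_ih, w_hh}.
  std::vector<at::Tensor> params = {at::randn({3 * hidden, input_size}),
                                    at::randn({3 * hidden, hidden})};
  auto [output, hy] = at::gru(x, hx, params, /*has_biases=*/false, /*num_layers=*/1,
                              /*dropout=*/0.0, /*train=*/false, /*bidirectional=*/false,
                              /*batch_first=*/false);
}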
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API gru_input { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gru"; + static constexpr const char* overload_name = "input"; + static constexpr const char* schema_str = "gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); +}; + +struct TORCH_API gru_data { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gru"; + static constexpr const char* overload_name = "data"; + static constexpr const char* schema_str = "gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gt.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt.h new file mode 100644 index 0000000000000000000000000000000000000000..4723c372b206a5a1b9b8ec272c82dd9b2b85d27a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::gt_Scalar_out::call(self, other, out); +} +// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::gt_Scalar_out::call(self, other, out); +} + +// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor gt(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::gt_Scalar::call(self, other); +} + +// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gt_Tensor_out::call(self, other, out); +} +// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::gt_Tensor_out::call(self, other, out); +} + +// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor gt(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gt_Tensor::call(self, other); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2f724a8bd64203aedca205524be7b1c9829c1bec --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor gt(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor gt(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..483cc671f6729eabc04d0da06671dd270808f08e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_cpu_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
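`gt` follows the standard comparison-op layout: `Scalar` and `Tensor` overloads, `_out`/`_outf` spellings, and the in-place `gt_` variants declared in `gt_ops.h` below; results are boolean tensors. A short sketch:

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::tensor({1, 2, 3});
  at::Tensor b = at::tensor({3, 2, 1});
  at::Tensor m1 = at::gt(a, 2);   // gt.Scalar -> {false, false, true}
  at::Tensor m2 = at::gt(a, b);   // gt.Tensor, broadcasting applies
  at::Tensor out = at::empty({0}, a.options().dtype(at::kBool));
  at::gt_out(out, a, b);          // resizes and writes into out
}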
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor gt(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor gt(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..85d12e276c6d29efff5453901c49f12d38031d7a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_cuda_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor gt(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor gt(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..eec38160f658be882fb71b4c2544128d23d2cc40 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_meta.h @@ -0,0 +1,32 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_gt_Scalar : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Scalar & other); +}; +struct TORCH_API structured_gt_Tensor : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_meta_dispatch.h new file mode 100644 index 
0000000000000000000000000000000000000000..16e0c6ae27ec231eab32c0f51783c23e8f533017 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_meta_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor gt(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor gt(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9f0beb292ea098b701adcfabef57cd6e9b4d0ca6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_native.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_gt_Scalar_out : public at::meta::structured_gt_Scalar { +void impl(const at::Tensor & self, const at::Scalar & other, const at::Tensor & out); +}; +TORCH_API at::Tensor gt_scalar_nested(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor gt_quantized_cpu(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & gt_out_quantized_cpu(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +struct TORCH_API structured_gt_Tensor_out : public at::meta::structured_gt_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor gt_quantized_cpu(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gt_out_quantized_cpu(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..519c4a08816866a83113e227ca536ff5c09ef063 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/gt_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
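`gt` is also a *structured* operator: the `meta::structured_gt_*` classes above derive from `TensorIteratorBase`, their `meta()` computes output shape and dtype, and `native::structured_gt_*_out::impl()` in `gt_native.h` supplies the actual kernel. The same `meta()` powers the Meta backend, which propagates shapes without touching data; a sketch (assumes the Meta backend available in stock LibTorch):

#include <ATen/ATen.h>

int main() {
  // Meta tensors carry only metadata; no storage is allocated or read.
  at::Tensor am = at::empty({3, 1}, at::TensorOptions().device(at::kMeta));
  at::Tensor bm = at::empty({1, 4}, at::TensorOptions().device(at::kMeta));
  at::Tensor rm = at::gt(am, bm);   // runs only the structured meta(): broadcast + dtype
  // rm.sizes() == [3, 4], rm.scalar_type() == at::kBool
}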
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API gt_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gt"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API gt_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gt"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "gt.Scalar(Tensor self, Scalar other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API gt_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gt"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API gt_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gt"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "gt.Tensor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API gt__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gt_"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "gt_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API gt__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gt_"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hamming_window.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hamming_window.h new file mode 100644 index 0000000000000000000000000000000000000000..e4b2a7edcbc469f92df7a632dd8f6d34b45ba3a0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hamming_window.h @@ -0,0 +1,98 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, at::TensorOptions options={}) { + return at::_ops::hamming_window::call(window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::hamming_window::call(window_length, dtype, layout, device, pin_memory); +} + +// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::hamming_window_periodic::call(window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::hamming_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory); +} + +// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options={}) { + return at::_ops::hamming_window_periodic_alpha::call(window_length, periodic, alpha, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::hamming_window_periodic_alpha::call(window_length, periodic, alpha, dtype, layout, device, pin_memory); +} + +// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options={}) { + return at::_ops::hamming_window_periodic_alpha_beta::call(window_length, periodic, alpha, beta, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::hamming_window_periodic_alpha_beta::call(window_length, periodic, alpha, beta, dtype, layout, device, pin_memory); +} + +// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length) { + return at::_ops::hamming_window_out::call(window_length, out); +} +// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_outf(int64_t window_length, at::Tensor & out) { + return at::_ops::hamming_window_out::call(window_length, out); +} + +// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::hamming_window_periodic_out::call(window_length, periodic, out); +} +// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::hamming_window_periodic_out::call(window_length, periodic, out); +} + +// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha) { + return at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out); +} +// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!) 
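+
+// Editorial note (illustrative sketch, not part of the generated header): every
+// out= operator is emitted twice, as `*_out` with the result tensor first and as
+// `*_outf` in the schema's argument order (out last). Both write into the
+// caller-provided tensor:
+//   at::Tensor out = at::empty({400}, at::kFloat);
+//   at::hamming_window_out(out, 400);   // result-first convention
+//   at::hamming_window_outf(400, out);  // schema-order convention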
+inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, at::Tensor & out) { + return at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out); +} + +// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha, double beta) { + return at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out); +} +// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) { + return at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hamming_window_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hamming_window_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..79096ea957d42a4fd6e944d9248cc30e75a58b08 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hamming_window_compositeexplicitautograd_dispatch.h @@ -0,0 +1,38 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
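+
+// Editorial note (illustrative sketch, not part of the generated header):
+// CompositeExplicitAutograd kernels are registered once and shared by all
+// backends; factories such as hamming_window land here because they have no
+// tensor inputs to dispatch on. The key-specific entry point declared below can
+// be called directly, though normal code should go through at::hamming_window:
+//   at::Tensor w = at::compositeexplicitautograd::hamming_window(400);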
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor hamming_window(int64_t window_length, at::TensorOptions options={}); +TORCH_API at::Tensor hamming_window(int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length); +TORCH_API at::Tensor & hamming_window_outf(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options={}); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic); +TORCH_API at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, at::Tensor & out); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options={}); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha); +TORCH_API at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, at::Tensor & out); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options={}); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha, double beta); +TORCH_API at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hamming_window_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hamming_window_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fff7f9603a2652e35390d32fee4586d9caa88767 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hamming_window_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hamming_window(int64_t window_length, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & hamming_window_out(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & hamming_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & 
hamming_window_periodic_alpha_out(int64_t window_length, bool periodic, double alpha, at::Tensor & out); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & hamming_window_periodic_alpha_beta_out(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hamming_window_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hamming_window_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..32dc403181bea3b197e845b09ceb67483eec2ca3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hamming_window_ops.h @@ -0,0 +1,106 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hamming_window { + using schema = at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API hamming_window_periodic { + using schema = at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "periodic"; + static constexpr const char* schema_str = "hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API hamming_window_periodic_alpha { + using schema = at::Tensor (int64_t, bool, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "periodic_alpha"; + static constexpr const char* schema_str = "hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, bool periodic, double alpha, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API hamming_window_periodic_alpha_beta { + using schema = at::Tensor (int64_t, bool, double, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "periodic_alpha_beta"; + static constexpr const char* schema_str = "hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, bool periodic, double alpha, double beta, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API hamming_window_out { + using schema = at::Tensor & (int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hamming_window.out(int window_length, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out); +}; + +struct TORCH_API hamming_window_periodic_out { + using schema = at::Tensor & (int64_t, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "periodic_out"; + static constexpr const char* schema_str = "hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, bool periodic, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out); +}; + +struct TORCH_API hamming_window_periodic_alpha_out { + using schema = at::Tensor & (int64_t, bool, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "periodic_alpha_out"; + static constexpr const char* schema_str = "hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, bool periodic, double alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, at::Tensor & out); +}; + +struct TORCH_API hamming_window_periodic_alpha_beta_out { + using schema = at::Tensor & (int64_t, bool, double, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "periodic_alpha_beta_out"; + static constexpr const char* schema_str = "hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hann_window.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hann_window.h new file mode 100644 index 0000000000000000000000000000000000000000..164be83cf3252a351f752aefe3d8744a16193363 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hann_window.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hann_window(int64_t window_length, at::TensorOptions options={}) { + return at::_ops::hann_window::call(window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hann_window(int window_length, *, ScalarType? 
dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hann_window(int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::hann_window::call(window_length, dtype, layout, device, pin_memory); +} + +// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hann_window(int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::hann_window_periodic::call(window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hann_window(int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::hann_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory); +} + +// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length) { + return at::_ops::hann_window_out::call(window_length, out); +} +// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hann_window_outf(int64_t window_length, at::Tensor & out) { + return at::_ops::hann_window_out::call(window_length, out); +} + +// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::hann_window_periodic_out::call(window_length, periodic, out); +} +// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hann_window_outf(int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::hann_window_periodic_out::call(window_length, periodic, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hann_window_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hann_window_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fa8370bff1e6dae0c4334b177ab967a38d57bd52 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hann_window_compositeexplicitautograd_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
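+
+// Editorial note (illustrative sketch, not part of the generated header): each
+// factory has a TensorOptions convenience overload and an "unpacked" overload
+// taking the individual optional fields; the former expands into the latter at
+// the call site:
+//   at::Tensor a = at::hann_window(512, at::TensorOptions().dtype(at::kFloat));
+//   at::Tensor b = at::hann_window(512, at::kFloat, c10::nullopt, c10::nullopt, c10::nullopt);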
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor hann_window(int64_t window_length, at::TensorOptions options={}); +TORCH_API at::Tensor hann_window(int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length); +TORCH_API at::Tensor & hann_window_outf(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor hann_window(int64_t window_length, bool periodic, at::TensorOptions options={}); +TORCH_API at::Tensor hann_window(int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length, bool periodic); +TORCH_API at::Tensor & hann_window_outf(int64_t window_length, bool periodic, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hann_window_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hann_window_native.h new file mode 100644 index 0000000000000000000000000000000000000000..922efafc919ecb48dfc5668542bd88e53f97341d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hann_window_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hann_window(int64_t window_length, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & hann_window_out(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor hann_window(int64_t window_length, bool periodic, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & hann_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hann_window_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hann_window_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b73f772aecc8989024cad3f35dc7435915338b78 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hann_window_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hann_window { + using schema = at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hann_window"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API hann_window_periodic { + using schema = at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hann_window"; + static constexpr const char* overload_name = "periodic"; + static constexpr const char* schema_str = "hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API hann_window_out { + using schema = at::Tensor & (int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hann_window"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out); +}; + +struct TORCH_API hann_window_periodic_out { + using schema = at::Tensor & (int64_t, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hann_window"; + static constexpr const char* overload_name = "periodic_out"; + static constexpr const char* schema_str = "hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, bool periodic, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink.h new file mode 100644 index 0000000000000000000000000000000000000000..5d6bcfdbaff51c3e9d8257a1827e12f0f7ea16a9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5) { + return at::_ops::hardshrink_out::call(self, lambd, out); +} +// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) 
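+
+// Editorial note (illustrative sketch, not part of the generated header):
+// hardshrink zeroes entries with |x| <= lambd and passes the rest through
+// unchanged, i.e. y = x if |x| > lambd, else 0:
+//   at::Tensor x = at::randn({4});
+//   at::Tensor y = at::hardshrink(x);        // default lambd = 0.5
+//   at::Tensor z = at::hardshrink(x, 0.25);  // custom threshold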
+inline at::Tensor & hardshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) { + return at::_ops::hardshrink_out::call(self, lambd, out); +} + +// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor +inline at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5) { + return at::_ops::hardshrink::call(self, lambd); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..3b633476d843c7c305889964425b5698b0723e64 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) { + return at::_ops::hardshrink_backward_grad_input::call(grad_out, self, lambd, grad_input); +} +// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) { + return at::_ops::hardshrink_backward_grad_input::call(grad_out, self, lambd, grad_input); +} + +// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor +inline at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) { + return at::_ops::hardshrink_backward::call(grad_out, self, lambd); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dfd43931f6dedc22bee5c20d79e93faeae1ec411 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
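+
+// Editorial note (illustrative sketch, not part of the generated header): the
+// CompositeExplicitAutogradNonFunctional key registers only the functional
+// variant; out= and in-place forms are recovered from it by functionalization,
+// which is why this header declares just hardshrink_backward itself. Given
+// tensors grad and self:
+//   at::Tensor gi = at::compositeexplicitautogradnonfunctional::hardshrink_backward(grad, self, 0.5);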
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1ef8a77c802d5f37f7b80366ac3bab092e66d2e6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..445601ce2dd1c22b3846d08cd243f4419bf09d2a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
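+
+// Editorial note (illustrative sketch, not part of the generated header): the
+// at::cpu:: and at::cuda:: namespaces expose the backend kernels directly,
+// bypassing the dispatcher, and assume the arguments already live on that
+// device:
+//   at::Tensor g = at::randn({8}, at::kCUDA);
+//   at::Tensor x = at::randn({8}, at::kCUDA);
+//   at::Tensor gi = at::cuda::hardshrink_backward(g, x, 0.5);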
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..9329b1002f840f07697e505471b6bf25e67fd8d2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_hardshrink_backward : public TensorIteratorBase { + + + void meta(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cc85009aeb68111fd683a5b8c664c2328109c2d4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
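+
+// Editorial note (illustrative sketch, not part of the generated header): meta
+// kernels run only the shape/dtype computation, so they accept tensors on the
+// "meta" device and allocate no storage:
+//   at::Tensor g = at::empty({8}, at::kMeta);
+//   at::Tensor x = at::empty({8}, at::kMeta);
+//   at::Tensor gi = at::meta::hardshrink_backward(g, x, 0.5);  // gi is a meta tensor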
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c253ed9ad98634eb8753501d45d8d26570474a41 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_hardshrink_backward_out : public at::meta::structured_hardshrink_backward { +void impl(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2f2ef57c3c6df5f47464d034ade567896652d738 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardshrink_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardshrink_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) 
grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); +}; + +struct TORCH_API hardshrink_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardshrink_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bd72913cf2d7634005a31bec582ad9b5d0702717 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5932ade5401bb862660c209b97f63c25edf0bdfe --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
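+
+// Editorial note (illustrative sketch, not part of the generated header): the
+// same operator is reachable at three levels: at::hardshrink (full dispatch),
+// at::cpu::hardshrink below (the CPU kernel directly), and
+// at::_ops::hardshrink::call (the schema-level entry both bottom out in):
+//   at::Tensor y = at::cpu::hardshrink(at::randn({4}), /*lambd=*/0.3);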
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & hardshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & hardshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2b790fa52a040fc567038866e5a65e73e439276d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & hardshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & hardshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..875796c23af6ff2099d447d22500ed77fcf6a6e0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_hardshrink : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Scalar & lambd); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..148c4c727a08ebe1f1c7dd0ed3cab5e1f34b9729 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
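+
+// Editorial note (illustrative sketch, not part of the generated header):
+// hardshrink is a structured op: meta::structured_hardshrink::meta() fixes the
+// output shape/dtype and native::structured_hardshrink_out::impl() fills it in,
+// so the functional and out= variants share one kernel. Shape inference alone:
+//   at::Tensor y = at::meta::hardshrink(at::empty({3, 5}, at::kMeta));  // y.sizes() == {3, 5}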
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & hardshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & hardshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e824a04eb2dbe09de40f3d4d539d08e7767518c9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_hardshrink_out : public at::meta::structured_hardshrink { +void impl(const at::Tensor & self, const at::Scalar & lambd, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f656c071c828afac68b817bfece276e2ddb006f6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardshrink_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardshrink_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardshrink"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out); +}; + +struct TORCH_API hardshrink { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardshrink"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & lambd); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid.h new file mode 100644 index 0000000000000000000000000000000000000000..a0eab37d892321779d3288a72094ee3b3367c9a7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::hardsigmoid_out::call(self, out); +} +// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardsigmoid_out::call(self, out); +} + +// aten::hardsigmoid(Tensor self) -> Tensor +inline at::Tensor hardsigmoid(const at::Tensor & self) { + return at::_ops::hardsigmoid::call(self); +} + +// aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & hardsigmoid_(at::Tensor & self) { + return at::_ops::hardsigmoid_::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..b5901517e1e80c2376d77db9470ad16de2dd36e9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardsigmoid_backward_grad_input::call(grad_output, self, grad_input); +} +// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) 
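+
+// Editorial note (illustrative sketch, not part of the generated header):
+// hardsigmoid computes clamp(x / 6 + 1 / 2, 0, 1), so this backward scales
+// grad_output by 1/6 inside (-3, 3) and returns zero elsewhere:
+//   at::Tensor x = at::randn({8}).requires_grad_();
+//   at::Tensor y = at::hardsigmoid(x);
+//   y.sum().backward();  // routes through hardsigmoid_backward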
+inline at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) { + return at::_ops::hardsigmoid_backward_grad_input::call(grad_output, self, grad_input); +} + +// aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor +inline at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardsigmoid_backward::call(grad_output, self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7fd64233380f1efd373dbde2358bd92a50cd2723 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dadf19a8e183688fc5e996cfcc9d2bbfe97e4630 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
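+
+// Editorial note (illustrative sketch, not part of the generated header): the
+// backward out-variants write into a caller-provided grad_input buffer, which
+// lets callers reuse storage. Given CPU tensors grad_output and self of
+// matching shape:
+//   at::Tensor grad_input = at::empty_like(self);
+//   at::cpu::hardsigmoid_backward_out(grad_input, grad_output, self);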
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8272aa8bf4eafe3d267d131fb9b1195d41d584f8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..d3d549f941663aa7c7465594383213f744fd835a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_hardsigmoid_backward : public TensorIteratorBase { + + + void meta(const at::Tensor & grad_output, const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f03850a3bf822100334a02fafa9bdb204a827e34 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d4231b0ac7f782ca21b20e00a4a2f2dfe58f72aa --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_hardsigmoid_backward_out : public at::meta::structured_hardsigmoid_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e32c962bc3827d97efc0dc5730b8f97494f09be4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardsigmoid_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardsigmoid_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) 
grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); +}; + +struct TORCH_API hardsigmoid_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardsigmoid_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..052865f50e6d6cefdc446a0ea00cbaafb4b2e9a1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hardsigmoid(const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1d2b71a0dc9f2474cbb49f2618b069bc85e8815a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
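// (Editorial illustration, not part of the vendored header.) The at::cpu::
// functions declared below call the registered CPU kernels directly, skipping
// the dispatcher. A sketch, assuming a linked libtorch and the aggregate
// header ATen/CPUFunctions.h; the clamp identity is an assumption about
// hardsigmoid's definition:
#include <ATen/ATen.h>
#include <ATen/CPUFunctions.h>
static void demo_hardsigmoid_cpu_direct() {
  at::Tensor x = at::linspace(-6.0, 6.0, 25);
  at::Tensor y = at::cpu::hardsigmoid(x);  // direct CPU kernel call
  // assumed: hardsigmoid(x) == clamp(x/6 + 1/2, 0, 1)
  TORCH_CHECK(at::allclose(y, at::clamp(x / 6.0 + 0.5, 0.0, 1.0)));
}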
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardsigmoid(const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardsigmoid_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a7bb9e8f7292fc58c830b4a05dc57d49713fb4cd --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardsigmoid(const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardsigmoid_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..eaaf7658beb9d3c79f23d8039bc800fb812f0fca --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_hardsigmoid : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5e9466811d854687ccb783cad3d50ac36fb24841 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
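// (Editorial illustration, not part of the vendored header.) Kernels in the
// at::meta:: namespace compute only output metadata; combined with tensors on
// the meta device they give data-free shape inference. A sketch, assuming a
// linked libtorch:
#include <ATen/ATen.h>
static void demo_hardsigmoid_meta_shape_inference() {
  at::Tensor x = at::empty({2, 3}, at::TensorOptions().device(at::kMeta));
  at::Tensor y = at::hardsigmoid(x);  // no data is touched on the meta device
  TORCH_CHECK(y.device().is_meta() && y.sizes() == x.sizes());
}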
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor hardsigmoid(const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardsigmoid_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6d49117cbfaac09f19f22ef96e7b02d3e24bebb8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_hardsigmoid_out : public at::meta::structured_hardsigmoid { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor hardsigmoid_quantized_cpu(const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_out_quantized_cpu(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..47f2e5ebbd55a2a525774e181aeaef428f4bb2c9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardsigmoid_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardsigmoid_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardsigmoid"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hardsigmoid.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API hardsigmoid { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardsigmoid"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardsigmoid(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API hardsigmoid_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardsigmoid_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardsigmoid_(Tensor(a!) self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish.h new file mode 100644 index 0000000000000000000000000000000000000000..1cda0ee370d81180ebb17eb4681d006a96a0af4c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::hardswish_out::call(self, out); +} +// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardswish_out::call(self, out); +} + +// aten::hardswish(Tensor self) -> Tensor +inline at::Tensor hardswish(const at::Tensor & self) { + return at::_ops::hardswish::call(self); +} + +// aten::hardswish_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & hardswish_(at::Tensor & self) { + return at::_ops::hardswish_::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..f18fd423e59a61d1eb9718381033d8c941a40b83 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor +inline at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardswish_backward::call(grad_output, self); +} + +// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & hardswish_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardswish_backward_out::call(grad_output, self, out); +} +// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardswish_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardswish_backward_out::call(grad_output, self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1af2481d287bb5e1bca8557249bab116c58030c8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & hardswish_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardswish_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c5f8e0c553c7707ad051cb317bdd508e68288378 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
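// (Editorial illustration, not part of the vendored header.) For reference,
// hardswish(x) = x * clamp(x/6 + 1/2, 0, 1), so its assumed gradient is 0 for
// x < -3, 1 for x > 3, and x/3 + 1/2 in between. A sketch, assuming a linked
// libtorch:
#include <ATen/ATen.h>
static void demo_hardswish_backward_formula() {
  at::Tensor x = at::linspace(-4.9, 4.9, 15);  // grid chosen to avoid the kinks at +/-3
  at::Tensor grad_output = at::ones_like(x);
  at::Tensor grad_input = at::hardswish_backward(grad_output, x);
  at::Tensor ref = at::where(x < -3, at::zeros_like(x),
                             at::where(x > 3, at::ones_like(x), x / 3 + 0.5));
  TORCH_CHECK(at::allclose(grad_input, ref));
}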
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c9d17aea8d98fba94c016c79f2f7f5c75209e493 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fc7e468715a3e1f773f9f55de36bda28814cb4b2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & hardswish_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d3fa7cf66b38bf1139ab6a7c184592ebdc0fc183 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
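// (Editorial illustration, not part of the vendored header.) Each at::_ops::
// struct below pairs the operator's schema string with its call/redispatch
// entry points; the public at::hardswish_backward wrapper simply forwards to
// call(). A sketch, assuming a linked libtorch:
#include <ATen/ATen.h>
#include <type_traits>
static void demo_ops_struct_call() {
  at::Tensor grad_output = at::randn({4});
  at::Tensor self = at::randn({4});
  at::Tensor grad_input = at::_ops::hardswish_backward::call(grad_output, self);
  static_assert(std::is_same<at::_ops::hardswish_backward::schema,
                             at::Tensor (const at::Tensor &, const at::Tensor &)>::value,
                "schema type matches the declaration below");
  TORCH_CHECK(grad_input.sizes() == self.sizes());
}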
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardswish_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardswish_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardswish_backward(Tensor grad_output, Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self); +}; + +struct TORCH_API hardswish_backward_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardswish_backward"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ab36980d841ec69876a756ca8074ba699122ff3c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardswish(const at::Tensor & self); +TORCH_API at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardswish_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fb97d99fcdbf493a8c40a64e2e0927e16d7ed175 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardswish(const at::Tensor & self); +TORCH_API at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardswish_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..931cd469d46bc3dcef8fc30d75c4334ebd8d6f2c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & hardswish_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b0d2a3e8996aea53f93f1cd2d84e6fc0842fd3ce --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hardswish(const at::Tensor & self); +TORCH_API at::Tensor & hardswish_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardswish_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f6b1ea8fd358c912180262b8ce5e9c62a87fd215 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardswish_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardswish_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardswish"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hardswish.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API hardswish { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardswish"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardswish(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API hardswish_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardswish_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardswish_(Tensor(a!) self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh.h new file mode 100644 index 0000000000000000000000000000000000000000..8e8d6897a99135844c55980373f271d52def6c94 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_out::call(self, min_val, max_val, out); +} +// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) { + return at::_ops::hardtanh_out::call(self, min_val, max_val, out); +} + +// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor +inline at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh::call(self, min_val, max_val); +} + +// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) 
+inline at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_::call(self, min_val, max_val); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..7c6ff23cf66d41989826592a52d21c288e16063e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + return at::_ops::hardtanh_backward_grad_input::call(grad_output, self, min_val, max_val, grad_input); +} +// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & hardtanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) { + return at::_ops::hardtanh_backward_grad_input::call(grad_output, self, min_val, max_val, grad_input); +} + +// aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor +inline at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + return at::_ops::hardtanh_backward::call(grad_output, self, min_val, max_val); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..47d31e91a2b5dd2255d8e0c386ee69b364584bb6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
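// (Editorial illustration, not part of the vendored header.) Assumed
// semantics of hardtanh_backward: the incoming gradient passes through where
// min_val < self < max_val and is zeroed elsewhere. A sketch, assuming a
// linked libtorch:
#include <ATen/ATen.h>
static void demo_hardtanh_backward_gating() {
  at::Tensor self = at::tensor({-2.0, -0.5, 0.5, 2.0});
  at::Tensor grad_output = at::ones_like(self);
  at::Tensor grad_input = at::hardtanh_backward(grad_output, self, -1.0, 1.0);
  TORCH_CHECK(at::equal(grad_input, at::tensor({0.0, 1.0, 1.0, 0.0})));
}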
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..85965108398fa2f1c33504fd17aa402a9fa5c1c3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ad7cb50ed5d3138068fe7af44339d42d1880a9d7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a5ff4d9ee2387b383aeed263581e8817cc3bbeb1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by 
torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardtanh_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardtanh_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input); +}; + +struct TORCH_API hardtanh_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardtanh_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fd981a65e28fa2588d212207ee8dce63c82413d7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
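// (Editorial illustration, not part of the vendored header.) hardtanh clamps
// to [min_val, max_val], with the (-1, 1) defaults visible in the schemas
// above; relu6 is assumed to be the same op with bounds (0, 6). A sketch,
// assuming a linked libtorch:
#include <ATen/ATen.h>
static void demo_hardtanh_defaults() {
  at::Tensor x = at::linspace(-3.0, 3.0, 13);
  TORCH_CHECK(at::allclose(at::hardtanh(x), at::clamp(x, -1, 1)));
  TORCH_CHECK(at::allclose(at::hardtanh(x, 0, 6), at::relu6(x)));  // assumed equivalence
}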
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); +TORCH_API at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ef83155179ef193e1110e62e776f6eb8fa81ad8e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); +TORCH_API at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..21ef9607c77a60e826587b8fd1fe002351254c86 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
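// (Editorial illustration, not part of the vendored header.) In-place
// variants such as hardtanh_ mutate self and return a reference to it; the
// Tensor(a!) annotation in the schema marks that aliasing. A sketch, assuming
// a linked libtorch:
#include <ATen/ATen.h>
static void demo_hardtanh_inplace() {
  at::Tensor x = at::linspace(-2.0, 2.0, 9);
  at::Tensor & r = at::hardtanh_(x);  // clamps x to [-1, 1] in place
  TORCH_CHECK(r.is_same(x) && x.max().item<double>() <= 1.0);
}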
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_native.h new file mode 100644 index 0000000000000000000000000000000000000000..565070dafe4780130e451e5e2b89f9ba22b2a387 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_out(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); +TORCH_API at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor hardtanh_quantized_cpu(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_out_quantized_cpu(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); +TORCH_API at::Tensor & hardtanh_quantized_cpu_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7474911ce8ac7527e0e158a585ca09bc08900b98 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hardtanh_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardtanh_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardtanh"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); +}; + +struct TORCH_API hardtanh { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardtanh"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +}; + +struct TORCH_API hardtanh_ { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardtanh_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside.h new file mode 100644 index 0000000000000000000000000000000000000000..a9bfa711047d3478c471e1a0a325f18dbf451bb2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values) { + return at::_ops::heaviside_out::call(self, values, out); +} +// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out) { + return at::_ops::heaviside_out::call(self, values, out); +} + +// aten::heaviside(Tensor self, Tensor values) -> Tensor +inline at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values) { + return at::_ops::heaviside::call(self, values); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..aca1c927c35f462355fce9f2a2a738ac365fb384 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_(at::Tensor & self, const at::Tensor & values); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ff81ece9493ae3ff737949acfb1f78520bb2f338 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
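// (Editorial illustration, not part of the vendored header.) Assumed
// semantics of heaviside: 0 where self < 0, the matching entry of values
// where self == 0, and 1 where self > 0 (self and values must share a dtype).
// A sketch, assuming a linked libtorch:
#include <ATen/ATen.h>
static void demo_heaviside_values() {
  at::Tensor self = at::tensor({-1.5, 0.0, 2.0});
  at::Tensor values = at::tensor({0.5, 0.5, 0.5});
  TORCH_CHECK(at::equal(at::heaviside(self, values),
                        at::tensor({0.0, 0.5, 1.0})));
}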
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out); +TORCH_API at::Tensor & heaviside_(at::Tensor & self, const at::Tensor & values); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..089acd479a83c7542b3d9ff773d4e3190b832ecc --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out); +TORCH_API at::Tensor & heaviside_(at::Tensor & self, const at::Tensor & values); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..267a5b2699733ad4e0e686a58d1258afcfd35d5d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_heaviside : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & values); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e45d172100c82a1dcc4107ebd89a79a45496c4f1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
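// (Editorial illustration, not part of the vendored header.) For structured
// kernels such as heaviside, the meta() declared above validates shapes and
// dtypes, then the backend impl() writes into the supplied output. A sketch
// of the resulting out= call, assuming a linked libtorch:
#include <ATen/ATen.h>
static void demo_heaviside_out() {
  at::Tensor self = at::randn({3, 4});
  at::Tensor values = at::zeros({3, 4});
  at::Tensor out = at::empty({3, 4});
  at::heaviside_out(out, self, values);  // meta() checks, impl() fills out
  TORCH_CHECK(at::equal(out, at::heaviside(self, values)));
}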
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out); +TORCH_API at::Tensor & heaviside_(at::Tensor & self, const at::Tensor & values); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7c380b43cd38450fcba5480b25c2fa5a7d1a54d8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_heaviside_out : public at::meta::structured_heaviside { +void impl(const at::Tensor & self, const at::Tensor & values, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1cd1d6a938ef4388233823aa7d835e7229fd7000 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/heaviside_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API heaviside_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::heaviside"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "heaviside.out(Tensor self, Tensor values, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & values, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values, at::Tensor & out); +}; + +struct TORCH_API heaviside { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::heaviside"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "heaviside(Tensor self, Tensor values) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & values); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values); +}; + +struct TORCH_API heaviside_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::heaviside_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & values); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & values); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hinge_embedding_loss.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hinge_embedding_loss.h new file mode 100644 index 0000000000000000000000000000000000000000..f406903d2e3b875fdb551232180f6af814049f65 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hinge_embedding_loss.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor +inline at::Tensor hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin=1.0, int64_t reduction=at::Reduction::Mean) { + return at::_ops::hinge_embedding_loss::call(self, target, margin, reduction); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hinge_embedding_loss_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hinge_embedding_loss_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..37c70ee58e62642b6003066eef2c36aba794f664 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hinge_embedding_loss_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin=1.0, int64_t reduction=at::Reduction::Mean); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hinge_embedding_loss_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hinge_embedding_loss_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b25ca6a990e5b31060a7c4b088875032a88ad607 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hinge_embedding_loss_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin=1.0, int64_t reduction=at::Reduction::Mean); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hinge_embedding_loss_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hinge_embedding_loss_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1f7a765aec2d4d59ba6af24f2777389dee135649 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hinge_embedding_loss_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hinge_embedding_loss { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, double, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hinge_embedding_loss"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/histc.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/histc.h new file mode 100644 index 0000000000000000000000000000000000000000..8645c3ed49e3a349f0b90d9c81e2afaee95f3a73 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/histc.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & histc_out(at::Tensor & out, const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) { + return at::_ops::histc_out::call(self, bins, min, max, out); +} +// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & histc_outf(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) { + return at::_ops::histc_out::call(self, bins, min, max, out); +} + +// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor +inline at::Tensor histc(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) { + return at::_ops::histc::call(self, bins, min, max); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/histc_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/histc_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..adebcf8fde09b9ea5f88fe1f3688ad9f1e390f3d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/histc_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor histc(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & histc_out(at::Tensor & out, const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & histc_outf(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/histc_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/histc_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..538157061b1b1fac45b42d0eecf3364087003a5e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/histc_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
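// Usage sketch for aten::histc (illustrative): with the default min == max == 0
// the bin range is taken from the data's own minimum and maximum, and elements
// outside [min, max] are ignored rather than clamped into the edge bins:
//
//   at::Tensor x = at::randn({1000});
//   at::Tensor h = at::histc(x, /*bins=*/10, /*min=*/-3.0, /*max=*/3.0);
//   // h has shape {10} and the same dtype as x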
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor histc(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & histc_out(at::Tensor & out, const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & histc_outf(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/histc_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/histc_native.h new file mode 100644 index 0000000000000000000000000000000000000000..47ed82c28b029bfc4eb3501f187fde9442d9c267 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/histc_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor histogram_histc(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & histogram_histc_out(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); +TORCH_API at::Tensor _histc_cuda(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & _histc_out_cuda(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/histc_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/histc_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..83b9fdd9c1b231e26deda4b773bbc431b7ddf9fb --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/histc_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API histc_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histc"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); +}; + +struct TORCH_API histc { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histc"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/histogram.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogram.h new file mode 100644 index 0000000000000000000000000000000000000000..40f6964dfd7afed8c6c8ccdfad2129d2758faaa1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogram.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) +inline ::std::tuple histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight={}, bool density=false) { + return at::_ops::histogram_bins_tensor_out::call(self, bins, weight, density, hist, bin_edges); +} +// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) +inline ::std::tuple histogram_outf(const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) { + return at::_ops::histogram_bins_tensor_out::call(self, bins, weight, density, hist, bin_edges); +} + +// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) +inline ::std::tuple histogram(const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight={}, bool density=false) { + return at::_ops::histogram_bins_tensor::call(self, bins, weight, density); +} + +// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) +inline ::std::tuple histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, int64_t bins=100, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false) { + return at::_ops::histogram_bin_ct_out::call(self, bins, range, weight, density, hist, bin_edges); +} +// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? 
range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
+inline ::std::tuple<at::Tensor &, at::Tensor &> histogram_outf(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
+    return at::_ops::histogram_bin_ct_out::call(self, bins, range, weight, density, hist, bin_edges);
+}
+
+// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
+inline ::std::tuple<at::Tensor, at::Tensor> histogram(const at::Tensor & self, int64_t bins=100, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false) {
+    return at::_ops::histogram_bin_ct::call(self, bins, range, weight, density);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/histogram_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogram_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d8e3304ddd8f5c1f8840852743b65a55bd18ee90
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogram_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor, at::Tensor> histogram(const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor &, at::Tensor &> histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor &, at::Tensor &> histogram_outf(const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges);
+TORCH_API ::std::tuple<at::Tensor, at::Tensor> histogram(const at::Tensor & self, int64_t bins=100, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor &, at::Tensor &> histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, int64_t bins=100, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor &, at::Tensor &> histogram_outf(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges);
+
+} // namespace cpu
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/histogram_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogram_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bbb87825f43bd3cf257fc03eb14f35c3a843b053
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogram_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor, at::Tensor> histogram(const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor &, at::Tensor &> histogram_out(const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges);
+TORCH_API ::std::tuple<at::Tensor, at::Tensor> histogram(const at::Tensor & self, int64_t bins=100, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor &, at::Tensor &> histogram_out(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/histogram_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogram_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d83cbc589dcfe1ba13d3723b459bd34e886289e9
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogram_ops.h
@@ -0,0 +1,62 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API histogram_bins_tensor_out {
+  using schema = ::std::tuple<at::Tensor &, at::Tensor &> (const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::histogram";
+  static constexpr const char* overload_name = "bins_tensor_out";
+  static constexpr const char* schema_str = "histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)";
+  static ::std::tuple<at::Tensor &, at::Tensor &> call(const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges);
+  static ::std::tuple<at::Tensor &, at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges);
+};
+
+struct TORCH_API histogram_bins_tensor {
+  using schema = ::std::tuple<at::Tensor, at::Tensor> (const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::histogram";
+  static constexpr const char* overload_name = "bins_tensor";
+  static constexpr const char* schema_str = "histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor?
weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)"; + static ::std::tuple call(const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight, bool density); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight, bool density); +}; + +struct TORCH_API histogram_bin_ct_out { + using schema = ::std::tuple (const at::Tensor &, int64_t, ::std::optional>, const ::std::optional &, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histogram"; + static constexpr const char* overload_name = "bin_ct_out"; + static constexpr const char* schema_str = "histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)"; + static ::std::tuple call(const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); +}; + +struct TORCH_API histogram_bin_ct { + using schema = ::std::tuple (const at::Tensor &, int64_t, ::std::optional>, const ::std::optional &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histogram"; + static constexpr const char* overload_name = "bin_ct"; + static constexpr const char* schema_str = "histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)"; + static ::std::tuple call(const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/histogramdd.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogramdd.h new file mode 100644 index 0000000000000000000000000000000000000000..fd78e51550bda5a156a004c263ffa2071175b90e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogramdd.h @@ -0,0 +1,41 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) +inline ::std::tuple> histogramdd(const at::Tensor & self, at::IntArrayRef bins, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false) { + return at::_ops::histogramdd::call(self, bins, range, weight, density); +} + +// aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
+inline ::std::tuple<at::Tensor, ::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false) {
+    return at::_ops::histogramdd_int_bins::call(self, bins, range, weight, density);
+}
+
+// aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
+inline ::std::tuple<at::Tensor, ::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, at::TensorList bins, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false) {
+    return at::_ops::histogramdd_TensorList_bins::call(self, bins, range, weight, density);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9af7511ae817fb3e2d57db4fb99fc90545a431d8
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor, ::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor, ::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor, ::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, at::TensorList bins, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/histogramdd_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogramdd_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..40e325547ea592f453b936ece52490554f2da8f7
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogramdd_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor, ::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor, ::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor, ::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, at::TensorList bins, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false);
+} // namespace native
+} // namespace at
diff
--git a/phivenv/Lib/site-packages/torch/include/ATen/ops/histogramdd_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogramdd_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8abea51d3cc869b715298c93d888e5b1ea829a94 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/histogramdd_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API histogramdd { + using schema = ::std::tuple> (const at::Tensor &, at::IntArrayRef, ::std::optional>, const ::std::optional &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histogramdd"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)"; + static ::std::tuple> call(const at::Tensor & self, at::IntArrayRef bins, ::std::optional> range, const ::std::optional & weight, bool density); + static ::std::tuple> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional> range, const ::std::optional & weight, bool density); +}; + +struct TORCH_API histogramdd_int_bins { + using schema = ::std::tuple> (const at::Tensor &, int64_t, ::std::optional>, const ::std::optional &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histogramdd"; + static constexpr const char* overload_name = "int_bins"; + static constexpr const char* schema_str = "histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)"; + static ::std::tuple> call(const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density); + static ::std::tuple> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density); +}; + +struct TORCH_API histogramdd_TensorList_bins { + using schema = ::std::tuple> (const at::Tensor &, at::TensorList, ::std::optional>, const ::std::optional &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histogramdd"; + static constexpr const char* overload_name = "TensorList_bins"; + static constexpr const char* schema_str = "histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)"; + static ::std::tuple> call(const at::Tensor & self, at::TensorList bins, ::std::optional> range, const ::std::optional & weight, bool density); + static ::std::tuple> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, ::std::optional> range, const ::std::optional & weight, bool density); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hsplit.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hsplit.h new file mode 100644 index 0000000000000000000000000000000000000000..cb532cc913512689d3df2a5fb41f831429afddf2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hsplit.h @@ -0,0 +1,36 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] +inline ::std::vector hsplit(const at::Tensor & self, int64_t sections) { + return at::_ops::hsplit_int::call(self, sections); +} + +// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] +inline ::std::vector hsplit(const at::Tensor & self, at::IntArrayRef indices) { + return at::_ops::hsplit_array::call(self, indices); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dbc6f2d318cdeca26a12f2e9404469819f40a84e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
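// The histogram family above returns (hist, bin_edges) tuples instead of a
// single tensor. Usage sketch covering both 1-D overloads and histogramdd
// (illustrative; assumes float CPU tensors):
//
//   at::Tensor x = at::randn({1000});
//   auto [h1, e1] = at::histogram(x, /*bins=*/20);                // uniform bins
//   at::Tensor edges = at::tensor({-3.0f, -1.0f, 0.0f, 1.0f, 3.0f});
//   auto [h2, e2] = at::histogram(x, edges);                      // explicit bin edges
//
//   at::Tensor xy = at::randn({1000, 2});
//   auto [hd, edge_list] = at::histogramdd(xy, {10, 10});         // one edges tensor per dim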
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> hsplit(const at::Tensor & self, int64_t sections);
+TORCH_API ::std::vector<at::Tensor> hsplit(const at::Tensor & self, at::IntArrayRef indices);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hsplit_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hsplit_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d79779fbcac793878b9648694841bb4fd8020aa
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hsplit_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> hsplit(const at::Tensor & self, int64_t sections);
+TORCH_API ::std::vector<at::Tensor> hsplit(const at::Tensor & self, at::IntArrayRef indices);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hsplit_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hsplit_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..25d4619bad74b1d0d2036b1b2dfa230980f9ab19
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hsplit_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API hsplit_int {
+  using schema = ::std::vector<at::Tensor> (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::hsplit";
+  static constexpr const char* overload_name = "int";
+  static constexpr const char* schema_str = "hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]";
+  static ::std::vector<at::Tensor> call(const at::Tensor & self, int64_t sections);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections);
+};
+
+struct TORCH_API hsplit_array {
+  using schema = ::std::vector<at::Tensor> (const at::Tensor &, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::hsplit";
+  static constexpr const char* overload_name = "array";
+  static constexpr const char* schema_str = "hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]";
+  static ::std::vector<at::Tensor> call(const at::Tensor & self, at::IntArrayRef indices);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hspmm.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hspmm.h
new file mode 100644
index 0000000000000000000000000000000000000000..a2e04ae81ca954aaff5bf4934f3ba348f16b4c63
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hspmm.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include +#include +#include + + + +#include + +namespace at { + + +// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hspmm_out(at::Tensor & out, const at::Tensor & mat1, const at::Tensor & mat2) { + return at::_ops::hspmm_out::call(mat1, mat2, out); +} +// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hspmm_outf(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) { + return at::_ops::hspmm_out::call(mat1, mat2, out); +} + +// aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor +inline at::Tensor hspmm(const at::Tensor & mat1, const at::Tensor & mat2) { + return at::_ops::hspmm::call(mat1, mat2); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hspmm_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hspmm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f9b685f9abe35c31d56cbefffb59faf8264cb519 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hspmm_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hspmm_sparse_cpu(const at::Tensor & mat1, const at::Tensor & mat2); +TORCH_API at::Tensor & hspmm_out_sparse_cpu(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out); +TORCH_API at::Tensor hspmm_sparse_cuda(const at::Tensor & mat1, const at::Tensor & mat2); +TORCH_API at::Tensor & hspmm_out_sparse_cuda(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hspmm_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hspmm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..fccb062067f76ec8626ea171df65530a73118a78 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hspmm_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hspmm_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hspmm"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out); +}; + +struct TORCH_API hspmm { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hspmm"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hspmm(Tensor mat1, Tensor mat2) -> Tensor"; + static at::Tensor call(const at::Tensor & mat1, const at::Tensor & mat2); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hstack.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hstack.h new file mode 100644 index 0000000000000000000000000000000000000000..eee131cf910da917d307bbc663e049b3656c9aac --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hstack.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hstack(Tensor[] tensors) -> Tensor +inline at::Tensor hstack(at::TensorList tensors) { + return at::_ops::hstack::call(tensors); +} + +// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hstack_out(at::Tensor & out, at::TensorList tensors) { + return at::_ops::hstack_out::call(tensors, out); +} +// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hstack_outf(at::TensorList tensors, at::Tensor & out) { + return at::_ops::hstack_out::call(tensors, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hstack_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hstack_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b8433a393351dad9afe1bad190138dd0baf51f15 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hstack_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor hstack(at::TensorList tensors); +TORCH_API at::Tensor & hstack_out(at::Tensor & out, at::TensorList tensors); +TORCH_API at::Tensor & hstack_outf(at::TensorList tensors, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hstack_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hstack_native.h new file mode 100644 index 0000000000000000000000000000000000000000..cb089aa1e0839c15e70107aa4bb12e1fdadf8769 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hstack_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hstack(at::TensorList tensors); +TORCH_API at::Tensor & hstack_out(at::TensorList tensors, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hstack_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hstack_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..424a590fd7786462d00291cca6104ad84b86681b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hstack_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hstack { + using schema = at::Tensor (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hstack"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hstack(Tensor[] tensors) -> Tensor"; + static at::Tensor call(at::TensorList tensors); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors); +}; + +struct TORCH_API hstack_out { + using schema = at::Tensor & (at::TensorList, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hstack"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hstack.out(Tensor[] tensors, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(at::TensorList tensors, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss.h new file mode 100644 index 0000000000000000000000000000000000000000..2bd36c4e57aa2f4c0ac381efcccddfc4cafe2751 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) { + return at::_ops::huber_loss_out::call(self, target, reduction, delta, out); +} +// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) { + return at::_ops::huber_loss_out::call(self, target, reduction, delta, out); +} + +// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor +inline at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) { + return at::_ops::huber_loss::call(self, target, reduction, delta); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..f9246999394804e538d5fb2c9568a49cb50a6758 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & huber_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) { + return at::_ops::huber_loss_backward_out::call(grad_output, self, target, reduction, delta, grad_input); +} +// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & huber_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) { + return at::_ops::huber_loss_backward_out::call(grad_output, self, target, reduction, delta, grad_input); +} + +// aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor +inline at::Tensor huber_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) { + return at::_ops::huber_loss_backward::call(grad_output, self, target, reduction, delta); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8b38ad906d2fb3a269713cb8d2dd22255921aefc --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor huber_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e873ec4b7256790949f55ee879089a2e7d638449 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
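// aten::huber_loss_backward is the explicit derivative op behind huber_loss:
// it returns d(loss)/d(self) scaled by the incoming grad_output. Direct-call
// sketch (illustrative; with a Mean-reduced scalar loss the incoming gradient
// is a 0-dim ones tensor):
//
//   at::Tensor input  = at::randn({8});
//   at::Tensor target = at::randn({8});
//   at::Tensor grad   = at::huber_loss_backward(
//       /*grad_output=*/at::ones({}), input, target,
//       /*reduction=*/at::Reduction::Mean, /*delta=*/1.0);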
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & huber_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +TORCH_API at::Tensor & huber_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..aee4372eb20adac63489bda701baaf723e340c04 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & huber_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +TORCH_API at::Tensor & huber_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..72055b686cebb4d81425c78731b80ecd161d2f79 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor huber_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +TORCH_API at::Tensor & huber_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f3c5e91cb15e820659c44f1e6f5a275bef092577 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API huber_loss_backward_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::huber_loss_backward"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input); +}; + +struct TORCH_API huber_loss_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::huber_loss_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..30307dfdd57317771224201a21887dd6a99560fe --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
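// [editor's note] Each at::_ops struct above carries the operator's dispatcher
// metadata: name, overload_name, and the full schema string. A sketch of a
// runtime lookup through the c10 dispatcher -- nothing below is specific to
// huber_loss_backward beyond the constants shown above:
//
//   #include <ATen/core/dispatch/Dispatcher.h>
//
//   auto op = c10::Dispatcher::singleton().findSchemaOrThrow(
//       at::_ops::huber_loss_backward::name,           // "aten::huber_loss_backward"
//       at::_ops::huber_loss_backward::overload_name); // "" (functional overload)
//   auto typed = op.typed<at::_ops::huber_loss_backward::schema>();
//   // typed.call(...) now takes exactly the signature of the static call() above.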
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0); +TORCH_API at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0); +TORCH_API at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8c71bf0b27c2c76e7d859c0d4b17ae5bdb3c06ff --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0); +TORCH_API at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0); +TORCH_API at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f664bae0735eef1d70a7b8e523d78bf4c68d0d04 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0); +TORCH_API at::Tensor & huber_loss_out(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4b03689db297f9d13d265764336374b76e468f53 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/huber_loss_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API huber_loss_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::huber_loss"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); +}; + +struct TORCH_API huber_loss { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::huber_loss"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot.h new file mode 100644 index 0000000000000000000000000000000000000000..874e895c2eb173b9dc679a083b1ab929deac457c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hypot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::hypot_out::call(self, other, out); +} +// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & hypot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::hypot_out::call(self, other, out); +} + +// aten::hypot(Tensor self, Tensor other) -> Tensor +inline at::Tensor hypot(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::hypot::call(self, other); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4587495475a032d8b60c117e79aff07a8196cd84 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hypot(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..69de1480c86a130a8ed3d9893ea008f7b1d680ca --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
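// [editor's note] The hypot_out / hypot_outf pair in hypot.h above illustrates
// the convention used throughout these generated headers: `_out` takes the
// output tensor first, `_outf` keeps the schema order with the output last,
// and both forward to the same at::_ops::hypot_out::call. A short sketch:
//
//   at::Tensor a   = at::full({2}, 3.0);
//   at::Tensor b   = at::full({2}, 4.0);
//   at::Tensor out = at::empty_like(a);
//   at::hypot_out(out, a, b);   // out-first spelling
//   at::hypot_outf(a, b, out);  // schema-order spelling; same op underneath
//   // out now holds {5.0, 5.0}, i.e. sqrt(3*3 + 4*4) elementwise.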
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hypot(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5a51512556a3d32fd7ee3c9d9de3dcc9fbffbba4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hypot(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..941993f0e54df197ddfbfd2862b69b5e896e0113 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_hypot : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5f70c70f2c63e8a815f05359b054c373af9ece11 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
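// [editor's note] structured_hypot above derives from TensorIteratorBase, so
// its meta() computes broadcasting, dtype promotion, and the output shape
// without running a kernel. One way to observe that (a sketch, assuming
// ordinary meta-device support):
//
//   at::Tensor a = at::empty({2, 3}, at::TensorOptions().device(at::kMeta));
//   at::Tensor b = at::empty({1, 3}, at::TensorOptions().device(at::kMeta));
//   at::Tensor c = at::hypot(a, b);
//   // c.sizes() == {2, 3} and c.device().is_meta(); no data was computed.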
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor hypot(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_native.h new file mode 100644 index 0000000000000000000000000000000000000000..50da17cbf1aacfeaa5232f15212a9d55f5b32889 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_hypot_out : public at::meta::structured_hypot { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..68b4a953095210a3bff8df1497fe3f2acdfe6efe --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/hypot_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hypot_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hypot"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hypot.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API hypot { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hypot"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hypot(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API hypot_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hypot_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/i0.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0.h new file mode 100644 index 0000000000000000000000000000000000000000..9ab4c40a93a07284ea3e6e1125c3a00de0cc86de --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::i0(Tensor self) -> Tensor +inline at::Tensor i0(const at::Tensor & self) { + return at::_ops::i0::call(self); +} + +// aten::i0_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & i0_(at::Tensor & self) { + return at::_ops::i0_::call(self); +} + +// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::i0_out::call(self, out); +} +// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::i0_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..827325532df99d34591dfd1af4109a23757a1e50 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor i0(const at::Tensor & self); +TORCH_API at::Tensor & i0_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6a5b11500665efe28dad9b8c5488888fa5777fbe --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor i0(const at::Tensor & self); +TORCH_API at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & i0_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0a9dab77e8d455ee51401f40dc587b30f8f829cf --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
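// [editor's note] i0 evaluates the zeroth-order modified Bessel function of
// the first kind, elementwise; I0(0) == 1. The three surfaces declared in
// i0.h above differ only in where the result is stored. Sketch:
//
//   at::Tensor x = at::zeros({3});
//   at::Tensor y = at::i0(x);        // fresh tensor of ones
//   at::Tensor out = at::empty_like(x);
//   at::i0_out(out, x);              // caller-provided output
//   at::i0_(x);                      // in-place; x becomes ones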
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor i0(const at::Tensor & self); +TORCH_API at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & i0_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..092d100971be39487c689273f0eb78cf8bb60471 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_i0 : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..129168d0db8290fcfb26e05ee779044b3cb81917 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor i0(const at::Tensor & self); +TORCH_API at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & i0_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2a6dcf3a545a145194469dc3271fbf043a3d349f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_i0_out : public at::meta::structured_i0 { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..214d34750c597620b3f9fd9408c6b9d2cb990dca --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/i0_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API i0 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::i0"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "i0(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API i0_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::i0_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "i0_(Tensor(a!) self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API i0_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::i0"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma.h new file mode 100644 index 0000000000000000000000000000000000000000..353cfeb3a5dd4ce55ff85a8b29e36188df756f78 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igamma_out::call(self, other, out); +} +// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::igamma_out::call(self, other, out); +} + +// aten::igamma(Tensor self, Tensor other) -> Tensor +inline at::Tensor igamma(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igamma::call(self, other); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b37e022be13941256bcaa5af89afeb8c8bc0bf76 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor igamma(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8ac66e77d2d4b494116ae589fddb441c1e8b8065 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
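// [editor's note] igamma(self, other) is the regularized lower incomplete
// gamma function P(self, other), with self the shape parameter and other the
// upper integration bound; its complement igammac (declared a few headers
// below) gives Q, and P + Q == 1 on the valid domain. Sketch with
// hypothetical values:
//
//   at::Tensor a = at::full({2}, 2.0);   // shape parameter
//   at::Tensor x = at::full({2}, 1.5);   // upper integration bound
//   at::Tensor p = at::igamma(a, x);
//   at::Tensor q = at::igammac(a, x);
//   // at::allclose(p + q, at::ones_like(p)) is expected to hold.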
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor igamma(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9c9ac70de0db5ff23205883c3556d45d8de0b9e6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor igamma(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..94384aac4b8ed2d125323b81e1260b4de9c24c7a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_igamma : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..93381d748c8dc98d187c17014f3daad8c1fdc438 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor igamma(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5177e61e887bcd70c80ffeb88ca0eea467e4bf30 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_igamma_out : public at::meta::structured_igamma { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..518fa8ac76ee47f31396438de4c9d8c0c78b8805 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igamma_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API igamma_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::igamma"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "igamma.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API igamma { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::igamma"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "igamma(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API igamma_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::igamma_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac.h new file mode 100644 index 0000000000000000000000000000000000000000..81544d1be2d77114fad2e5082594112dc4336695 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igammac_out::call(self, other, out); +} +// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::igammac_out::call(self, other, out); +} + +// aten::igammac(Tensor self, Tensor other) -> Tensor +inline at::Tensor igammac(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igammac::call(self, other); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..78a9ba3e28f63784d337b76f47628db11d1e9249 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3d8bb5506d5f04a2afe11af960e9cbd97f7987fb --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b8a62108ed5177f2b3b91f5ea5d33af21f97d476 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
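// [editor's note] The per-backend namespaces here (at::cpu:: above, at::cuda::
// just below) are backend-pinned entry points: they go straight to the kernel
// registered for that dispatch key, so no autograd handling or device-based
// routing applies. Portable code should keep calling at::igammac. A sketch,
// assuming a CUDA build and CUDA inputs:
//
//   at::Tensor a  = at::full({2}, 2.0, at::kCUDA);
//   at::Tensor x  = at::full({2}, 1.5, at::kCUDA);
//   at::Tensor q1 = at::igammac(a, x);        // dispatched as usual
//   at::Tensor q2 = at::cuda::igammac(a, x);  // direct CUDA entry point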
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..9288326ab61e11dc67c6e9b80363488e1dde3b15 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_igammac : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e9d0283542e508aacfba983c3428b85e3962d059 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ae695d2789314a5fb473022b13ed4c8ced86c3ba --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_igammac_out : public at::meta::structured_igammac { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a43c72a14a57c1feeba24fcd886c9ec373548b74 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/igammac_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API igammac_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::igammac"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "igammac.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API igammac { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::igammac"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "igammac(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API igammac_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::igammac_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col.h new file mode 100644 index 0000000000000000000000000000000000000000..c0f35d7186023c60e6f5d6ef250a73433503967f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col_out::call(self, kernel_size, dilation, padding, stride, out); +} +// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::im2col_out::call(self, kernel_size, dilation, padding, stride, out); +} + +// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor +inline at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col::call(self, kernel_size, dilation, padding, stride); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f8ac98d74e8458bf675004a7bc0bfb4aadd8357d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4caf8227cc8a558cba447a275276a166155d7db8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
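// [editor's note] im2col unfolds sliding local blocks: for an (N, C, H, W)
// input and a (kH, kW) kernel the result is (N, C*kH*kW, L), where L is the
// number of block positions, computed exactly like a convolution's output
// size. Worked sketch with hypothetical sizes:
//
//   at::Tensor img  = at::randn({1, 3, 32, 32});
//   at::Tensor cols = at::im2col(img,
//       /*kernel_size=*/{3, 3}, /*dilation=*/{1, 1},
//       /*padding=*/{1, 1}, /*stride=*/{1, 1});
//   // out_H = (32 + 2*1 - 1*(3-1) - 1)/1 + 1 = 32; out_W likewise.
//   // cols.sizes() == {1, 3*3*3, 32*32} == {1, 27, 1024}.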
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9af11be560e6520e989a3800d84031bceef69970 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor im2col_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_out_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); +TORCH_API at::Tensor im2col_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_out_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..bd8270d97d45fdb3d4542482141c67b0a5b204e8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/im2col_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API im2col_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::im2col"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); +}; + +struct TORCH_API im2col { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::im2col"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/imag.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/imag.h new file mode 100644 index 0000000000000000000000000000000000000000..5825601e1661c899efc950ea7fbf2ce210b750e8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/imag.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::imag(Tensor(a) self) -> Tensor(a) +inline at::Tensor imag(const at::Tensor & self) { + return at::_ops::imag::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/imag_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/imag_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..97f63841f8c32ab3c6159896042f3799bb8bb65d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/imag_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor imag(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/imag_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/imag_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7b3db4c597e6afe67fab0cfca5a4537207b7c529 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/imag_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor imag(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/imag_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/imag_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f332c4bd110104670e41206e7916c6f03dba8b57 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/imag_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API imag { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::imag"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "imag(Tensor(a) self) -> Tensor(a)"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index.h new file mode 100644 index 0000000000000000000000000000000000000000..609c246901ac84176e5c900597cc10b2d647463f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor +inline at::Tensor index(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices) { + return at::_ops::index_Tensor::call(self, indices); +} + +// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices) { + return at::_ops::index_Tensor_out::call(self, indices, out); +} +// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & index_outf(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out) { + return at::_ops::index_Tensor_out::call(self, indices, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add.h new file mode 100644 index 0000000000000000000000000000000000000000..0ec72b3395995f57b80077660d7668ccd386b354 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add_out::call(self, dim, index, source, alpha, out); +} +// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::index_add_out::call(self, dim, index, source, alpha, out); +} + +// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor +inline at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add::call(self, dim, index, source, alpha); +} + +// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor +inline at::Tensor index_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add_dimname::call(self, dim, index, source, alpha); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3118c7e063a6467aff32a544d7e268bc55f05629 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
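In the index functions above, the schema type Tensor?[] indices is bound as c10::List<::std::optional<at::Tensor>>; a nullopt slot means "take every element along this dimension", like a bare `:` in Python indexing. A sketch (assuming libtorch is linked; values are illustrative):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::arange(12, at::kFloat).reshape({3, 4});

  c10::List<::std::optional<at::Tensor>> indices;
  indices.push_back(at::tensor({0, 2}, at::kLong));  // rows 0 and 2
  indices.push_back(::std::nullopt);                 // all columns

  at::Tensor picked = at::index(t, indices);         // shape [2, 4]
  return 0;
}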
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..166d785e3ca73646e41c2a08449a1f103d4205d3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor index_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dde466677211201fec495e3afdfe273f0559ec5c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
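For index_add, declared above and registered per backend below, the semantics are self[index[i]] += alpha * source[i] along dim. A sketch (assuming libtorch is linked; values are illustrative):

#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::zeros({3, 2});
  at::Tensor index = at::tensor({0, 2}, at::kLong);  // rows to update
  at::Tensor source = at::ones({2, 2});

  // r[index[i], :] = self[index[i], :] + alpha * source[i, :]
  at::Tensor r = at::index_add(self, /*dim=*/0, index, source, /*alpha=*/2);
  // r is [[2, 2], [0, 0], [2, 2]]
  return 0;
}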
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..17875ac1c2e4e7d2358231da57ec80d088f9d308 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..482ed0695c148475749ae5976c13489c353873d0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_meta.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_index_add : public at::impl::MetaBase { + + template <bool DIM = false> + struct TORCH_API precompute_out { + + precompute_out<true> set_dim(int64_t value) { + static_assert(DIM == false, "dim already set"); + precompute_out<true> ret; +ret.dim = value; +return ret; + } + + int64_t dim; + }; + using meta_return_ty = precompute_out<true>; + meta_return_ty meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); +}; + +} // namespace meta +} // namespace at diff --git
a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6452fa884b24e22240a7f530bfce06af0c47c738 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b10f55e7a43533ed4913efad44b7b3a324bc0238 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_index_add_cpu_out : public at::meta::structured_index_add { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, const at::Tensor & out); +}; +struct TORCH_API structured_index_add_cuda_out : public at::meta::structured_index_add { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, const at::Tensor & out); +}; +TORCH_API at::Tensor index_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f243f4fc50318a82b2eb4c0801d0d30bcd622768 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_add_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_add_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_add"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out); +}; + +struct TORCH_API index_add_ { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_add_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); +}; + +struct TORCH_API index_add { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_add"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); +}; + +struct TORCH_API index_add_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_add"; + static constexpr const char* overload_name = "dimname"; + static constexpr const char* schema_str = "index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); +}; + +}} // namespace 
at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6435415ad21480050966e7a19011be01fa569e6c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor index(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..48efb4d73b34de47b818b06a2f834a8bd2201145 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) { + return at::_ops::index_copy_out::call(self, dim, index, source, out); +} +// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & index_copy_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) { + return at::_ops::index_copy_out::call(self, dim, index, source, out); +} + +// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor +inline at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) { + return at::_ops::index_copy::call(self, dim, index, source); +} + +// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor +inline at::Tensor index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) { + return at::_ops::index_copy_dimname::call(self, dim, index, source); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2ef085efe266465cd2338193d74a9dec2d0f579f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..178fbf2e0bb9e7088eeeccc84afd6d7dd3a26b69 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
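index_copy, declared above, is the non-accumulating sibling of index_add: self[index[i]] = source[i] along dim, with index expected to be 1-D and free of duplicates. A sketch (assuming libtorch is linked; values are illustrative):

#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::zeros({3, 2});
  at::Tensor index = at::tensor({2, 0}, at::kLong);
  at::Tensor source = at::arange(4, at::kFloat).reshape({2, 2});

  // Functional form: returns a copy of self with the indexed rows replaced.
  at::Tensor r = at::index_copy(self, /*dim=*/0, index, source);
  // r is [[2, 3], [0, 0], [0, 1]]: source row i lands at row index[i].
  return 0;
}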
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor & index_copy_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..70f91f2c51a8c1f94c8ce00546d5a8de5f32bd79 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out); +TORCH_API at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7cb97bea7d3456af84ecc9f04cc0233199a3bc08 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
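The CPU dispatch header above declares both out-variant spellings. They reach the same kernel and differ only in argument order, a torchgen convention: _out puts the output first (C++ style), _outf puts it last (schema order). A sketch (assuming libtorch is linked; the helper name is illustrative):

#include <ATen/ATen.h>

void two_spellings(const at::Tensor& self, const at::Tensor& index,
                   const at::Tensor& source) {
  at::Tensor out = at::empty_like(self);
  // Equivalent calls; only the position of `out` differs.
  at::index_copy_out(out, self, /*dim=*/0, index, source);
  at::index_copy_outf(self, /*dim=*/0, index, source, out);
}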
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out); +TORCH_API at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..b0f85b140ff41fe1a6b39feb941bd2c89bbc3bd8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_meta.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_index_copy : public at::impl::MetaBase { + + template <bool DIM = false> + struct TORCH_API precompute_out { + + precompute_out<true> set_dim(int64_t value) { + static_assert(DIM == false, "dim already set"); + precompute_out<true> ret; +ret.dim = value; +return ret; + } + + int64_t dim; + }; + using meta_return_ty = precompute_out<true>; + meta_return_ty meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +}; + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cb475d9d31ee5054b471153b6d7e4d09e56c5d81 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
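The precompute_out<DIM> member template in structured_index_copy above (and in structured_index_add earlier) threads precomputed arguments from the meta function to the kernel, using a bool template parameter as a compile-time "already set" flag. A standalone analog of the pattern:

#include <cstdint>

// The boolean parameter records whether `dim` has been filled in, so a
// second set_dim() on an already-populated value fails at compile time.
template <bool DIM = false>
struct precompute_out {
  precompute_out<true> set_dim(int64_t value) {
    static_assert(DIM == false, "dim already set");
    precompute_out<true> ret;
    ret.dim = value;
    return ret;
  }
  int64_t dim;
};

int main() {
  auto p = precompute_out<>().set_dim(3);  // OK: yields precompute_out<true>
  // p.set_dim(4);                         // would not compile: dim already set
  return static_cast<int>(p.dim) - 3;      // 0
}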
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out); +TORCH_API at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e8c894c833061d07bbb051d03cb9224e51fdbd93 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_index_copy_out : public at::meta::structured_index_copy { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Tensor & out); +}; +TORCH_API at::Tensor & index_copy_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..14fec9393521ec6a59971457963450552bfaa30c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_copy_ops.h @@ -0,0 +1,73 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_copy_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_copy"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out); +}; + +struct TORCH_API index_copy_ { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_copy_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +}; + +struct TORCH_API index_copy { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_copy"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +}; + +struct TORCH_API index_copy__dimname { + using schema = at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_copy_"; + static constexpr const char* overload_name = "dimname"; + static constexpr const char* schema_str = "index_copy_.dimname(Tensor(a!) 
self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +}; + +struct TORCH_API index_copy_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_copy"; + static constexpr const char* overload_name = "dimname"; + static constexpr const char* schema_str = "index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..058556edf2bbd7206313044d7dbb3a4d2b785676 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor index(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +TORCH_API at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +TORCH_API at::Tensor & index_outf(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6de38e97c6a5ea753469bb9312823d76c702ca57 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
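The name/overload_name/schema members of the _ops structs above are exactly the keys and signature the dispatcher uses, so the static call wrappers can be reproduced with a dynamic lookup. A sketch (assuming libtorch is linked; the function name is illustrative):

#include <ATen/ATen.h>
#include <ATen/core/dispatch/Dispatcher.h>

at::Tensor index_copy_via_dispatcher(const at::Tensor& self, int64_t dim,
                                     const at::Tensor& index,
                                     const at::Tensor& source) {
  // Look the operator up by (name, overload_name) and bind its schema type.
  static auto op = c10::Dispatcher::singleton()
      .findSchemaOrThrow("aten::index_copy", /*overload_name=*/"")
      .typed<at::_ops::index_copy::schema>();
  return op.call(self, dim, index, source);
}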
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor index(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +TORCH_API at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +TORCH_API at::Tensor & index_outf(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill.h new file mode 100644 index 0000000000000000000000000000000000000000..b3db2d8a7e539e934092ef92060097e7ee945331 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill.h @@ -0,0 +1,64 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor +inline at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_int_Scalar::call(self, dim, index, value); +} + +// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor +inline at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_int_Tensor::call(self, dim, index, value); +} + +// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor +inline at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_Dimname_Scalar::call(self, dim, index, value); +} + +// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor +inline at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_Dimname_Tensor::call(self, dim, index, value); +} + +// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_int_Scalar_out::call(self, dim, index, value, out); +} +// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) { + return at::_ops::index_fill_int_Scalar_out::call(self, dim, index, value, out); +} + +// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_int_Tensor_out::call(self, dim, index, value, out); +} +// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) { + return at::_ops::index_fill_int_Tensor_out::call(self, dim, index, value, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..22091646a157b3b46241a4f004c7af1569f9b70a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0834a26a45504ab8efc1367c3bd35a01fdb947c2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
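index_fill, whose overloads appear above, writes a single fill value into the selected slices: the int_Scalar overload takes a Scalar, while the int_Tensor overload takes a 0-dimensional tensor. A sketch (assuming libtorch is linked; values are illustrative):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::zeros({3, 4});
  at::Tensor index = at::tensor({1, 3}, at::kLong);

  // int_Scalar overload: fill columns 1 and 3 with 5.
  at::Tensor a = at::index_fill(t, /*dim=*/1, index, 5);

  // int_Tensor overload: the value must be a 0-dim tensor.
  at::Tensor b = at::index_fill(t, /*dim=*/1, index, at::scalar_tensor(5.0f));
  return 0;
}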
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5b69dcba52452abc7e57f2b161e92ce5baab018b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..56ba266ae05797362455bdcea3e5b5267becd328 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
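Note that the CPU dispatch header above registers only the in-place index_fill_; the out-of-place index_fill shown earlier is a composite that presumably clones and then calls the in-place kernel (the usual torchgen arrangement for such ops). In-place use looks like this (a sketch, assuming libtorch is linked):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::zeros({4});
  at::Tensor index = at::tensor({0, 2}, at::kLong);

  // Mutates t directly via the Tensor method for aten::index_fill_.
  t.index_fill_(/*dim=*/0, index, /*value=*/1.5);
  // t is now [1.5, 0, 1.5, 0]
  return 0;
}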
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8d79fa880ee9a57d91f05ff92fa452fcea8abe92 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_meta_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_native.h new file mode 100644 index 0000000000000000000000000000000000000000..47f1492195b3ffda9509969d90b41b6131afc8ff --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_native.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_int_Scalar_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor & index_fill_int_Tensor_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); +} // namespace native +} // namespace at diff --git 
a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c127f0f3757456b639f1c29f592eb37d703f3335 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_fill_ops.h @@ -0,0 +1,128 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_fill__int_Scalar { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill_"; + static constexpr const char* overload_name = "int_Scalar"; + static constexpr const char* schema_str = "index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +}; + +struct TORCH_API index_fill_int_Scalar { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill"; + static constexpr const char* overload_name = "int_Scalar"; + static constexpr const char* schema_str = "index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +}; + +struct TORCH_API index_fill__int_Tensor { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill_"; + static constexpr const char* overload_name = "int_Tensor"; + static constexpr const char* schema_str = "index_fill_.int_Tensor(Tensor(a!) 
self, int dim, Tensor index, Tensor value) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +}; + +struct TORCH_API index_fill_int_Tensor { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill"; + static constexpr const char* overload_name = "int_Tensor"; + static constexpr const char* schema_str = "index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +}; + +struct TORCH_API index_fill__Dimname_Scalar { + using schema = at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill_"; + static constexpr const char* overload_name = "Dimname_Scalar"; + static constexpr const char* schema_str = "index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +}; + +struct TORCH_API index_fill__Dimname_Tensor { + using schema = at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill_"; + static constexpr const char* overload_name = "Dimname_Tensor"; + static constexpr const char* schema_str = "index_fill_.Dimname_Tensor(Tensor(a!) 
self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); +}; + +struct TORCH_API index_fill_Dimname_Scalar { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill"; + static constexpr const char* overload_name = "Dimname_Scalar"; + static constexpr const char* schema_str = "index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +}; + +struct TORCH_API index_fill_Dimname_Tensor { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill"; + static constexpr const char* overload_name = "Dimname_Tensor"; + static constexpr const char* schema_str = "index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); +}; + +struct TORCH_API index_fill_int_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill"; + static constexpr const char* overload_name = "int_Scalar_out"; + static constexpr const char* schema_str = "index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out); +}; + +struct TORCH_API index_fill_int_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill"; + static constexpr const char* overload_name = "int_Tensor_out"; + static constexpr const char* schema_str = "index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out); +}; + +}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..534fc6596940417705d087721cdc002b54fbe836 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_meta.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_index_Tensor : public TensorIteratorBase { + + template <bool SIZES = false, bool STRIDES = false> + struct TORCH_API precompute_out { + + precompute_out<true, STRIDES> set_sizes(at::DimVector value) { + static_assert(SIZES == false, "sizes already set"); + precompute_out<true, STRIDES> ret; +ret.sizes = value; +ret.strides = this->strides; +return ret; + } + + + precompute_out<SIZES, true> set_strides(at::DimVector value) { + static_assert(STRIDES == false, "strides already set"); + precompute_out<SIZES, true> ret; +ret.sizes = this->sizes; +ret.strides = value; +return ret; + } + + at::DimVector sizes; +at::DimVector strides; + }; + using meta_return_ty = precompute_out <true, true>; + meta_return_ty meta(const at::Tensor & self, at::IOptTensorListRef indices); +}; + +} // namespace meta
+} // namespace at
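The precompute_out template above is torchgen's compile-time builder: each setter flips one boolean template flag, and the static_assert rejects setting the same field twice. A hedged sketch of how a meta() body would assemble its return value (illustrative only; the real implementation lives in PyTorch's kernel sources):

    // Inside a hypothetical meta() implementation:
    using S = at::meta::structured_index_Tensor;
    S::meta_return_ty make_result(at::DimVector sizes, at::DimVector strides) {
      return S::precompute_out<false, false>()
          .set_sizes(std::move(sizes))       // -> precompute_out<true, false>
          .set_strides(std::move(strides));  // -> precompute_out<true, true>
    }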
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e1044866603c8d68b0ac51f50ae662b6652ce8c4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor index(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +TORCH_API at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +TORCH_API at::Tensor & index_outf(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out); + +} // namespace meta +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8022b13f7f2a5beef62f2d04f4b31ac75efabf28 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/index_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_index_out : public at::meta::structured_index_Tensor { +void impl(const at::Tensor & self, at::DimVector sizes, at::DimVector strides, const at::Tensor & out); +}; +TORCH_API at::Tensor quantized_index(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +} // namespace native +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cefe64ec1173c63da21d7975b028994c5512eae5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <c10/core/QScheme.h> +#include <c10/macros/Export.h> +#include <optional> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API index_Tensor { + using schema = at::Tensor (const at::Tensor &, const c10::List<::std::optional<at::Tensor>> &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "index.Tensor(Tensor self, Tensor?[] indices) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +}; + +struct TORCH_API index_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const c10::List<::std::optional<at::Tensor>> &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!)
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out); +}; + +}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_put.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_put.h new file mode 100644 index 0000000000000000000000000000000000000000..bdfd035e8b06649516975b82ea2fc808494de838 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_put.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/index_put_ops.h> + +namespace at { + + +// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!) +inline at::Tensor & index_put_(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) { + return at::_ops::index_put_::call(self, indices, values, accumulate); +} + +// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor +inline at::Tensor index_put(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) { + return at::_ops::index_put::call(self, indices, values, accumulate); +} + +// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_put_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) { + return at::_ops::index_put_out::call(self, indices, values, accumulate, out); +} +// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_put_outf(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) { + return at::_ops::index_put_out::call(self, indices, values, accumulate, out); +}
+
+}
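A hedged usage sketch for the index_put family declared above. Note that indices is a list of optional tensors, where an empty slot means "take every element along that dimension" (shapes are illustrative assumptions):

    #include <ATen/ATen.h>

    void index_put_demo() {
      at::Tensor self = at::zeros({4, 3});
      c10::List<::std::optional<at::Tensor>> indices;
      indices.push_back(at::tensor({0, 2}, at::kLong));  // select rows 0 and 2; dim 1 is implicit
      at::index_put_(self, indices, at::ones({2, 3}), /*accumulate=*/false);
    }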
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_put_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_put_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..97962a46b3f98fabdbec6c75a17035f6eb2d2352 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_put_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor index_put(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false); +TORCH_API at::Tensor & index_put_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false); +TORCH_API at::Tensor & index_put_outf(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out); +TORCH_API at::Tensor & index_put_(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false); + +} // namespace compositeexplicitautograd +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_put_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_put_native.h new file mode 100644 index 0000000000000000000000000000000000000000..528fcb4082ed810eae2890f0a773cb94352eec8b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_put_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor index_put(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false); +TORCH_API at::Tensor & index_put_out(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out); +TORCH_API at::Tensor & index_put_(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false); +} // namespace native +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_put_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_put_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f52f3448797fe27558fa5adc64d0d779ef2587ee --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_put_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <c10/core/QScheme.h> +#include <c10/macros/Export.h> +#include <optional> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API index_put_ { + using schema = at::Tensor & (at::Tensor &, const c10::List<::std::optional<at::Tensor>> &, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_put_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_put_(Tensor(a!)
self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate); +}; + +struct TORCH_API index_put { + using schema = at::Tensor (const at::Tensor &, const c10::List<::std::optional<at::Tensor>> &, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_put"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate); +}; + +struct TORCH_API index_put_out { + using schema = at::Tensor & (const at::Tensor &, const c10::List<::std::optional<at::Tensor>> &, const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_put"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out); +};
+
+}} // namespace at::_ops
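Each _ops struct above is a stable handle for one overload: name plus overload_name identify it in the dispatcher, schema_str carries its full JIT schema, and call() is what the public at:: wrappers forward to. A short sketch of that equivalence (self, indices, and values assumed declared as in the earlier example):

    // These two calls are equivalent; at::index_put is a thin inline wrapper.
    at::Tensor a = at::index_put(self, indices, values, /*accumulate=*/false);
    at::Tensor b = at::_ops::index_put::call(self, indices, values, false);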
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..6a081628fc980a1ee91cf7d45a1a6105869721d5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/index_reduce_ops.h> + +namespace at { + + +// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) { + return at::_ops::index_reduce_out::call(self, dim, index, source, reduce, include_self, out); +} +// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) { + return at::_ops::index_reduce_out::call(self, dim, index, source, reduce, include_self, out); +} + +// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor +inline at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) { + return at::_ops::index_reduce::call(self, dim, index, source, reduce, include_self); +} + +}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0cbcb927f3b38d8bdad4cd5a3d9966a242889f2c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); + +} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
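A hedged usage sketch for aten::index_reduce declared above: rows of source are scattered into self along dim and combined with the named reduction ("prod", "mean", "amax", or "amin"); shapes and values are illustrative assumptions:

    at::Tensor self = at::ones({3, 4});
    at::Tensor index = at::tensor({0, 2, 0}, at::kLong);
    at::Tensor source = at::randn({3, 4});
    // Rows 0 and 2 of `self` absorb rows of `source` under an elementwise max.
    at::Tensor out = at::index_reduce(self, /*dim=*/0, index, source, "amax");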
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3b4ef268fedf54d52916957c89814c2478b0e329 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); +TORCH_API at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); + +} // namespace cpu +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c1f0273a6d873749fb4a7e4844140d56ce96bd02 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); +TORCH_API at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); + +} // namespace cuda +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..4949f54639ad824afa3c9eeb3493f9fd123eb560 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_meta.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_index_reduce : public at::impl::MetaBase { + + template <bool DIM = false> + struct TORCH_API precompute_out { + + precompute_out<true> set_dim(int64_t value) { + static_assert(DIM == false, "dim already set"); + precompute_out<true> ret; +ret.dim = value; +return ret; + } + + int64_t dim; + }; + using meta_return_ty = precompute_out <true>; + meta_return_ty meta(const at::Tensor & self,
int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); +}; + +} // namespace meta +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0928a866a3f3c908ef0fcc3d0a4605d32c17f087 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); +TORCH_API at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); + +} // namespace meta +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0d56002b44131b4653ddeb51fedcc12d59f7af6e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/index_reduce_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_index_reduce_cpu_out : public at::meta::structured_index_reduce { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, const at::Tensor & out); +}; +struct TORCH_API structured_index_reduce_cuda_out : public at::meta::structured_index_reduce { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, const at::Tensor & out); +}; +} // namespace native +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cf53cc8e6b0e5844c60198df1349ce8dfbf195ee --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_reduce_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <c10/core/QScheme.h> +#include <c10/macros/Export.h> +#include <optional> + +// Forward
declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API index_reduce_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, c10::string_view, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_reduce"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); +}; + +struct TORCH_API index_reduce_ { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, c10::string_view, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_reduce_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); +}; + +struct TORCH_API index_reduce { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, c10::string_view, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_reduce"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); +}; + +}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select.h new file mode 100644 index 0000000000000000000000000000000000000000..f70a74d3da48401c8944ec1df69bf3d53807297b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/index_select_ops.h> + +namespace at { + + +// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_out::call(self, dim, index, out); +} +// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_select_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) { + return at::_ops::index_select_out::call(self, dim, index, out); +} + +// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor +inline at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select::call(self, dim, index); +} + +// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index) { + return at::_ops::index_select_dimname_out::call(self, dim, index, out); +} +// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_select_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) { + return at::_ops::index_select_dimname_out::call(self, dim, index, out); +} + +// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor +inline at::Tensor index_select(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) { + return at::_ops::index_select_dimname::call(self, dim, index); +}
+
+}
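A short usage sketch for aten::index_select declared above (shapes are illustrative assumptions):

    at::Tensor x = at::arange(12, at::kFloat).reshape({4, 3});
    at::Tensor idx = at::tensor({2, 0}, at::kLong);
    at::Tensor rows = at::index_select(x, /*dim=*/0, idx);  // shape [2, 3]: row 2, then row 0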
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..0b703cae8f35e2d6896579e3940571e698481724 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_backward.h @@ -0,0 +1,48 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/index_select_backward_ops.h> + +namespace at { + + +// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor +inline at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_backward::call(grad, c10::fromIntArrayRefSlow(self_sizes), dim, index); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_backward::call(grad, c10::fromIntArrayRefSlow(self_sizes), dim, index); + } +} + +// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor +inline at::Tensor index_select_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_backward::call(grad, self_sizes, dim, index); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor index_select_backward(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_backward::call(grad, self_sizes, dim, index); + } +} + +}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_backward_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..000bde122ab6a6fda3f44b16165fc8caa7861bd1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor index_select_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index); + +} // namespace compositeimplicitautograd +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..640fbff25667bdc6a029b7b3bda71ff0f5a5f521 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor index_select_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index); +} // namespace native
+} // namespace at
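The at::symint:: overloads above use enable_if so a caller can pick the plain-int64_t or SymInt flavor explicitly via a template argument, while the unqualified at::index_select_backward takes concrete sizes. A hedged sketch (shapes assumed for illustration):

    at::Tensor grad = at::randn({2, 3});
    at::Tensor idx = at::tensor({0, 2}, at::kLong);
    // Rebuilds a zero tensor of the original shape and scatters `grad` back in.
    at::Tensor gin = at::index_select_backward(grad, /*self_sizes=*/{4, 3}, /*dim=*/0, idx);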
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d2d8c5f7357bbf71f37aa2c88d21da51e0983487 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_backward_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <c10/core/QScheme.h> +#include <c10/macros/Export.h> +#include <optional> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API index_select_backward { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, int64_t, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_select_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor"; + static at::Tensor call(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index); +}; + +}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a25c82f799ce17129197a0e1c83b1e4003eac2c3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor index_select(const at::Tensor & self, at::Dimname dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2824713c4813e43095e1637cb5cf6b6785f4ae3a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); + +} // namespace cpu +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2f0fc95a0eb76cddd132147cf2997a564baf1a95 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); + +} // namespace cuda +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d6cd62338e7aa25a3b346e4fb7a3eb497f47b06e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_native.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor index_select_cpu_(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_out_cpu_(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); +TORCH_API at::Tensor index_select_cuda(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_out_cuda(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); +TORCH_API at::Tensor index_select_sparse_cpu(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor index_select_sparse_cuda(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor index_select_quantized_cpu_(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor index_select_quantized_cuda(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor index_select(const at::Tensor & self, at::Dimname dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_out(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out); +} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..68aed4c76242c52b6d4b28c147d8ce3e17039a49 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/index_select_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <c10/core/QScheme.h> +#include <c10/macros/Export.h> +#include <optional> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API index_select_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_select"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); +}; + +struct TORCH_API index_select { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_select"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_select(Tensor self, int dim, Tensor index) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index); +}; + +struct TORCH_API index_select_dimname_out { + using schema = at::Tensor & (const at::Tensor &, at::Dimname, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_select"; + static constexpr const char* overload_name = "dimname_out"; + static constexpr const char* schema_str = "index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!)
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out); +}; + +struct TORCH_API index_select_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_select"; + static constexpr const char* overload_name = "dimname"; + static constexpr const char* schema_str = "index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index); +}; + +}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/indices.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices.h new file mode 100644 index 0000000000000000000000000000000000000000..eb0a876c342a4ad1392a8c533ea4eecb8ec3c90b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/indices_ops.h> + +namespace at { + + + +}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d404e48c7f4ffd4284f042899c16bcd4939cb9d4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor indices(const at::Tensor & self); + +} // namespace compositeexplicitautograd
+} // namespace at
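aten::indices (and the indices_copy variant that follows) is only meaningful for sparse COO tensors; the dense kernel (indices_default, declared later in indices_native.h) is expected to throw. A hedged sketch, assuming a coalesced sparse tensor since indices() requires one:

    at::Tensor dense = at::arange(4, at::kFloat).reshape({2, 2});
    at::Tensor sp = dense.to_sparse().coalesce();
    at::Tensor ix = sp.indices();            // 2 x nnz int64 tensor of coordinates (method-only op)
    at::Tensor ixc = at::indices_copy(sp);   // same data, returned as an owning copy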
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..61d21b170fb23312c9d7de6527e4124ff7f89c5a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/indices_copy_ops.h> + +namespace at { + + +// aten::indices_copy(Tensor self) -> Tensor +inline at::Tensor indices_copy(const at::Tensor & self) { + return at::_ops::indices_copy::call(self); +} + +// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & indices_copy_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::indices_copy_out::call(self, out); +} +// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & indices_copy_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::indices_copy_out::call(self, out); +} + +}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..12d87d4096516445e86a5030c36106dd35f769c2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & indices_copy_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & indices_copy_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d044875a098f7215113167df56844b39527b158b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor indices_copy(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..17403631c73fab24b1b312eba485a370de6dc9cb --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & indices_copy_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor indices_copy(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..97f35a3aa4d60e7f7035681885eb767e854ec397 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_copy_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API indices_copy { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::indices_copy"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "indices_copy(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API indices_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::indices_copy"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "indices_copy.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9bfe32af4b6eb5dbdd739d477a4520027b764c03 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor indices_default(const at::Tensor & self); +TORCH_API at::Tensor indices_sparse(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9aa3b3558030a362a9d7f4c64cb6c4fc239e67aa --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/indices_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API indices { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::indices"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "indices(Tensor(a) self) -> Tensor(a)"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..e8c9a24224c9c4896b81c04a505ebd1ecdb75243 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor +inline at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self) { + return at::_ops::infinitely_differentiable_gelu_backward::call(grad, self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 
0000000000000000000000000000000000000000..a6cf1f38aec40ebfce7ef5cdde813b96eca321cd --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..830d3a4e890aea586e11bb08cafda6ecce8185af --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d0d6f67478a5aff3e2c2976bd47e5d94ccf5909e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
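`infinitely_differentiable_gelu_backward` is, as the name suggests, the analytic backward of the erf-based GELU, `gelu(x) = x * Φ(x)`, whose derivative is `Φ(x) + x * φ(x)` with Φ and φ the standard normal CDF and PDF. A sketch checking that relationship against the operator declared above (the formula is an assumption about the kernel; the op is an internal aten entry point rather than documented public API):

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  auto x    = at::randn({4});
  auto grad = at::ones({4});

  const double kInvSqrt2   = 0.7071067811865476;  // 1/sqrt(2)
  const double kInvSqrt2Pi = 0.3989422804014327;  // 1/sqrt(2*pi)
  auto cdf = 0.5 * (1.0 + at::erf(x * kInvSqrt2));  // Phi(x)
  auto pdf = kInvSqrt2Pi * at::exp(-0.5 * x * x);   // phi(x)
  auto manual = grad * (cdf + x * pdf);

  auto from_op = at::infinitely_differentiable_gelu_backward(grad, x);
  std::cout << at::allclose(manual, from_op) << "\n";  // expected: 1
  return 0;
}
```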
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API infinitely_differentiable_gelu_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::infinitely_differentiable_gelu_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & grad, const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/inner.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/inner.h new file mode 100644 index 0000000000000000000000000000000000000000..b3c9d05cf8e17d37e24aeede7dd22ff1ade4356c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/inner.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::inner(Tensor self, Tensor other) -> Tensor +inline at::Tensor inner(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::inner::call(self, other); +} + +// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & inner_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::inner_out::call(self, other, out); +} +// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & inner_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::inner_out::call(self, other, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/inner_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/inner_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2c628b74e76904e63178f8f07c5373e206d5b256 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/inner_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
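For orientation, `aten::inner` contracts the last dimension of both operands, `result[i, j] = Σ_k a[i, k] * b[j, k]`. A short sketch of the three generated entry points:

```cpp
#include <ATen/ATen.h>

int main() {
  auto a = at::randn({3, 4});
  auto b = at::randn({5, 4});

  auto r = at::inner(a, b);  // shape {3, 5}

  // Out variants: `inner_out` takes `out` first, `inner_outf` last;
  // both route to at::_ops::inner_out::call.
  auto out = at::empty({3, 5}, a.options());
  at::inner_out(out, a, b);
  at::inner_outf(a, b, out);
  return 0;
}
```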
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor inner(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & inner_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & inner_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/inner_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/inner_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d9cc35c724d040f4e81367522f3a7cc7ad9f7406 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/inner_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor inner(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & inner_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/inner_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/inner_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b92b22b5c41ca02b6a5937ff3f7c1817f8803d12 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/inner_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API inner { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::inner"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "inner(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API inner_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::inner"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "inner.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/instance_norm.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/instance_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..b8330de72eaf7b26acab1c69478ba8c62543fc0e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/instance_norm.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor +inline at::Tensor instance_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) { + return at::_ops::instance_norm::call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/instance_norm_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/instance_norm_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c6e981cda2e5c4982d70ccfc92409318ece7c636 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/instance_norm_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor instance_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/instance_norm_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/instance_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1de330ccda5f43b13d947208831e2d0b75b5df2e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/instance_norm_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor instance_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/instance_norm_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/instance_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0ac2af42432d7a1f6f9f7b20045ef951bfda2d03 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/instance_norm_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API instance_norm { + using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, bool, double, double, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::instance_norm"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor?
running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor"; + static at::Tensor call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/int_repr.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/int_repr.h new file mode 100644 index 0000000000000000000000000000000000000000..118b10b2cd2c7612d92bac7facab79f8e41fc2a8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/int_repr.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::int_repr(Tensor self) -> Tensor +inline at::Tensor int_repr(const at::Tensor & self) { + return at::_ops::int_repr::call(self); +} + +// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & int_repr_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::int_repr_out::call(self, out); +} +// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & int_repr_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::int_repr_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/int_repr_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/int_repr_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..391ab5673f26908cb506d1a2c408e073db49a464 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/int_repr_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
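A usage sketch for `at::instance_norm` as declared above; the four `Tensor?` arguments accept `{}` (`std::nullopt`) when running statistics are not tracked, which is the stateless, batch-statistics mode:

```cpp
#include <ATen/ATen.h>

int main() {
  auto input  = at::randn({2, 3, 8, 8});  // N, C, H, W
  auto weight = at::ones({3});            // per-channel affine scale
  auto bias   = at::zeros({3});           // per-channel affine shift

  auto y = at::instance_norm(
      input, weight, bias,
      /*running_mean=*/{}, /*running_var=*/{},
      /*use_input_stats=*/true, /*momentum=*/0.1, /*eps=*/1e-5,
      /*cudnn_enabled=*/false);
  return 0;
}
```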
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & int_repr_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & int_repr_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/int_repr_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/int_repr_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3ae68f2b3be1101a68bef94877f0d13bb561bde9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/int_repr_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & int_repr_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor int_repr_quantized_cpu(const at::Tensor & self); +TORCH_API at::Tensor int_repr_quantized_cuda(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/int_repr_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/int_repr_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2992a2dcf42fe7b6f5447765682790898b22f462 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/int_repr_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API int_repr { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::int_repr"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "int_repr(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API int_repr_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::int_repr"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "int_repr.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/inverse.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/inverse.h new file mode 100644 index 0000000000000000000000000000000000000000..1c17f349ca8edadaa51096be52b022f9526563ef --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/inverse.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::inverse(Tensor self) -> Tensor +inline at::Tensor inverse(const at::Tensor & self) { + return at::_ops::inverse::call(self); +} + +// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & inverse_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::inverse_out::call(self, out); +} +// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & inverse_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::inverse_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/inverse_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/inverse_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e4960ce2495b8f50c8c7b0d76f9847dbb7e5911a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/inverse_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
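`int_repr` is only meaningful for quantized tensors (hence the `quantized_cpu`/`quantized_cuda` native kernels above); it returns the raw integer storage as an ordinary tensor. A sketch:

```cpp
#include <ATen/ATen.h>

int main() {
  auto x = at::randn({4});
  auto q = at::quantize_per_tensor(x, /*scale=*/0.1, /*zero_point=*/128,
                                   at::kQUInt8);
  auto raw = at::int_repr(q);  // uint8 tensor holding the stored values
  return 0;
}
```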
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor inverse(const at::Tensor & self); +TORCH_API at::Tensor & inverse_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & inverse_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/inverse_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/inverse_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7040259e443c3ceef86807bcbd864263a28b35b7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/inverse_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor inverse(const at::Tensor & self); +TORCH_API at::Tensor & inverse_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/inverse_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/inverse_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ef252ade08ea921f9262a2819102645632e6fb1c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/inverse_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API inverse { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::inverse"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "inverse(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API inverse_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::inverse"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "inverse.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_coalesced.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_coalesced.h new file mode 100644 index 0000000000000000000000000000000000000000..2c40759436653c7eb7a15cdee499319267fcabf9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_coalesced.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_coalesced_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_coalesced_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..93e2ebc463f4e31c6583f602ac9791cf2f2aa19e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_coalesced_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API bool is_coalesced(const at::Tensor & self); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_coalesced_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_coalesced_native.h new file mode 100644 index 0000000000000000000000000000000000000000..037f23a9bf5efc58b5f4c221fc8ccb13b6696814 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_coalesced_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_coalesced_default(const at::Tensor & self); +TORCH_API bool is_coalesced_sparse(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_coalesced_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_coalesced_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..29e9bbacc3c2e9c190570c91a304b07d28b94977 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_coalesced_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
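Like `indices`, `is_coalesced` exposes only a `Tensor` method (note the empty `namespace at` block in `is_coalesced.h`) with separate `_default` and `_sparse` native kernels. A sketch of the behavior it reports:

```cpp
#include <ATen/ATen.h>

int main() {
  // Three entries at the same coordinate keep the tensor un-coalesced.
  auto idx = at::zeros({2, 3}, at::kLong);
  auto val = at::ones({3});
  auto s = at::sparse_coo_tensor(idx, val, {2, 2});

  bool before = s.is_coalesced();  // false
  auto c = s.coalesce();           // merges duplicates by summing
  bool after = c.is_coalesced();   // true
  return 0;
}
```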
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_coalesced { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_coalesced"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_coalesced(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_complex.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..91301f8d9e46a20f4ce7d439c4489fd82bd61eb4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_complex.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_complex(Tensor self) -> bool +inline bool __dispatch_is_complex(const at::Tensor & self) { + return at::_ops::is_complex::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_complex_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_complex_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..66752bc5b67625c510277df8cb10ea073372dfd6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_complex_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
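The `__dispatch_` prefix on `is_complex` (and on the similar `is_*` predicates below) marks free-function wrappers generated for ops that also exist as `Tensor` methods; user code normally reaches the operator through either spelling:

```cpp
#include <ATen/ATen.h>

int main() {
  auto z = at::zeros({2}, at::kComplexFloat);
  bool a = z.is_complex();     // method form
  bool b = at::is_complex(z);  // free-function form, same aten::is_complex
  return 0;
}
```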
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_complex(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_complex_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_complex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9332e7dd836212662da858e78638f47a07d59bc4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_complex_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_complex(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_complex_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_complex_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0245af432caac433e84d2e093dfadb04dc81c013 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_complex_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_complex { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_complex"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_complex(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_conj.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_conj.h new file mode 100644 index 0000000000000000000000000000000000000000..ebd565a66cb7a66c66bd4c166cddc4d2e40416e0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_conj.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_conj(Tensor self) -> bool +inline bool __dispatch_is_conj(const at::Tensor & self) { + return at::_ops::is_conj::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_conj_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_conj_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..df548eb47b21a3481103fae2e095b039eb81be71 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_conj_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include 
+#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_conj(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_conj_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_conj_native.h new file mode 100644 index 0000000000000000000000000000000000000000..37bed9ce4ec9bdbcac590a178667562c8b9e686a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_conj_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_conj(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_conj_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_conj_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..76f17575c9f071eb75bb67b51fe86c25fd040f11 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_conj_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_conj { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_conj"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_conj(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_distributed.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_distributed.h new file mode 100644 index 0000000000000000000000000000000000000000..d3815a3ce27e6331ae7d10143f75da57a5c3cbfe --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_distributed.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_distributed(Tensor self) -> bool +inline bool is_distributed(const at::Tensor & self) { + return at::_ops::is_distributed::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_distributed_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_distributed_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..88bffbd1f46d837b119250cbaae9b2b412026676 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_distributed_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_distributed(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_distributed_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_distributed_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a3d4e9f4815d22a577ef2af1aaf78e96d3ae7f70 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_distributed_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_distributed(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_distributed_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_distributed_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0ce9267bcea67bab051ae7078d2d9d3a0ff1c94b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_distributed_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_distributed { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_distributed"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_distributed(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_floating_point.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_floating_point.h new file mode 100644 index 0000000000000000000000000000000000000000..66598e0baf345a350cc1cccdda8e421edbc95258 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_floating_point.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_floating_point(Tensor self) -> bool +inline bool __dispatch_is_floating_point(const at::Tensor & self) { + return at::_ops::is_floating_point::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_floating_point_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_floating_point_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9fef3a5df5096e42a3d00470f6197759983d88b9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_floating_point_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is 
RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_floating_point(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_floating_point_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_floating_point_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1baf7b70ca7ad6ab3cddf4e645296eb0c8948c27 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_floating_point_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_floating_point(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_floating_point_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_floating_point_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8aac762f2e30a037e5d7f011309e5bc1ae951188 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_floating_point_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
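`is_floating_point` covers every floating dtype, including the half-precision ones. A sketch:

```cpp
#include <ATen/ATen.h>

int main() {
  bool f = at::is_floating_point(at::zeros({2}, at::kFloat));  // true
  bool h = at::is_floating_point(at::zeros({2}, at::kHalf));   // true
  bool i = at::is_floating_point(at::zeros({2}, at::kLong));   // false
  return 0;
}
```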
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_floating_point { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_floating_point"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_floating_point(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_inference.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_inference.h new file mode 100644 index 0000000000000000000000000000000000000000..7d11a62436f56c923053f44e5b817abf9cba7af3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_inference.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_inference(Tensor self) -> bool +inline bool __dispatch_is_inference(const at::Tensor & self) { + return at::_ops::is_inference::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_inference_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_inference_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a4bd9b4f2d1f423eeeb369eaa852193d39f275fa --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_inference_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
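`is_inference` reports whether a tensor was created under `c10::InferenceMode`, the no-autograd, no-view-tracking regime. A sketch:

```cpp
#include <ATen/ATen.h>
#include <c10/core/InferenceMode.h>

int main() {
  at::Tensor a;
  {
    c10::InferenceMode guard;  // disables autograd bookkeeping in scope
    a = at::ones({2, 2});      // -> an inference tensor
  }
  bool inf = a.is_inference();  // true
  return 0;
}
```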
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_inference(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_inference_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_inference_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e71965e829ddba5cf952ef990f44eac44a86d47c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_inference_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_inference(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_inference_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_inference_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4bc4c0e6d1292abac3fc63327ed6d8d9532133f2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_inference_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_inference { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_inference"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_inference(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_leaf.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_leaf.h new file mode 100644 index 0000000000000000000000000000000000000000..c7852700118c1063cf895616f2650c6115f139ec --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_leaf.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_leaf_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_leaf_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..034927bd56d3f9e5f0f9ab102f6c64a44f0da250 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_leaf_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_leaf(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_leaf_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_leaf_native.h new file mode 100644 index 0000000000000000000000000000000000000000..db1d03c8f5971562952c40fc34289696b8ff0e5a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_leaf_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_leaf(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_leaf_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_leaf_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..00d0f8e6a497881493f8768bb75d440a6de30ed9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_leaf_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_leaf { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_leaf"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_leaf(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_neg.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_neg.h new file mode 100644 index 0000000000000000000000000000000000000000..8a82d4c48509fab3b270cb45034991d10a4a5fbf --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_neg.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_neg(Tensor self) -> bool +inline bool __dispatch_is_neg(const at::Tensor & self) { + return at::_ops::is_neg::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_neg_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_neg_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1cc015ddd8172a3aa7ee89b9ed46f7abc1780bec --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_neg_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is 
RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_neg(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_neg_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_neg_native.h new file mode 100644 index 0000000000000000000000000000000000000000..151c83d9bbef09e529b101af8841cdc117b79d03 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_neg_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_neg(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_neg_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_neg_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7bc45b97daa330249e3e950bb22deb514f3e4547 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_neg_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
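// [editor's note: illustrative sketch, not part of the vendored headers]
// Each *_ops.h header declares one struct per operator overload; the struct
// bundles the registered schema with typed `call`/`redispatch` entry points.
// A minimal sketch of calling the struct below directly, assuming a linked
// libtorch:
//
//   #include <ATen/ATen.h>
//   at::Tensor t = at::randn({2, 2});
//   bool neg_bit = at::_ops::is_neg::call(t);   // equivalent to t.is_neg()
//
// `call` runs the full dispatcher; `redispatch` lets a kernel that already
// holds a c10::DispatchKeySet continue dispatch below its own key.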
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_neg { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_neg"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_neg(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_nonzero.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_nonzero.h new file mode 100644 index 0000000000000000000000000000000000000000..8a644c5d46fcdd75ee022464bc10aaf54d2705fc --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_nonzero.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_nonzero(Tensor self) -> bool +inline bool is_nonzero(const at::Tensor & self) { + return at::_ops::is_nonzero::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_nonzero_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_nonzero_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d268c7f38df6ab0223dc4f5530b6650f37de557e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_nonzero_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
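// [editor's note: illustrative sketch, not part of the vendored headers]
// The compositeimplicitautograd namespace declared below holds ops with no
// backend-specific kernel: they decompose into other ops, so autograd and
// every backend get them for free. Usage sketch for is_nonzero:
//
//   at::Tensor s = at::scalar_tensor(3);
//   bool b = at::is_nonzero(s);   // true; throws if s.numel() != 1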
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_nonzero(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_nonzero_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_nonzero_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4911287bebe6ae40785f34fa6e04e8ab0cea3428 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_nonzero_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_nonzero(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_nonzero_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_nonzero_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6d0f063a079bc37f853c5460ca3c53f23ec3c0ec --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_nonzero_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_nonzero { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_nonzero"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_nonzero(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_pinned.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_pinned.h new file mode 100644 index 0000000000000000000000000000000000000000..5859f44ce37b4a28c42d087b3abe698dd1647d5e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_pinned.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..76d6244d189129a78f84970fec6137be4034e8b0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
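// [editor's note: illustrative sketch, not part of the vendored headers]
// The declaration below takes the optional Device from the schema
// "is_pinned(Tensor self, Device? device=None)"; passing ::std::nullopt asks
// about the default pin-memory context. Sketch, assuming a CUDA-enabled
// build:
//
//   at::Tensor t = at::empty({8}, at::TensorOptions().pinned_memory(true));
//   bool pinned = at::_ops::is_pinned::call(t, ::std::nullopt);   // true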
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API bool is_pinned(const at::Tensor & self, ::std::optional<at::Device> device=::std::nullopt); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_pinned_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_pinned_native.h new file mode 100644 index 0000000000000000000000000000000000000000..240616433ed6e96283432a40f7dc8003d5895765 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_pinned_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_pinned(const at::Tensor & self, ::std::optional<at::Device> device=::std::nullopt); +TORCH_API bool is_pinned_sparse_coo(const at::Tensor & self, ::std::optional<at::Device> device=::std::nullopt); +TORCH_API bool is_pinned_sparse_compressed(const at::Tensor & self, ::std::optional<at::Device> device=::std::nullopt); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_pinned_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_pinned_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..942a4213fa90ecac178ae49020c843fbc454393e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_pinned_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_pinned { + using schema = bool (const at::Tensor &, ::std::optional<at::Device>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_pinned"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_pinned(Tensor self, Device? 
device=None) -> bool"; + static bool call(const at::Tensor & self, ::std::optional<at::Device> device); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Device> device); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_same_size.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_same_size.h new file mode 100644 index 0000000000000000000000000000000000000000..7fe00e4be014784e96c69a574205673bb9a3cf03 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_same_size.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_same_size(Tensor self, Tensor other) -> bool +inline bool is_same_size(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::is_same_size::call(self, other); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f59441243c2cb56397418e84ecc14ea43f1590b2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API bool is_same_size(const at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_same_size_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_same_size_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f388c0b2f44fb29a715abf761ee294eda5893436 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_same_size_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_same_size(const at::Tensor & self, const at::Tensor & other); +TORCH_API bool nested_is_same_size(const at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_same_size_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_same_size_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4a428b78c82e82c168ddbfbccc3466e064fae42b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_same_size_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_same_size { + using schema = bool (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_same_size"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_same_size(Tensor self, Tensor other) -> bool"; + static bool call(const at::Tensor & self, const at::Tensor & other); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to.h new file mode 100644 index 0000000000000000000000000000000000000000..14da5c2b92d79ffa1cf128f8cef1d62b9db9cdc8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..12cfeabf9c0330f18ae6daf503dc8e40cc18137c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API bool is_set_to(const at::Tensor & self, const at::Tensor & tensor); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fea34cfda3b0a49e6f7837c996d7b6b67c26e1b1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
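// [editor's note: illustrative sketch, not part of the vendored headers]
// The at::cpu:: and at::cuda:: dispatch headers expose the backend kernels
// directly, bypassing the dispatcher. is_set_to(self, tensor) reports whether
// both tensors share the same storage, offset, sizes, and strides. Sketch:
//
//   at::Tensor a = at::zeros({4});
//   at::Tensor b = at::empty({0});
//   b.set_(a);
//   bool same = b.is_set_to(a);   // true: b now aliases a exactly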
+#include + +namespace at { + +namespace cuda { + +TORCH_API bool is_set_to(const at::Tensor & self, const at::Tensor & tensor); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to_native.h new file mode 100644 index 0000000000000000000000000000000000000000..84c1cb682c0467dddbe430a669ce25d8fbd811ff --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_set_to(const at::Tensor & self, const at::Tensor & tensor); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e0f26d8ad96ad15b2b204ce4d7a213253bfdd222 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_set_to_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_set_to { + using schema = bool (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_set_to"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_set_to(Tensor self, Tensor tensor) -> bool"; + static bool call(const at::Tensor & self, const at::Tensor & tensor); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_signed.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_signed.h new file mode 100644 index 0000000000000000000000000000000000000000..e8bd74410d9f3ccf8b517bb3db17d3a2fc36d959 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_signed.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_signed(Tensor self) -> bool +inline bool __dispatch_is_signed(const at::Tensor & self) { + return at::_ops::is_signed::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_signed_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_signed_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..53d963a1a94859483084268a1d432750c2f96dfe --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_signed_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + 
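// [editor's note: illustrative sketch, not part of the vendored headers]
// Operators that exist primarily as Tensor methods are exposed in Function.h
// under a __dispatch_ prefix (see __dispatch_is_signed above) so the free
// function does not shadow the method name. Sketch:
//
//   at::Tensor t = at::arange(4);            // int64 is a signed dtype
//   bool s1 = t.is_signed();                 // method form
//   bool s2 = at::__dispatch_is_signed(t);   // equivalent free function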
+// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_signed(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_signed_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_signed_native.h new file mode 100644 index 0000000000000000000000000000000000000000..48b2a7fe649b9874a20c4a5a3952212cc37d0175 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_signed_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_signed(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_signed_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_signed_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6d0e9a2e1b440442d4d57bc856e64687b2301ee1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_signed_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
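// [editor's note: illustrative sketch, not part of the vendored headers]
// The `name`/`overload_name` pair below is the key under which this schema
// is registered, so the op is also reachable dynamically. Sketch using the
// dispatcher API (assumes ATen/core/dispatch/Dispatcher.h):
//
//   auto op = c10::Dispatcher::singleton()
//       .findSchemaOrThrow("aten::is_signed", /*overload_name=*/"");
//   // for some at::Tensor t:
//   bool s = op.typed<bool (const at::Tensor &)>().call(t);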
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_signed { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_signed"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_signed(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_vulkan_available.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_vulkan_available.h new file mode 100644 index 0000000000000000000000000000000000000000..c43c0ed853d34b45a467366978fe35c41205e908 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_vulkan_available.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_vulkan_available() -> bool +inline bool is_vulkan_available() { + return at::_ops::is_vulkan_available::call(); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..773b7f8a87d77c7df5e4ba841945a01c98260aab --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
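// [editor's note: illustrative sketch, not part of the vendored headers]
// is_vulkan_available is one of the rare nullary operators (schema
// "is_vulkan_available() -> bool"): it takes no tensors, so dispatch resolves
// without any input keys. Typical guard, sketched:
//
//   if (at::is_vulkan_available()) {
//     // route work to the Vulkan backend, e.g. t.to(at::kVulkan)
//   }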
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_vulkan_available(); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_vulkan_available_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_vulkan_available_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9362100351a25eaa5c012dbb5d479d422f4b0848 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_vulkan_available_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_vulkan_available(); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/is_vulkan_available_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_vulkan_available_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..99357d62246cd969ff14690a059c0afe33c64791 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/is_vulkan_available_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_vulkan_available { + using schema = bool (); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_vulkan_available"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_vulkan_available() -> bool"; + static bool call(); + static bool redispatch(c10::DispatchKeySet dispatchKeySet); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isclose.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isclose.h new file mode 100644 index 0000000000000000000000000000000000000000..7eee97942b65c7e8f25664c7510c8e97facac540 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isclose.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor +inline at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) { + return at::_ops::isclose::call(self, other, rtol, atol, equal_nan); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isclose_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isclose_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..339f38622bd555058d4dfdf767be6e3d526f5b64 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isclose_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: 
The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isclose_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isclose_native.h new file mode 100644 index 0000000000000000000000000000000000000000..187b7af23bed050980c962d7a55de7c086766fdd --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isclose_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isclose_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isclose_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9ee3188ca5c784d8aacf0a95404f790d0bbe6faa --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isclose_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
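// [editor's note: illustrative sketch, not part of the vendored headers]
// The schema below encodes the usual elementwise tolerance test,
//   |self - other| <= atol + rtol * |other|
// with equal_nan controlling whether NaN compares equal to NaN. Sketch:
//
//   at::Tensor a = at::tensor({1.0, 2.0});
//   at::Tensor b = at::tensor({1.0, 2.00001});
//   at::Tensor close = at::isclose(a, b);   // rtol=1e-05, atol=1e-08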
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API isclose { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, double, double, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isclose"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isfinite.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isfinite.h new file mode 100644 index 0000000000000000000000000000000000000000..d96499a02406c3f77b31d755e940024cbefd697b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isfinite.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isfinite(Tensor self) -> Tensor +inline at::Tensor isfinite(const at::Tensor & self) { + return at::_ops::isfinite::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isfinite_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isfinite_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1225e84b8b7c9e652eb5606fe923423d471e867c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isfinite_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
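// [editor's note: illustrative sketch, not part of the vendored headers]
// isfinite ships only a CompositeImplicitAutograd kernel: it is defined in
// terms of other ops (roughly, finite means neither inf nor NaN; integral
// inputs are trivially finite), so no backend needs a dedicated kernel.
// Sketch (INFINITY/NAN from <cmath>):
//
//   at::Tensor x = at::tensor({1.0, INFINITY, NAN});
//   at::Tensor mask = at::isfinite(x);   // [true, false, false]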
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor isfinite(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isfinite_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isfinite_native.h new file mode 100644 index 0000000000000000000000000000000000000000..74e546487312352498a4825d736afb9b7148d011 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isfinite_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor isfinite(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isfinite_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isfinite_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..10216a66e74f270c247436ddd722d662bf15c0f9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isfinite_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API isfinite { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isfinite"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "isfinite(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isin.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin.h new file mode 100644 index 0000000000000000000000000000000000000000..ed75b528a534cc3724664a87be5ea9f7062f42e1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin.h @@ -0,0 +1,68 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Tensor_out::call(elements, test_elements, assume_unique, invert, out); +} +// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & isin_outf(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) { + return at::_ops::isin_Tensor_Tensor_out::call(elements, test_elements, assume_unique, invert, out); +} + +// aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor +inline at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Tensor::call(elements, test_elements, assume_unique, invert); +} + +// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Scalar_out::call(elements, test_element, assume_unique, invert, out); +} +// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isin_outf(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) { + return at::_ops::isin_Tensor_Scalar_out::call(elements, test_element, assume_unique, invert, out); +} + +// aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor +inline at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Scalar::call(elements, test_element, assume_unique, invert); +} + +// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isin_out(at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Scalar_Tensor_out::call(element, test_elements, assume_unique, invert, out); +} +// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & isin_outf(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) { + return at::_ops::isin_Scalar_Tensor_out::call(element, test_elements, assume_unique, invert, out); +} + +// aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor +inline at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Scalar_Tensor::call(element, test_elements, assume_unique, invert); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3004eb5e1aebf0f6a30f8d12c702a1c7e3081b36 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4003e2497dda7600492f0258ed122b3be9fcccdb --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_cpu_dispatch.h @@ -0,0 +1,31 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
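// [editor's note: illustrative sketch, not part of the vendored headers]
// Note the paired wrappers: isin_out(out, ...) takes the output first (the
// C++ convenience order), while isin_outf(..., out) follows the schema order
// exactly; both forward to the same _ops entry point. Sketch:
//
//   at::Tensor elems = at::arange(5);
//   at::Tensor tests = at::tensor({1, 3});
//   at::Tensor out = at::empty({5}, at::kBool);
//   at::isin_out(out, elems, tests);                  // defaults applied
//   at::isin_outf(elems, tests, false, false, out);   // schema-order form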
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out); +TORCH_API at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6811678735ab1bd88b542b6ea11c181335d1a696 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_cuda_dispatch.h @@ -0,0 +1,31 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out); +TORCH_API at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..7d3fd70a4551339e314ce930f8f63774b2321811 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_meta.h @@ -0,0 +1,37 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_isin_Tensor_Tensor : public at::impl::MetaBase { + + + void meta(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert); +}; +struct TORCH_API structured_isin_Tensor_Scalar : public at::impl::MetaBase { + + + void meta(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert); +}; +struct TORCH_API structured_isin_Scalar_Tensor : public at::impl::MetaBase { + + + void meta(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c57241048ddd270a4abdc32f196bbe31dee7eca9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_meta_dispatch.h @@ -0,0 +1,31 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
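// [editor's note: illustrative sketch, not part of the vendored headers]
// isin is a structured op: the structured_isin_* meta() functions above only
// compute the output's shape/dtype, and the native impl() writes the data.
// The same meta functions power the Meta device, i.e. shape inference with
// no real storage. Sketch:
//
//   at::Tensor e = at::empty({7}, at::TensorOptions().device(at::kMeta));
//   at::Tensor r = at::isin(e, e);   // r.sizes() == {7}; nothing computed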
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out); +TORCH_API at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c25f7cc55a10e7e0e9621da530e83896bff82346 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_isin_Tensor_Tensor_out : public at::meta::structured_isin_Tensor_Tensor { +void impl(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, const at::Tensor & out); +}; +struct TORCH_API structured_isin_Tensor_Scalar_out : public at::meta::structured_isin_Tensor_Scalar { +void impl(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, const at::Tensor & out); +}; +struct TORCH_API structured_isin_Scalar_Tensor_out : public at::meta::structured_isin_Scalar_Tensor { +void impl(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ffc1a7968820cc625fe615e4f5126d0cfead93d2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isin_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
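// [editor's note: illustrative sketch, not part of the vendored headers]
// The six structs that follow (one per isin overload) each expose `call` and
// `redispatch`. `redispatch` is how layered kernels (autograd, tracing,
// functorch) re-enter the dispatcher below their own key. Hypothetical
// wrapper-kernel sketch; the key-set mask is an assumption of this example:
//
//   at::Tensor isin_logging_kernel(c10::DispatchKeySet ks,
//       const at::Tensor & elements, const at::Tensor & test_elements,
//       bool assume_unique, bool invert) {
//     // ...record something, then continue dispatch below this layer:
//     return at::_ops::isin_Tensor_Tensor::redispatch(
//         ks & c10::after_autograd_keyset,
//         elements, test_elements, assume_unique, invert);
//   }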
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API isin_Tensor_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isin"; + static constexpr const char* overload_name = "Tensor_Tensor_out"; + static constexpr const char* schema_str = "isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); +}; + +struct TORCH_API isin_Tensor_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isin"; + static constexpr const char* overload_name = "Tensor_Tensor"; + static constexpr const char* schema_str = "isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor"; + static at::Tensor call(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert); +}; + +struct TORCH_API isin_Tensor_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isin"; + static constexpr const char* overload_name = "Tensor_Scalar_out"; + static constexpr const char* schema_str = "isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out); +}; + +struct TORCH_API isin_Tensor_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isin"; + static constexpr const char* overload_name = "Tensor_Scalar"; + static constexpr const char* schema_str = "isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor"; + static at::Tensor call(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert); +}; + +struct TORCH_API isin_Scalar_Tensor_out { + using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isin"; + static constexpr const char* overload_name = "Scalar_Tensor_out"; + static constexpr const char* schema_str = "isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); +}; + +struct TORCH_API isin_Scalar_Tensor { + using schema = at::Tensor (const at::Scalar &, const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isin"; + static constexpr const char* overload_name = "Scalar_Tensor"; + static constexpr const char* schema_str = "isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor"; + static at::Tensor call(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isinf.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isinf.h new file mode 100644 index 0000000000000000000000000000000000000000..baa0313c27dd7ccb4f2c7568b7455cf39c319bb0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isinf.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isinf(Tensor self) -> Tensor +inline at::Tensor isinf(const at::Tensor & self) { + return at::_ops::isinf::call(self); +} + +// aten::isinf.out(Tensor self, *, 
Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isinf_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::isinf_out::call(self, out); +} +// aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isinf_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::isinf_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isinf_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isinf_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ccd5f83fa9f0cf581cfe4df7831fc4f8ca7760c5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isinf_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor isinf(const at::Tensor & self); +TORCH_API at::Tensor & isinf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isinf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isinf_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isinf_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1de2845e00d3c3863ffcd8a20a8afb33cd1cc66f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isinf_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor isinf(const at::Tensor & self); +TORCH_API at::Tensor & isinf_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor NestedTensor_isinf(const at::Tensor & self); +TORCH_API at::Tensor isinf_sparse(const at::Tensor & self); +TORCH_API at::Tensor isinf_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor isinf_sparse_meta(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isinf_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isinf_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..26d8608adc98fc2fb2dccd07ec4b78068141e687 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isinf_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
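// [editor's note: illustrative sketch, not part of the vendored headers]
// In the schema strings below, Tensor(a!) marks `out` as mutated and aliased
// with the return value: isinf.out writes into the buffer you pass and hands
// back a reference to it. Sketch (INFINITY from <cmath>):
//
//   at::Tensor x = at::tensor({1.0, INFINITY});
//   at::Tensor out = at::empty({2}, at::kBool);
//   at::isinf_out(out, x);   // out is now [false, true]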
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API isinf { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isinf"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "isinf(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API isinf_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isinf"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan.h new file mode 100644 index 0000000000000000000000000000000000000000..ebc444b4cb196e14a038662a0f6c90139e9867da --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isnan(Tensor self) -> Tensor +inline at::Tensor isnan(const at::Tensor & self) { + return at::_ops::isnan::call(self); +} + +// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isnan_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::isnan_out::call(self, out); +} +// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isnan_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::isnan_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..061c49d67805afa7dba4a53da0a3d5b6d928b5cf --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
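+// Usage sketch for the isnan entry points declared above (illustrative only;
+// assumes a linked ATen build):
+//
+//   #include <ATen/ATen.h>
+//   #include <limits>
+//
+//   at::Tensor t = at::full({2}, std::numeric_limits<float>::quiet_NaN());
+//   at::Tensor mask = at::isnan(t);            // -> bool tensor, all true here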
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & isnan_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isnan_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c581ff0460266fc7399fe63c316cd8fa6627fb77 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor isnan(const at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..06ef1d806316961f508d8e59339c4ed3ce6a93ba --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
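+// Per-backend headers like this one expose direct entry points that skip the
+// dispatcher; ordinary user code calls the at:: wrapper instead. A hedged
+// sketch of the difference:
+//
+//   at::Tensor t = at::rand({4});              // CPU tensor
+//   at::Tensor m1 = at::isnan(t);              // dispatcher routes to the CPU kernel
+//   at::Tensor m2 = at::cpu::isnan(t);         // calls the CPU kernel directly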
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor isnan(const at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f0346e8e1f0982768ef9e31f60e61e0842a20486 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & isnan_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor isnan(const at::Tensor & self); +TORCH_API at::Tensor NestedTensor_isnan(const at::Tensor & self); +TORCH_API at::Tensor isnan_sparse(const at::Tensor & self); +TORCH_API at::Tensor isnan_sparse_csr(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..bc86ee839d87233d4069df7ae921a11df1b716bc --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isnan_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API isnan { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isnan"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "isnan(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API isnan_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isnan"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "isnan.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf.h new file mode 100644 index 0000000000000000000000000000000000000000..9f8fa933bffd640383483399dda6200b6dff5c5a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isneginf(Tensor self) -> Tensor +inline at::Tensor isneginf(const at::Tensor & self) { + return at::_ops::isneginf::call(self); +} + +// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::isneginf_out::call(self, out); +} +// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::isneginf_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2d12455d879f19c34185650045a6048981ca1ccd --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor isneginf(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6a99f6bcefacfa33bbdc8a2aaeee096670816768 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
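+// Usage sketch (illustrative only): isneginf flags entries equal to -inf.
+//
+//   #include <ATen/ATen.h>
+//   #include <limits>
+//
+//   at::Tensor t = at::full({2}, -std::numeric_limits<double>::infinity());
+//   at::Tensor mask = at::isneginf(t);         // [true, true]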
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor isneginf(const at::Tensor & self); +TORCH_API at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cf602ed84d14629b3a1fef3f5ce534af62682603 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor isneginf(const at::Tensor & self); +TORCH_API at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..04ce3041c85b7ad5ea27e15333c50f649281f3f7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_isneginf : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6bcde8884d35a4bbccbfc30699e6400459cc3200 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
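+// isneginf is a structured kernel: the meta() declared in isneginf_meta.h
+// above validates and sizes the output, and the impl() in the native header
+// fills it. From user code that only shows up through the out= variant; a
+// minimal sketch (illustrative only):
+//
+//   at::Tensor t = at::linspace(-1.0, 1.0, 5);
+//   at::Tensor out = at::empty({5}, at::kBool);
+//   at::isneginf_out(out, t);                  // meta() checks, impl() writes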
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor isneginf(const at::Tensor & self); +TORCH_API at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_native.h new file mode 100644 index 0000000000000000000000000000000000000000..28e6b1168d794a90193a1ae89585e19cedd795a3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_isneginf_out : public at::meta::structured_isneginf { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor NestedTensor_isneginf(const at::Tensor & self); +TORCH_API at::Tensor isneginf_sparse(const at::Tensor & self); +TORCH_API at::Tensor & isneginf_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor isneginf_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & isneginf_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..719d357583092c8a66c1c2387e7e13359557ed56 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isneginf_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API isneginf { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isneginf"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "isneginf(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API isneginf_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isneginf"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "isneginf.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf.h new file mode 100644 index 0000000000000000000000000000000000000000..a47810ef212e1b643560c3809c7910c07240a961 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isposinf(Tensor self) -> Tensor +inline at::Tensor isposinf(const at::Tensor & self) { + return at::_ops::isposinf::call(self); +} + +// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::isposinf_out::call(self, out); +} +// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::isposinf_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..07df3fd3fdbca247a53d7e10e8fa34349209e40b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor isposinf(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..777957198ef37357e63d7cee27350ca1e7b4424a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
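+// Usage sketch (illustrative only): isposinf is the +inf counterpart of
+// isneginf.
+//
+//   #include <ATen/ATen.h>
+//   #include <limits>
+//
+//   at::Tensor t = at::full({2}, std::numeric_limits<double>::infinity());
+//   at::Tensor mask = at::isposinf(t);         // [true, true]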
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor isposinf(const at::Tensor & self); +TORCH_API at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d632b84410c7f1256d1330d264cd7803a979fae3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor isposinf(const at::Tensor & self); +TORCH_API at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..d932046b2dc2cffaf28148374b7f27ae77650521 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_isposinf : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7b1cf9323e8851c22fedfc09e31ca30c39358cf1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
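+// The meta dispatch entries declared below run shape/dtype inference without
+// touching data, which is what tensors on the "meta" device use. A hedged
+// sketch (illustrative only):
+//
+//   at::Tensor m = at::empty({8}, at::TensorOptions().device(at::kMeta));
+//   at::Tensor r = at::isposinf(m);            // meta kernel: r is a bool meta
+//                                              // tensor of shape [8], no storage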
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor isposinf(const at::Tensor & self); +TORCH_API at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c836804358c6b42c32597c84293342c3b4588f29 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_isposinf_out : public at::meta::structured_isposinf { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor NestedTensor_isposinf(const at::Tensor & self); +TORCH_API at::Tensor isposinf_sparse(const at::Tensor & self); +TORCH_API at::Tensor & isposinf_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor isposinf_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & isposinf_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c4513f942aad0ac0324983f2f4f90162c9524387 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isposinf_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API isposinf { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isposinf"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "isposinf(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API isposinf_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isposinf"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "isposinf.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isreal.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isreal.h new file mode 100644 index 0000000000000000000000000000000000000000..134ea06e2f32bca6e741b1ffd11e0029037f3081 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isreal.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isreal(Tensor self) -> Tensor +inline at::Tensor isreal(const at::Tensor & self) { + return at::_ops::isreal::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a129b777a285afbae263e88c3c4b6ab7f9b2bf48 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor isreal(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isreal_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isreal_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fccff2628a5ceda882570bba4f466aba22886578 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isreal_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor isreal(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/isreal_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/isreal_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a921976457b14d3cb66719543fc894f0d45ba530 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/isreal_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
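+// Usage sketch (illustrative only): isreal is true where the imaginary part
+// is zero, and trivially all-true for real dtypes.
+//
+//   at::Tensor z = at::zeros({3}, at::kComplexFloat);
+//   at::Tensor r = at::isreal(z);              // [true, true, true]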
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API isreal { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isreal"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "isreal(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/istft.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/istft.h new file mode 100644 index 0000000000000000000000000000000000000000..391fe5a76ec0ed51c49e83eb2aeefd2c57531ed2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/istft.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor +inline at::Tensor istft(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length=::std::nullopt, ::std::optional<int64_t> win_length=::std::nullopt, const ::std::optional<at::Tensor> & window={}, bool center=true, bool normalized=false, ::std::optional<bool> onesided=::std::nullopt, ::std::optional<int64_t> length=::std::nullopt, bool return_complex=false) { + return at::_ops::istft::call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/istft_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/istft_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b40ef70588d4cfb92aee2106e635c0f3613e5799 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/istft_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
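+// Usage sketch (illustrative only; `spec` stands for an existing complex STFT
+// tensor, e.g. produced by at::stft with return_complex=true, and the sizes
+// are made up). The optional arguments mirror the schema above:
+//
+//   at::Tensor wav  = at::istft(spec, /*n_fft=*/400);   // defaults for the rest
+//   at::Tensor wav2 = at::istft(spec, 400, /*hop_length=*/100,
+//                               /*win_length=*/400, at::hann_window(400));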
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor istft(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length=::std::nullopt, ::std::optional<int64_t> win_length=::std::nullopt, const ::std::optional<at::Tensor> & window={}, bool center=true, bool normalized=false, ::std::optional<bool> onesided=::std::nullopt, ::std::optional<int64_t> length=::std::nullopt, bool return_complex=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/istft_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/istft_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8ed6459cb275ec36f39114a20337d7d20fce2a50 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/istft_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor istft(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length=::std::nullopt, ::std::optional<int64_t> win_length=::std::nullopt, const ::std::optional<at::Tensor> & window={}, bool center=true, bool normalized=false, ::std::optional<bool> onesided=::std::nullopt, ::std::optional<int64_t> length=::std::nullopt, bool return_complex=false); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/istft_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/istft_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..32724069bceb77610ca5f107a68e0d53cd1404ab --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/istft_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API istft { + using schema = at::Tensor (const at::Tensor &, int64_t, ::std::optional<int64_t>, ::std::optional<int64_t>, const ::std::optional<at::Tensor> &, bool, bool, ::std::optional<bool>, ::std::optional<int64_t>, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::istft"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor"; 
+ static at::Tensor call(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, bool normalized, ::std::optional<bool> onesided, ::std::optional<int64_t> length, bool return_complex); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, bool normalized, ::std::optional<bool> onesided, ::std::optional<int64_t> length, bool return_complex); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/item.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/item.h new file mode 100644 index 0000000000000000000000000000000000000000..d70eeedffd90785df99183c87b413769aa17aff3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/item.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/item_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/item_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..620c96110d7d5d75157600068e54c17e510a588a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/item_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Scalar item(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/item_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/item_native.h new file mode 100644 index 0000000000000000000000000000000000000000..13fa472e9885245868908bd098e2917eb8010601 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/item_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Scalar item(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/item_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/item_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f810d966189764b566b5528e3019174f836fe0e6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/item_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
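+// aten::item has no free-function wrapper in item.h above; it is reached
+// through the Tensor::item() method or the dispatch entries declared here.
+// A minimal usage sketch (illustrative only):
+//
+//   at::Tensor t = at::ones({2, 2});
+//   at::Scalar s = t.sum().item();             // lowers to aten::item
+//   double v = s.toDouble();                   // or: t.sum().item<double>()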
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API item { + using schema = at::Scalar (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::item"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "item(Tensor self) -> Scalar"; + static at::Scalar call(const at::Tensor & self); + static at::Scalar redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kaiser_window.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kaiser_window.h new file mode 100644 index 0000000000000000000000000000000000000000..826527bd5d5cee99727f23378cc6b9d893e4e16c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kaiser_window.h @@ -0,0 +1,80 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options={}) { + return at::_ops::kaiser_window::call(window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::kaiser_window::call(window_length, dtype, layout, device, pin_memory); +} + +// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::kaiser_window_periodic::call(window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::kaiser_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory); +} + +// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor 
+inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options={}) { + return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, dtype, layout, device, pin_memory); +} + +// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length) { + return at::_ops::kaiser_window_out::call(window_length, out); +} +// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_outf(int64_t window_length, at::Tensor & out) { + return at::_ops::kaiser_window_out::call(window_length, out); +} + +// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out); +} +// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out); +} + +// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic, double beta) { + return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out); +} +// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, double beta, at::Tensor & out) { + return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..95dd2f5df0dbb09647288e3a43e2b587e160cee8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h @@ -0,0 +1,34 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
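+// Usage sketch for the three kaiser_window overloads above (illustrative
+// only; assumes a linked ATen build):
+//
+//   at::Tensor w1 = at::kaiser_window(256);                      // base
+//   at::Tensor w2 = at::kaiser_window(256, /*periodic=*/false);  // .periodic
+//   at::Tensor w3 = at::kaiser_window(256, true, /*beta=*/8.0,
+//                       at::TensorOptions().dtype(at::kDouble)); // .beta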
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options={}); +TORCH_API at::Tensor kaiser_window(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length); +TORCH_API at::Tensor & kaiser_window_outf(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options={}); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic); +TORCH_API at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options={}); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic, double beta); +TORCH_API at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, double beta, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kaiser_window_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kaiser_window_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f7c72d05a0a8951eb1a7a95309df37ff54116c18 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kaiser_window_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor kaiser_window(int64_t window_length, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}); +TORCH_API at::Tensor & kaiser_window_out(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}); +TORCH_API at::Tensor & kaiser_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}); +TORCH_API at::Tensor & kaiser_window_beta_out(int64_t window_length, bool periodic, double beta, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kaiser_window_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kaiser_window_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1a9c49e664ed81441980e82d93ae00520366e0d2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kaiser_window_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API kaiser_window { + using schema = at::Tensor (int64_t, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kaiser_window"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API kaiser_window_periodic { + using schema = at::Tensor (int64_t, bool, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kaiser_window"; + static constexpr const char* overload_name = "periodic"; + static constexpr const char* schema_str = "kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API kaiser_window_beta { + using schema = at::Tensor (int64_t, bool, double, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kaiser_window"; + static constexpr const char* overload_name = "beta"; + static constexpr const char* schema_str = "kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, bool periodic, double beta, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API kaiser_window_out { + using schema = at::Tensor & (int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kaiser_window"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "kaiser_window.out(int window_length, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out); +}; + +struct TORCH_API kaiser_window_periodic_out { + using schema = at::Tensor & (int64_t, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kaiser_window"; + static constexpr const char* overload_name = "periodic_out"; + static constexpr const char* schema_str = "kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, bool periodic, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out); +}; + +struct TORCH_API kaiser_window_beta_out { + using schema = at::Tensor & (int64_t, bool, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kaiser_window"; + static constexpr const char* overload_name = "beta_out"; + static constexpr const char* schema_str = "kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, bool periodic, double beta, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div.h new file mode 100644 index 0000000000000000000000000000000000000000..9ec1fc34d7773bd087da5a7926cf04096a1d1ef1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor +inline at::Tensor kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, bool log_target=false) { + return at::_ops::kl_div::call(self, target, reduction, log_target); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..772d635d0c08b2714b85efd9bd37cd4d8679da6c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
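+// Usage sketch (illustrative only): aten::kl_div expects `self` to hold
+// log-probabilities and `target` probabilities (or log-probabilities when
+// log_target=true):
+//
+//   at::Tensor input  = at::log_softmax(at::randn({4, 5}), /*dim=*/1);
+//   at::Tensor target = at::softmax(at::randn({4, 5}), /*dim=*/1);
+//   at::Tensor loss   = at::kl_div(input, target, at::Reduction::Mean,
+//                                  /*log_target=*/false);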
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..772d635d0c08b2714b85efd9bd37cd4d8679da6c
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, bool log_target=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..80bf6bdace3e7a154df17064cd600a62ef4951dc
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, bool log_target=false);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c42b234177dea923e0afdff050a52fc7f17e88c2
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kl_div_ops.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/SymInt.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API kl_div {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::kl_div";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target);
+};
+
+}} // namespace at::_ops
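Each _ops struct carries the operator's registry key as name plus overload_name, and schema_str mirrors the registered schema. A hedged sketch of how that key can be resolved back through the dispatcher (the function name here is invented):

#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/ops/kl_div_ops.h>
#include <iostream>

// The name/overload_name pair identifies the operator in the dispatcher
// registry; findSchemaOrThrow resolves it to an operator handle.
void print_kl_div_schema() {
  auto op = c10::Dispatcher::singleton().findSchemaOrThrow(
      at::_ops::kl_div::name, at::_ops::kl_div::overload_name);
  std::cout << op.schema() << '\n';  // same text as schema_str above
}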
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kron.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kron.h
new file mode 100644
index 0000000000000000000000000000000000000000..69c4164b085a8dab5f6c8a1ef64c0ea5d14816f2
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kron.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/OptionalArrayRef.h>
+#include <optional>
+
+
+
+#include <ATen/ops/kron_ops.h>
+
+namespace at {
+
+
+// aten::kron(Tensor self, Tensor other) -> Tensor
+inline at::Tensor kron(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::kron::call(self, other);
+}
+
+// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kron_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::kron_out::call(self, other, out);
+}
+// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kron_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::kron_out::call(self, other, out);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kron_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kron_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..85ad59dd0f41d91103874e2811513000df1bf08a
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kron_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor kron(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & kron_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & kron_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kron_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kron_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6579915f58def8849aabfd591a8d653e5261e123
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kron_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor kron(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & kron_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+} // namespace native
+} // namespace at
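The kron wrappers above illustrate the generated out-variant convention: kron_out takes the output tensor first, kron_outf takes it last in schema order, and both forward to the same at::_ops::kron_out::call. A small sketch (function name invented):

#include <ATen/ATen.h>

// Both wrappers write the Kronecker product into `out`; they differ only
// in where `out` sits in the C++ argument list.
void kron_into(const at::Tensor& a, const at::Tensor& b) {
  at::Tensor out = at::empty({0}, a.options());  // resized by the kernel
  at::kron_out(out, a, b);   // out-first variant
  at::kron_outf(a, b, out);  // schema-order variant (out last)
}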
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kron_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kron_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b6d957269e2d7945dd738b9d3e88881a92678d8
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kron_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/SymInt.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API kron {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::kron";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "kron(Tensor self, Tensor other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API kron_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::kron";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue.h
new file mode 100644
index 0000000000000000000000000000000000000000..e9bd3bc244aca97ebe5b9235bf6327f03868dfb3
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue.h
@@ -0,0 +1,158 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/OptionalArrayRef.h>
+#include <optional>
+
+
+
+#include <ATen/ops/kthvalue_ops.h>
+
+namespace at {
+
+
+// aten::kthvalue(Tensor self, SymInt k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) {
+    return at::_ops::kthvalue::call(self, k, dim, keepdim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) {
+    return at::_ops::kthvalue::call(self, k, dim, keepdim);
+  }
+}
+
+// aten::kthvalue(Tensor self, SymInt k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> kthvalue_symint(const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false) {
+    return at::_ops::kthvalue::call(self, k, dim, keepdim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false) {
+    return at::_ops::kthvalue::call(self, k, dim, keepdim);
+  }
+}
+
+// aten::kthvalue.values(Tensor self, SymInt k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) {
+    return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) {
+    return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices);
+  }
+}
+
+// aten::kthvalue.values(Tensor self, SymInt k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+    return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+    return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices);
+  }
+}
+
+// aten::kthvalue.values(Tensor self, SymInt k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false) {
+    return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false) {
+    return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices);
+  }
+}
+
+// aten::kthvalue.values(Tensor self, SymInt k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_outf(const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+    return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+    return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices);
+  }
+}
+
+// aten::kthvalue.dimname(Tensor self, SymInt k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) {
+    return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) {
+    return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim);
+  }
+}
+
+// aten::kthvalue.dimname(Tensor self, SymInt k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> kthvalue_symint(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim=false) {
+    return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim=false) {
+    return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim);
+  }
+}
+
+// aten::kthvalue.dimname_out(Tensor self, SymInt k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) {
+    return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) {
+    return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices);
+  }
+}
+
+// aten::kthvalue.dimname_out(Tensor self, SymInt k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+    return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+    return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices);
+  }
+}
+
+// aten::kthvalue.dimname_out(Tensor self, SymInt k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim=false) {
+    return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim=false) {
+    return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices);
+  }
+}
+
+// aten::kthvalue.dimname_out(Tensor self, SymInt k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_outf(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+    return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+    return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices);
+  }
+}
+
+}
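The enable_if templates in the symint namespace above let the type of k pick the overload explicitly: int64_t for concrete sizes, c10::SymInt for symbolic shapes (for example under torch.compile tracing). A sketch (function name invented):

#include <ATen/ATen.h>

// Both calls return a (values, indices) tuple, as in the schema; only the
// type of k differs, which is what the enable_if constraints key on.
void kthvalue_variants(const at::Tensor& t) {
  auto a = at::symint::kthvalue<int64_t>(t, /*k=*/2);
  auto b = at::symint::kthvalue<c10::SymInt>(t, c10::SymInt(2));
  (void)a; (void)b;
}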
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b6317befc588ab643965be00ec03e8c7d0ce661c
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> kthvalue_symint(const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..987fc7e8338005303a9eaedd6560609487800cae
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> kthvalue_symint(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_outf(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2504cefc6686244e43fc964e01388b9d95928ccd
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_outf(const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+
+} // namespace cpu
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9d80a1e1d47c3c943cab9f9216ca0e70e229c56e
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_outf(const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+
+} // namespace cuda
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..77a0e33cf4de19ed5bb8894d650abfc38e29676c
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out_cpu(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out_cuda(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..571064fbf2fed95009fbe87f8795c1f8410a9c08
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/kthvalue_ops.h
@@ -0,0 +1,62 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/SymInt.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API kthvalue {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, c10::SymInt, int64_t, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::kthvalue";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "kthvalue(Tensor self, SymInt k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim);
+};
+
+struct TORCH_API kthvalue_values {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, c10::SymInt, int64_t, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::kthvalue";
+  static constexpr const char* overload_name = "values";
+  static constexpr const char* schema_str = "kthvalue.values(Tensor self, SymInt k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)";
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+};
+
+struct TORCH_API kthvalue_dimname {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, c10::SymInt, at::Dimname, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::kthvalue";
+  static constexpr const char* overload_name = "dimname";
+  static constexpr const char* schema_str = "kthvalue.dimname(Tensor self, SymInt k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim);
+};
+
+struct TORCH_API kthvalue_dimname_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, c10::SymInt, at::Dimname, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::kthvalue";
+  static constexpr const char* overload_name = "dimname_out";
+  static constexpr const char* schema_str = "kthvalue.dimname_out(Tensor self, SymInt k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)";
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/l1_loss.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/l1_loss.h
new file mode 100644
index 0000000000000000000000000000000000000000..c3aef4a17985388242bacfe12ab45749383ffeb0
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/l1_loss.h
@@ -0,0 +1,31 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/OptionalArrayRef.h>
+#include <optional>
+
+
+
+#include <ATen/ops/l1_loss_ops.h>
+
+namespace at {
+
+
+// aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
+inline at::Tensor l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
+    return at::_ops::l1_loss::call(self, target, reduction);
+}
+
+}
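The `int reduction=Mean` default in the schema corresponds to the at::Reduction enum on the C++ side (None=0, Mean=1, Sum=2). For instance (helper name invented):

#include <ATen/ATen.h>

// Skip the mean and keep the per-element absolute differences.
at::Tensor l1_per_element(const at::Tensor& x, const at::Tensor& y) {
  return at::l1_loss(x, y, at::Reduction::None);
}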
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/l1_loss_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/l1_loss_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7c2b75eadf2e5ef9d3c553f82bcb8b90e386be90
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/l1_loss_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/l1_loss_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/l1_loss_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e9677a303a96cb023e4503925283b3d988036113
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/l1_loss_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/l1_loss_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/l1_loss_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1611d8961450a6629e70f13509cd370b3b06dd37
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/l1_loss_ops.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/SymInt.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API l1_loss {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::l1_loss";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & target, int64_t reduction);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/layer_norm.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/layer_norm.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a44b7378ce4006c390561609f99617abe3bf5e7
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/layer_norm.h
@@ -0,0 +1,48 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/OptionalArrayRef.h>
+#include <optional>
+
+
+
+#include <ATen/ops/layer_norm_ops.h>
+
+namespace at {
+
+
+// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
+inline at::Tensor layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) {
+    return at::_ops::layer_norm::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, cudnn_enable);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) {
+    return at::_ops::layer_norm::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, cudnn_enable);
+  }
+}
+
+// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
+inline at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) {
+    return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor layer_norm(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) {
+    return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable);
+  }
+}
+
+}
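In the layer_norm wrappers above, weight and bias default to empty optionals ({}), so the normalization can run without affine parameters while eps and cudnn_enable keep their schema defaults. A sketch (helper name invented):

#include <ATen/ATen.h>

// Normalize over the last dimension with no learned scale or shift.
at::Tensor ln_no_affine(const at::Tensor& x) {
  return at::layer_norm(x, /*normalized_shape=*/{x.size(-1)});
}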
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..aa9273f9e08f4895ce022041fbf838e404715426
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true);
+TORCH_API at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/layer_norm_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/layer_norm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c47a0a195c601703bf9bb80dea5bba59195df06
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/layer_norm_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/layer_norm_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/layer_norm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff51b3e1ac3264ff4c3c199d13b5dc98499bd5ea
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/layer_norm_ops.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/SymInt.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API layer_norm {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, double, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::layer_norm";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor";
+  static at::Tensor call(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enable);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enable);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm.h
new file mode 100644
index 0000000000000000000000000000000000000000..b81e105156b483e79ede8ee915208e05a647e775
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm.h
@@ -0,0 +1,45 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/OptionalArrayRef.h>
+#include <optional>
+
+
+
+#include <ATen/ops/lcm_ops.h>
+
+namespace at {
+
+
+// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::lcm_out::call(self, other, out);
+}
+// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::lcm_out::call(self, other, out);
+}
+
+// aten::lcm(Tensor self, Tensor other) -> Tensor
+inline at::Tensor lcm(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::lcm::call(self, other);
+}
+
+// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::lcm_::call(self, other);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3c91e7a72e8482be8499c95d8ffcbae681c9e462
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor lcm(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a6f6f3ac4d7727a8fb7e901f1ca11bf1fd829eee
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor lcm(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..34de24f1002db09b95846ea3789387a4ff4e8973
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor lcm(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..9624d58dafb0acee5bdd18ccb8d40539a8d48b4b
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_lcm : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace native
+} // namespace at
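structured_lcm above is the meta half of a structured kernel: meta() configures shape and dtype inference and output allocation, while the matching impl() (declared in lcm_native.h below) fills the output. The following is a paraphrase of how such a binary structured op is typically wired, not the actual PyTorch source:

#include <ATen/TensorIterator.h>

// Hypothetical paraphrase: meta() builds a TensorIterator over the inputs,
// which infers the broadcast shape and common dtype and declares the output
// that impl() will later write into.
// void at::meta::structured_lcm::meta(const at::Tensor& self, const at::Tensor& other) {
//   build_borrowing_binary_op(maybe_get_output(), self, other);
// }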
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fd31f490f018fda7ec3868d026a3cd2d4618a676
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor lcm(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f3f144e0e3a2fd4b0c9dba4cd719edf6b59cf25
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/lcm_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_lcm_out : public at::meta::structured_lcm {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a986ce593d325a9994e376827faaee412211b975
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lcm_ops.h
@@ -0,0 +1,51 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/SymInt.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API lcm_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lcm";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API lcm {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lcm";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "lcm(Tensor self, Tensor other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API lcm_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lcm_";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
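The trailing underscore in aten::lcm_ marks the in-place variant, matching the Tensor(a!) alias annotation in its schema string: self is mutated and returned. A sketch (helper name invented; lcm is defined for integral dtypes):

#include <ATen/ATen.h>

// Overwrite `a` with the elementwise least common multiple of a and b.
void lcm_inplace(at::Tensor& a, const at::Tensor& b) {
  at::lcm_(a, b);  // a now holds lcm(a, b)
}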
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ldexp.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ldexp.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac93bb90d3057873b933b6dab1e0848a2e8f4ad6
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ldexp.h
@@ -0,0 +1,45 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/OptionalArrayRef.h>
+#include <optional>
+
+
+
+#include <ATen/ops/ldexp_ops.h>
+
+namespace at {
+
+
+// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor ldexp(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::ldexp_Tensor::call(self, other);
+}
+
+// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & ldexp_(at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::ldexp_::call(self, other);
+}
+
+// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & ldexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::ldexp_out::call(self, other, out);
+}
+// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & ldexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::ldexp_out::call(self, other, out);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ldexp_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ldexp_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..be2e73d3ea1a4369ab84ac0b1cf6c3a9c20c7f54
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ldexp_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor ldexp(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & ldexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & ldexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & ldexp_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ldexp_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ldexp_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a2f8d0e1015ac156de8ad778785aa3cdd9e39556
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ldexp_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor ldexp(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & ldexp_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & ldexp_(at::Tensor & self, const at::Tensor & other);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/ldexp_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/ldexp_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0abe701abadd7278d827c2cb4c4f1bf3a5c52655
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/ldexp_ops.h
@@ -0,0 +1,51 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/SymInt.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API ldexp_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::ldexp";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "ldexp.Tensor(Tensor self, Tensor other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API ldexp_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::ldexp_";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API ldexp_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::ldexp";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/le.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/le.h new file mode 100644 index 0000000000000000000000000000000000000000..403849343495bda546e4e2664546b7066dd031f2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/le.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/util/OptionalArrayRef.h> + + + +#include <ATen/ops/le_ops.h> + +namespace at { + + +// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::le_Scalar_out::call(self, other, out); +} +// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & le_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::le_Scalar_out::call(self, other, out); +} + +// aten::le.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor le(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::le_Scalar::call(self, other); +} + +// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::le_Tensor_out::call(self, other, out); +} +// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & le_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::le_Tensor_out::call(self, other, out); +} + +// aten::le.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor le(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::le_Tensor::call(self, other); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0aa3c2d51d6e608b9ac8a8c25d9a79b6447cb612 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor le(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor le(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional
} // namespace at
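The le overloads above return a boolean tensor; the out variants additionally require a destination of the right dtype and shape. A short sketch (libtorch assumed; expected values in comments):

#include <torch/torch.h>

int main() {
  torch::Tensor t = torch::tensor({1, 2, 3});

  torch::Tensor a = torch::le(t, 2);                         // aten::le.Scalar -> {true, true, false}
  torch::Tensor b = torch::le(t, torch::tensor({3, 2, 1}));  // aten::le.Tensor -> {true, true, false}

  // Out variant: the destination must be a bool tensor.
  torch::Tensor out = torch::empty({3}, torch::kBool);
  torch::le_out(out, t, 2);
  return 0;
}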
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/le_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ff07d5f28e8248329c9812fbf46109f42846ab88 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_cpu_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor le(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor le(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/le_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0c1033bedd4524ca573edb3c362856365d1fa2d9 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_cuda_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor le(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor le(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/le_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..b376331a0f7d8603117e74c42088a2c8f5c3a6f0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_meta.h @@ -0,0 +1,32 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_le_Scalar : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Scalar & other); +}; +struct TORCH_API structured_le_Tensor : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace meta
} // namespace at
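The structured_le_* meta functions above compute only output metadata (shape and dtype); that is what makes data-free shape inference on the meta device work. A hedged sketch (libtorch assumed; no real kernel runs here):

#include <torch/torch.h>
#include <iostream>

int main() {
  // Meta tensors carry shape/dtype but no storage, so at::le runs only the
  // meta() half of the structured kernel declared above.
  auto opts = torch::TensorOptions().device(torch::kMeta);
  torch::Tensor x = torch::empty({4, 1}, opts);
  torch::Tensor y = torch::empty({1, 5}, opts);

  torch::Tensor z = torch::le(x, y);                   // no data is touched
  std::cout << z.sizes() << " " << z.dtype() << "\n";  // [4, 5] bool
  return 0;
}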
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/le_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..24bf8d29576edb54f490d2b6d08a2ba1809ad1be --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_meta_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor le(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor le(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/le_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b760aa2dd69b8fa65121872e7a4f2c6c6d987049 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_native.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/le_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_le_Scalar_out : public at::meta::structured_le_Scalar { +void impl(const at::Tensor & self, const at::Scalar & other, const at::Tensor & out); +}; +TORCH_API at::Tensor le_quantized_cpu(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_out_quantized_cpu(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +struct TORCH_API structured_le_Tensor_out : public at::meta::structured_le_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor le_quantized_cpu(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_out_quantized_cpu(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/le_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1cc10a8f4063865c35129a219fc2976a3a982047 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/le_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API le_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::le"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API le_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::le"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "le.Scalar(Tensor self, Scalar other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API le_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::le"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API le_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::le"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "le.Tensor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API le__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::le_"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "le_.Scalar(Tensor(a!)
self, Scalar other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API le__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::le_"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu.h new file mode 100644 index 0000000000000000000000000000000000000000..e45cd2273c6dc23a351b474181342659f33f0bfc --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/util/OptionalArrayRef.h> + + + +#include <ATen/ops/leaky_relu_ops.h> + +namespace at { + + +// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu_out::call(self, negative_slope, out); +} +// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) { + return at::_ops::leaky_relu_out::call(self, negative_slope, out); +} + +// aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor +inline at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu::call(self, negative_slope); +} + +// aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!) +inline at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu_::call(self, negative_slope); +} + +}
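leaky_relu maps x to x for x >= 0 and to negative_slope * x otherwise, with negative_slope defaulting to 0.01 as in the schemas above. Sketch (libtorch assumed):

#include <torch/torch.h>

int main() {
  torch::Tensor x = torch::tensor({-2.0, -0.5, 0.0, 3.0});

  torch::Tensor y = torch::leaky_relu(x);       // slope 0.01: {-0.02, -0.005, 0, 3}
  torch::Tensor z = torch::leaky_relu(x, 0.2);  // slope 0.2:  {-0.4, -0.1, 0, 3}

  // In-place variant aten::leaky_relu_; note autograd rejects the in-place
  // form when the slope is negative.
  torch::leaky_relu_(x, 0.2);
  return 0;
}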
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..d6803a77baf086891adb2b72d7ee62b3f0deba4f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/util/OptionalArrayRef.h> + + + +#include <ATen/ops/leaky_relu_backward_ops.h> + +namespace at { + + +// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input); +} +// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) { + return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input); +} + +// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor +inline at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3809d15b75fce1bca17bfbbcc6fd8f75aa2cff1f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9e7cae1fc4d7659723dbeebafca488f969def75a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c97d9b7b29ed607d5242b7918a4a7827f20d6c3e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..f3012b360fb73c548206d02d0c90b97b8566627a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_leaky_relu_backward : public TensorIteratorBase { + + + void meta(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +}; + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..91bb28867322e5c416276213c26e1f5b9db0187c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The
implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c7af963f66535c96a55ff87b85062cadd29e9d8d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/leaky_relu_backward_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_leaky_relu_backward_out : public at::meta::structured_leaky_relu_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..060904c93362bdded855f8ec87e784df215edab6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API leaky_relu_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::leaky_relu_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!)
grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input); +}; + +struct TORCH_API leaky_relu_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::leaky_relu_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6ca01f7f8231e08ec53eb2bc31be3bf175e564f7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01); + +} // namespace compositeexplicitautogradnonfunctional
} // namespace at
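leaky_relu_backward applies the chain rule for the activation: the gradient passes through unchanged where the input is positive and is scaled by negative_slope elsewhere; self_is_result records whether `self` is the forward output rather than the forward input (relevant after the in-place variant, where only the result still exists). A hedged reference version in plain tensor ops:

#include <torch/torch.h>

// Reference semantics of aten::leaky_relu_backward for slope > 0, where the
// signs of the forward input and output agree, so `self` may be either one
// (which is what self_is_result relies on).
torch::Tensor leaky_relu_backward_ref(const torch::Tensor& grad_output,
                                      const torch::Tensor& self,
                                      double negative_slope) {
  return torch::where(self > 0, grad_output, grad_output * negative_slope);
}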
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c0d89378e5411a4deef6375b6ec0db0dd17f489e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out); +TORCH_API at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..71fb57fec0b992d8fae2862c47c4837771e0156e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out); +TORCH_API at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..8ac71d4bff24c30aed0023f6180b3f102a591973 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_leaky_relu : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Scalar & negative_slope); +}; + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3e9105b2b90139d93d893e83899109ed13f09def --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out); +TORCH_API at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_native.h new file mode 100644 index 0000000000000000000000000000000000000000..914a4235d753c2e12471836c992bf9bee7026e63 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/leaky_relu_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_leaky_relu_out : public at::meta::structured_leaky_relu { +void impl(const at::Tensor & self, const at::Scalar & negative_slope, const at::Tensor & out); +}; +TORCH_API at::Tensor leaky_relu_quantized_cpu(const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_out_quantized_cpu(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out); +TORCH_API at::Tensor & leaky_relu_quantized_cpu_(at::Tensor & self, const at::Scalar & negative_slope=0.01); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9592bb0c1d9b9249fc285834dedd404fcb6d2053 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/leaky_relu_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API leaky_relu_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::leaky_relu"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!)
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out); +}; + +struct TORCH_API leaky_relu { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::leaky_relu"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & negative_slope); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope); +}; + +struct TORCH_API leaky_relu_ { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::leaky_relu_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & negative_slope); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & negative_slope); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp.h new file mode 100644 index 0000000000000000000000000000000000000000..09193a64ef8ff77ecdd34d5004e049451ee457a7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/util/OptionalArrayRef.h> + + + +#include <ATen/ops/lerp_ops.h> + +namespace at { + + +// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) { + return at::_ops::lerp_Scalar_out::call(self, end, weight, out); +} +// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) { + return at::_ops::lerp_Scalar_out::call(self, end, weight, out); +} + +// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) { + return at::_ops::lerp_Tensor_out::call(self, end, weight, out); +} +// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) { + return at::_ops::lerp_Tensor_out::call(self, end, weight, out); +} + +// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor +inline at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) { + return at::_ops::lerp_Scalar::call(self, end, weight); +} + +// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor +inline at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) { + return at::_ops::lerp_Tensor::call(self, end, weight); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7b17c749ac1dd9c772fa35b89f128eed7406bdb7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); + +} // namespace compositeexplicitautogradnonfunctional
} // namespace at
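lerp computes the linear interpolation self + weight * (end - self); the Scalar and Tensor overloads above differ only in whether weight is a single number or broadcasts elementwise. Sketch (libtorch assumed):

#include <torch/torch.h>

int main() {
  torch::Tensor start = torch::zeros({3});
  torch::Tensor end = torch::full({3}, 10.0);

  // aten::lerp.Scalar: start + 0.5 * (end - start) = {5, 5, 5}
  torch::Tensor mid = torch::lerp(start, end, 0.5);

  // aten::lerp.Tensor: elementwise weights -> {0, 5, 10}
  torch::Tensor v = torch::lerp(start, end, torch::tensor({0.0, 0.5, 1.0}));
  return 0;
}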
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c142ec781bfcba26fcd00dc02bd14772af3c58ba --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_cpu_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out); +TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out); +TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..19e22c9067533f2e94b5198fc8f3fe4d75129c0b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_cuda_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out); +TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out); +TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..a9cd0ad0488e4c6bf0331c612217837622ef5ffa --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_meta.h @@ -0,0 +1,32 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_lerp_Scalar : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +}; +struct TORCH_API structured_lerp_Tensor : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +}; + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7caac039051020b0ee4818f01a91de73dd68665a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_meta_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out); +TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out); +TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); + +} // namespace meta
} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..26637159e0c579e6ec6585059092924a7352f55f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/lerp_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_lerp_Scalar : public at::meta::structured_lerp_Scalar { +void impl(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, const at::Tensor & out); +}; +struct TORCH_API structured_lerp_Tensor : public at::meta::structured_lerp_Tensor { +void impl(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9e108d816361a454423a12a8cc04de6e1577eae6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lerp_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API lerp__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lerp_"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "lerp_.Scalar(Tensor(a!)
self, Tensor end, Scalar weight) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +}; + +struct TORCH_API lerp__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lerp_"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +}; + +struct TORCH_API lerp_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lerp"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out); +}; + +struct TORCH_API lerp_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lerp"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out); +}; + +struct TORCH_API lerp_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lerp"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +}; + +struct TORCH_API lerp_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lerp"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/less.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/less.h new file mode 100644 index 0000000000000000000000000000000000000000..b97ea699ec2d2e94c5fc2301b73bc63c869c9f72 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/less.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/util/OptionalArrayRef.h> + + + +#include <ATen/ops/less_ops.h> + +namespace at { + + +// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::less_Scalar_out::call(self, other, out); +} +// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & less_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::less_Scalar_out::call(self, other, out); +} + +// aten::less.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor less(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::less_Scalar::call(self, other); +} + +// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_Tensor_out::call(self, other, out); +} +// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & less_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::less_Tensor_out::call(self, other, out); +} + +// aten::less.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor less(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_Tensor::call(self, other); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/less_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/less_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..21e33567e26055b5b5b9e090ff83e948e5b0e4b3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/less_compositeimplicitautograd_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor less(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & less_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & less_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor less(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & less_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & less_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeimplicitautograd
} // namespace at
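less (and less_equal below) are NumPy-style aliases for lt/le, which is why they appear only under the compositeimplicitautograd key above: they decompose into the underlying operator instead of registering kernels of their own. A small check (libtorch assumed):

#include <torch/torch.h>
#include <cassert>

int main() {
  torch::Tensor t = torch::tensor({1, 2, 3});

  torch::Tensor a = torch::less(t, 2);  // alias...
  torch::Tensor b = torch::lt(t, 2);    // ...for the same comparison
  assert(torch::equal(a, b));
  return 0;
}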
+inline at::Tensor & less_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::less_equal_Scalar_out::call(self, other, out); +} + +// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor less_equal(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::less_equal_Scalar::call(self, other); +} + +// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_equal_Tensor_out::call(self, other, out); +} +// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & less_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::less_equal_Tensor_out::call(self, other, out); +} + +// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor less_equal(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_equal_Tensor::call(self, other); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/less_equal_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/less_equal_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6cbf3218857e3cf65a3cba0b653b32876f39a1ee --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/less_equal_compositeimplicitautograd_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & less_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & less_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/less_equal_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/less_equal_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f543e3cb191a6ca9f6c175ca7c3934ba34e759ad --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/less_equal_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & less_equal_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & less_equal_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/less_equal_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/less_equal_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..fc7aaa742ac8b240567b1164e01ad6e202ae7f2c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/less_equal_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API less_equal_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::less_equal"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API less_equal_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::less_equal"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "less_equal.Scalar(Tensor self, Scalar other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API less_equal_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::less_equal"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API less_equal_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::less_equal"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "less_equal.Tensor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API less_equal__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::less_equal_"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API less_equal__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::less_equal_"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "less_equal_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/less_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/less_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5e7de14fcb8b7709965856e5ed2dccc5ccd46397 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/less_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor less(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & less_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & less_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor less(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & less_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & less_(at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/less_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/less_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..42907cdcbf50b56d6ce7794e47dd18803a57ecd6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/less_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API less_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::less"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API less_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::less"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "less.Scalar(Tensor self, Scalar other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API less_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::less"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API less_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::less"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "less.Tensor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API less__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::less_"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API less__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::less_"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "less_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma.h new file mode 100644 index 0000000000000000000000000000000000000000..58bd5ad071e6aee66b383013e009034692603e40 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::lgamma_out::call(self, out); +} +// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::lgamma_out::call(self, out); +} + +// aten::lgamma(Tensor self) -> Tensor +inline at::Tensor lgamma(const at::Tensor & self) { + return at::_ops::lgamma::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0005e3e2c37b060d5bf0a70d54520a45f6e83aae --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor lgamma(const at::Tensor & self); +TORCH_API at::Tensor & lgamma_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a298524ff3608369a915e4631c7fa79d128327de --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor lgamma(const at::Tensor & self); +TORCH_API at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & lgamma_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a8b741629d2bd15e2e521aa0690370ff88afdf70 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor lgamma(const at::Tensor & self); +TORCH_API at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & lgamma_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..a74ffa7ba66d2f664fa6eb25cb6a022f2fccda58 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_lgamma : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9488dbb0fbd7e2727fc8ef58bd85e0060a01e017 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor lgamma(const at::Tensor & self); +TORCH_API at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & lgamma_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_native.h new file mode 100644 index 0000000000000000000000000000000000000000..334547b18ddf816d25b72d170d25636dbb2300da --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_lgamma_out : public at::meta::structured_lgamma { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7481771bbcf7110b21df4891c23ec96e229e9092 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lgamma_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API lgamma_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lgamma"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API lgamma_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lgamma_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lgamma_(Tensor(a!) 
self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API lgamma { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lgamma"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lgamma(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lift.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift.h new file mode 100644 index 0000000000000000000000000000000000000000..b20ae96d9550faf14953337226e7e5245d0de521 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lift(Tensor self) -> Tensor +inline at::Tensor lift(const at::Tensor & self) { + return at::_ops::lift::call(self); +} + +// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lift_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::lift_out::call(self, out); +} +// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lift_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::lift_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3cb29f60ea300d3cd4409655df4756ab7b4fa49f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor lift(const at::Tensor & self); +TORCH_API at::Tensor & lift_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lift_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh.h new file mode 100644 index 0000000000000000000000000000000000000000..dffb46b1571964e9ed9c26aa423dd467414c5f27 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lift_fresh(Tensor(a) self) -> Tensor(a) +inline at::Tensor lift_fresh(const at::Tensor & self) { + return at::_ops::lift_fresh::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..212075e1845b2dcb3b285394b2f629fe076d6c08 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor lift_fresh(const at::Tensor & self); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..f79d783ab8a6a92cde2b4c72e4f802ee57bd6627 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lift_fresh_copy(Tensor self) -> Tensor +inline at::Tensor lift_fresh_copy(const at::Tensor & self) { + return at::_ops::lift_fresh_copy::call(self); +} + +// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lift_fresh_copy_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::lift_fresh_copy_out::call(self, out); +} +// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & lift_fresh_copy_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::lift_fresh_copy_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..152b5e3aff4776a7447ada08e35dad52330ef4ab --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & lift_fresh_copy_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lift_fresh_copy_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1a8a1f2bd7ccebeb3c93257197b401fcba78d287 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor lift_fresh_copy(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..807f054ab691760c06be167f1b0130d70ca17a39 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & lift_fresh_copy_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor lift_fresh_copy(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3673e63661b0d05ca3a5e0aa14fd42d9bbd740d6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_copy_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API lift_fresh_copy { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lift_fresh_copy"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lift_fresh_copy(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API lift_fresh_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lift_fresh_copy"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "lift_fresh_copy.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c83d9d31601a67b72d3c5a9e7e9287866970140d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor lift_fresh(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9c0566c5cce9b3dfc180a50acc5bf256d736ed81 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_fresh_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API lift_fresh { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lift_fresh"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lift_fresh(Tensor(a) self) -> Tensor(a)"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9bc4ff7dd8a7b7c29380592d59e3f6f0ed4b7b92 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor lift(const at::Tensor & self); +TORCH_API at::Tensor & lift_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..086ad19529662b7102f65acc67d02e919e96ba8e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lift_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API lift { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lift"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lift(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API lift_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lift"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky.h new file mode 100644 index 0000000000000000000000000000000000000000..51233a144d0d241071c45f29692c440fb89020d8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor +inline at::Tensor linalg_cholesky(const at::Tensor & self, bool upper=false) { + return at::_ops::linalg_cholesky::call(self, upper); +} + +// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper=false) { + return at::_ops::linalg_cholesky_out::call(self, upper, out); +} +// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out) { + return at::_ops::linalg_cholesky_out::call(self, upper, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8ed2daa57fe6c3c2af40c2bb59d14de6ee3dab76 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_cholesky(const at::Tensor & self, bool upper=false); +TORCH_API at::Tensor & linalg_cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper=false); +TORCH_API at::Tensor & linalg_cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex.h new file mode 100644 index 0000000000000000000000000000000000000000..dc0842e5b5092693c002167e6ad3b6cfe26dacc1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info) +inline ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false) { + return at::_ops::linalg_cholesky_ex::call(self, upper, check_errors); +} + +// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) +inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false) { + return at::_ops::linalg_cholesky_ex_L::call(self, upper, check_errors, L, info); +} +// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) +inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) { + return at::_ops::linalg_cholesky_ex_L::call(self, upper, check_errors, L, info); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0565d56eb29fd15e332f5d443a50e49b1ead87d8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..08869f0d54a674185c31388076c91183a0a33936 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bdc8da341fe3cf62d96334f8c3dc6611d9362373 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..906ae798b3841e4597ac892cdebef5d0c64bf0bb --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_cholesky_ex : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, bool upper, bool check_errors); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0dbbe3c9488f696a3b655de163bdf3064caea085 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..22efa1f352d939f6c8620a09d9e36394c518a4bb --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_cholesky_ex_out : public at::meta::structured_linalg_cholesky_ex { +void impl(const at::Tensor & self, bool upper, bool check_errors, const at::Tensor & L, const at::Tensor & info); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6b598083302da43f871b97d86fc2cfb79fbc056f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_cholesky_ex { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cholesky_ex"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, bool upper, bool check_errors); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors); +}; + +struct TORCH_API linalg_cholesky_ex_L { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cholesky_ex"; + static constexpr const char* overload_name = "L"; + static constexpr const char* schema_str = "linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!)
info)"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b68b184a7f06c483c4bbdd2f7b3b82ecad217c8b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_cholesky(const at::Tensor & self, bool upper=false); +TORCH_API at::Tensor & linalg_cholesky_out(const at::Tensor & self, bool upper, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..bac6a9be199fec4db16a1da634be4b39df8e3c14 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cholesky_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_cholesky { + using schema = at::Tensor (const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cholesky"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, bool upper); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper); +}; + +struct TORCH_API linalg_cholesky_out { + using schema = at::Tensor & (const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cholesky"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!)
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, bool upper, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cond.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cond.h new file mode 100644 index 0000000000000000000000000000000000000000..15f8d0ef3c47e0e31623c06344ecf4ae629a7d8c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cond.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor +inline at::Tensor linalg_cond(const at::Tensor & self, const ::std::optional<at::Scalar> & p=::std::nullopt) { + return at::_ops::linalg_cond::call(self, p); +} + +// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, const ::std::optional<at::Scalar> & p=::std::nullopt) { + return at::_ops::linalg_cond_out::call(self, p, out); +} +// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cond_outf(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::Tensor & out) { + return at::_ops::linalg_cond_out::call(self, p, out); +} + +// aten::linalg_cond.p_str(Tensor self, str p) -> Tensor +inline at::Tensor linalg_cond(const at::Tensor & self, c10::string_view p) { + return at::_ops::linalg_cond_p_str::call(self, p); +} + +// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, c10::string_view p) { + return at::_ops::linalg_cond_p_str_out::call(self, p, out); +} +// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cond_outf(const at::Tensor & self, c10::string_view p, at::Tensor & out) { + return at::_ops::linalg_cond_p_str_out::call(self, p, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..34aae7f53a9fe76d91341c2613b459595f9f5a3e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_cond(const at::Tensor & self, const ::std::optional<at::Scalar> & p=::std::nullopt); +TORCH_API at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, const ::std::optional<at::Scalar> & p=::std::nullopt); +TORCH_API at::Tensor & linalg_cond_outf(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::Tensor & out); +TORCH_API at::Tensor linalg_cond(const at::Tensor & self, c10::string_view p); +TORCH_API at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, c10::string_view p); +TORCH_API at::Tensor & linalg_cond_outf(const at::Tensor & self, c10::string_view p, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cond_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cond_native.h new file mode 100644 index 0000000000000000000000000000000000000000..845261ed472ffa1fbe286907fec6e5c3c9b90d91 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cond_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_cond(const at::Tensor & self, const ::std::optional<at::Scalar> & p=::std::nullopt); +TORCH_API at::Tensor & linalg_cond_out(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::Tensor & out); +TORCH_API at::Tensor linalg_cond(const at::Tensor & self, c10::string_view p); +TORCH_API at::Tensor & linalg_cond_out(const at::Tensor & self, c10::string_view p, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cond_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cond_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..37f57492fb190aad14fc370c32fca8dfd9dd5bae --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cond_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_cond { + using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Scalar> &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cond"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_cond(Tensor self, Scalar?
p=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const ::std::optional<at::Scalar> & p); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p); +}; + +struct TORCH_API linalg_cond_out { + using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Scalar> &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cond"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::Tensor & out); +}; + +struct TORCH_API linalg_cond_p_str { + using schema = at::Tensor (const at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cond"; + static constexpr const char* overload_name = "p_str"; + static constexpr const char* schema_str = "linalg_cond.p_str(Tensor self, str p) -> Tensor"; + static at::Tensor call(const at::Tensor & self, c10::string_view p); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p); +}; + +struct TORCH_API linalg_cond_p_str_out { + using schema = at::Tensor & (const at::Tensor &, c10::string_view, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cond"; + static constexpr const char* overload_name = "p_str_out"; + static constexpr const char* schema_str = "linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, c10::string_view p, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross.h new file mode 100644 index 0000000000000000000000000000000000000000..191e34209356638d6590a112c86f674cb53fb950 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor +inline at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1) { + return at::_ops::linalg_cross::call(self, other, dim); +} + +// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1) { + return at::_ops::linalg_cross_out::call(self, other, dim, out); +} +// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_cross_outf(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out) { + return at::_ops::linalg_cross_out::call(self, other, dim, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4e2ee1b2fcebe28755deeecdf898b4972bf6de1d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4047d55ae31f69377b383dfece1cccc1521f0b54 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +TORCH_API at::Tensor & linalg_cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +TORCH_API at::Tensor & linalg_cross_outf(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c082037e97d78b8c42d9b799b8754c2056c581ba --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +TORCH_API at::Tensor & linalg_cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +TORCH_API at::Tensor & linalg_cross_outf(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..d49d1b9f859d8f1584e44afbc489db72f237dc0a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_cross : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, const at::Tensor & other, int64_t dim); +}; + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3180c6eda9ce4178794d164c0e536d1d5e8c6273 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
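+// [editor's note] Hedged usage sketch for the linalg_cross API declared above; not part of the generated header, and it assumes a translation unit built against ATen: +//   at::Tensor a = at::randn({4, 3}); +//   at::Tensor b = at::randn({4, 3}); +//   at::Tensor c = at::linalg_cross(a, b, /*dim=*/-1);  // batched 3-D cross product over the last dimension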
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +TORCH_API at::Tensor & linalg_cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +TORCH_API at::Tensor & linalg_cross_outf(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0546aca62ad3b9bfbb8fe1f4a061761e5037dde5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_cross_out : public at::meta::structured_linalg_cross { +void impl(const at::Tensor & self, const at::Tensor & other, int64_t dim, const at::Tensor & out); +}; +TORCH_API at::Tensor linalg_cross_zerotensor(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f2f4061978dcafb0eb747070e631c316f3dba60f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_cross_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_cross { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cross"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, int64_t dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim); +}; + +struct TORCH_API linalg_cross_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cross"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_det.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_det.h new file mode 100644 index 0000000000000000000000000000000000000000..496ad26291c1818f17159ce42b43aaddda02f080 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_det.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_det(Tensor A) -> Tensor +inline at::Tensor linalg_det(const at::Tensor & A) { + return at::_ops::linalg_det::call(A); +} + +// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_det_out(at::Tensor & out, const at::Tensor & A) { + return at::_ops::linalg_det_out::call(A, out); +} +// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_det_outf(const at::Tensor & A, at::Tensor & out) { + return at::_ops::linalg_det_out::call(A, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_det_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_det_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..32d7a4890c9b4f78d2e41b1904828a713f7b0106 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_det_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
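+// [editor's note] A minimal sketch of the linalg_det entry points (editor-added, not torchgen output; at::randn and at::empty are standard ATen factories): +//   at::Tensor A = at::randn({2, 3, 3}); +//   at::Tensor d = at::linalg_det(A);              // one determinant per batch matrix, shape {2} +//   at::Tensor out = at::empty({2}, A.options()); +//   at::linalg_det_out(out, A);                    // out-first C++ convention +//   at::linalg_det_outf(A, out);                   // schema-order variant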
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_det(const at::Tensor & A); +TORCH_API at::Tensor & linalg_det_out(at::Tensor & out, const at::Tensor & A); +TORCH_API at::Tensor & linalg_det_outf(const at::Tensor & A, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_det_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_det_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4c1f8c98e7428d389f055b14126cea711dbe9bb2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_det_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_det(const at::Tensor & A); +TORCH_API at::Tensor & linalg_det_out(const at::Tensor & A, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_det_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_det_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..105ed830555f7b42a471f34521c7211d4ff13f6f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_det_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_det { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_det"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_det(Tensor A) -> Tensor"; + static at::Tensor call(const at::Tensor & A); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A); +}; + +struct TORCH_API linalg_det_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_det"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_det.out(Tensor A, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & A, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_diagonal.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_diagonal.h new file mode 100644 index 0000000000000000000000000000000000000000..2ba92aa8757c02451655145a6fdf78cd47a496d5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_diagonal.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a) +inline at::Tensor linalg_diagonal(const at::Tensor & A, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) { + return at::_ops::linalg_diagonal::call(A, offset, dim1, dim2); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_diagonal_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_diagonal_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f7e04d50c354f743c4ea427d83529a093fe7cc8a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_diagonal_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
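+// [editor's note] Illustrative use of linalg_diagonal (editor-added sketch): +//   at::Tensor A = at::randn({5, 4, 4}); +//   at::Tensor d = at::linalg_diagonal(A);  // diagonals of the last two dims, shape {5, 4} +// The Tensor(a) annotation in the schema above means the result aliases A rather than copying it.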
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_diagonal(const at::Tensor & A, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_diagonal_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_diagonal_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e8a45c4f8a1d20d9d23a21162eb4e7395aa3791e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_diagonal_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_diagonal(const at::Tensor & A, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_diagonal_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_diagonal_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1dfecb0f9bfa1e39cfc88b490f2e6afc1c5b1b82 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_diagonal_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_diagonal { + using schema = at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_diagonal"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)"; + static at::Tensor call(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig.h new file mode 100644 index 0000000000000000000000000000000000000000..d65f0b8555303547abd599098d0b779043160a49 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors) +inline ::std::tuple<at::Tensor,at::Tensor> linalg_eig(const at::Tensor & self) { + return at::_ops::linalg_eig::call(self); +} + +// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!)
eigenvectors) +inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & self) { + return at::_ops::linalg_eig_out::call(self, eigenvalues, eigenvectors); +} +// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) +inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_outf(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) { + return at::_ops::linalg_eig_out::call(self, eigenvalues, eigenvectors); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1e4a238b878fd59d4b324da7bceb7fe2ab2486c2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eig(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_outf(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..06425d06a910eb4d0573d6ab8055203ff17c71d6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
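+// [editor's note] Hedged sketch of calling linalg_eig; structured bindings unpack the returned ::std::tuple: +//   at::Tensor A = at::randn({3, 3}); +//   auto [w, V] = at::linalg_eig(A);  // complex eigenvalues w and eigenvectors V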
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eig(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_outf(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig_native.h new file mode 100644 index 0000000000000000000000000000000000000000..632f4418919521a35f0cabfdea51608b27ce67c4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eig(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8d7147ebc4d0984ff968ba23b4e1227fd4fc8225 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eig_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_eig { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_eig"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API linalg_eig_out { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_eig"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!)
eigenvectors)"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigh.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigh.h new file mode 100644 index 0000000000000000000000000000000000000000..62a7fe33625bb706620312f30ed240a61066232d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigh.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors) +inline ::std::tuple<at::Tensor,at::Tensor> linalg_eigh(const at::Tensor & self, c10::string_view UPLO="L") { + return at::_ops::linalg_eigh::call(self, UPLO); +} + +// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) +inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out(at::Tensor & eigvals, at::Tensor & eigvecs, const at::Tensor & self, c10::string_view UPLO="L") { + return at::_ops::linalg_eigh_eigvals::call(self, UPLO, eigvals, eigvecs); +} +// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) +inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) { + return at::_ops::linalg_eigh_eigvals::call(self, UPLO, eigvals, eigvecs); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigh_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigh_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..405a902229fca9a415c7cb7c2eb664c8234d4139 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigh_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eigh(const at::Tensor & self, c10::string_view UPLO="L"); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out(at::Tensor & eigvals, at::Tensor & eigvecs, const at::Tensor & self, c10::string_view UPLO="L"); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigh_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigh_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9fac55b1eda5c0ae4185d29b5bb394015430ba4f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigh_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eigh(const at::Tensor & self, c10::string_view UPLO="L"); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigh_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigh_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e60ee099c45cfd722d94354c0e108378a4a9977c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigh_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_eigh { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_eigh"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_eigh(Tensor self, str UPLO=\"L\") -> (Tensor eigenvalues, Tensor eigenvectors)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, c10::string_view UPLO); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO); +}; + +struct TORCH_API linalg_eigh_eigvals { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, c10::string_view, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_eigh"; + static constexpr const char* overload_name = "eigvals"; + static constexpr const char* schema_str = "linalg_eigh.eigvals(Tensor self, str UPLO=\"L\", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!)
eigenvectors)"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals.h new file mode 100644 index 0000000000000000000000000000000000000000..2f0d501517fccb493d2238d20c9f83af48b755e6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_eigvals(Tensor self) -> Tensor +inline at::Tensor linalg_eigvals(const at::Tensor & self) { + return at::_ops::linalg_eigvals::call(self); +} + +// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_eigvals_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::linalg_eigvals_out::call(self, out); +} +// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_eigvals_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::linalg_eigvals_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9928aee5101832efb3e6c42e7938f58143dc2891 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_eigvals(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f69f980817fc3c04fb03e18ee6b4de7cd93866f8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & linalg_eigvals_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & linalg_eigvals_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5a8d9a9cde8769f34eda20828056f1551f6b33f0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & linalg_eigvals_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & linalg_eigvals_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_native.h new file mode 100644 index 0000000000000000000000000000000000000000..cb89253e4c0fcc0b9aa494e34c2093c1e1468199 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_eigvals(const at::Tensor & self); +TORCH_API at::Tensor & linalg_eigvals_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d1c7b9e55469989f94c2c81df6781f4f359bec18 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvals_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
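+// [editor's note] Usage sketch (editor-added): linalg_eigvals computes eigenvalues only, avoiding the cost of materializing eigenvectors: +//   at::Tensor A = at::randn({3, 3}); +//   at::Tensor w = at::linalg_eigvals(A);  // complex-valued, shape {3}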
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_eigvals { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_eigvals"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_eigvals(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API linalg_eigvals_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_eigvals"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvalsh.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvalsh.h new file mode 100644 index 0000000000000000000000000000000000000000..0168f498c6f39b0d4dcf52f2aeb9ee9659a72915 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvalsh.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor +inline at::Tensor linalg_eigvalsh(const at::Tensor & self, c10::string_view UPLO="L") { + return at::_ops::linalg_eigvalsh::call(self, UPLO); +} + +// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_eigvalsh_out(at::Tensor & out, const at::Tensor & self, c10::string_view UPLO="L") { + return at::_ops::linalg_eigvalsh_out::call(self, UPLO, out); +} +// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_eigvalsh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) { + return at::_ops::linalg_eigvalsh_out::call(self, UPLO, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvalsh_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvalsh_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4f917f9aea8f7cfa9d277d9b1a49d1e55c4846e7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvalsh_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
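+// [editor's note] Hedged sketch for the Hermitian counterpart declared here: +//   at::Tensor A = at::randn({3, 3}); +//   at::Tensor w = at::linalg_eigvalsh(A + A.mT(), /*UPLO=*/"L");  // real eigenvalues of a symmetric matrix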
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_eigvalsh(const at::Tensor & self, c10::string_view UPLO="L"); +TORCH_API at::Tensor & linalg_eigvalsh_out(at::Tensor & out, const at::Tensor & self, c10::string_view UPLO="L"); +TORCH_API at::Tensor & linalg_eigvalsh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvalsh_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvalsh_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4eb84fd02a903c20f38af90bc106baf773dd908d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvalsh_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_eigvalsh(const at::Tensor & self, c10::string_view UPLO="L"); +TORCH_API at::Tensor & linalg_eigvalsh_out(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvalsh_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvalsh_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1bc40a87ccaae732982647e34a0a90a5a7f4b603 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_eigvalsh_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_eigvalsh { + using schema = at::Tensor (const at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_eigvalsh"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_eigvalsh(Tensor self, str UPLO=\"L\") -> Tensor"; + static at::Tensor call(const at::Tensor & self, c10::string_view UPLO); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO); +}; + +struct TORCH_API linalg_eigvalsh_out { + using schema = at::Tensor & (const at::Tensor &, c10::string_view, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_eigvalsh"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_eigvalsh.out(Tensor self, str UPLO=\"L\", *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product.h new file mode 100644 index 0000000000000000000000000000000000000000..26d50accd2719698a0092143b009b4ed9bead0d5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor +inline at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau) { + return at::_ops::linalg_householder_product::call(input, tau); +} + +// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_householder_product_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tau) { + return at::_ops::linalg_householder_product_out::call(input, tau, out); +} +// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_householder_product_outf(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) { + return at::_ops::linalg_householder_product_out::call(input, tau, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c2b2b3805e9449482aae1087c933cbf2865bac27 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
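+// [editor's note] linalg_householder_product is typically paired with at::geqrf, a standard ATen call (editor-added sketch): +//   at::Tensor A = at::randn({5, 3}); +//   auto [a, tau] = at::geqrf(A);                           // compact QR factorization +//   at::Tensor Q = at::linalg_householder_product(a, tau);  // assemble the explicit Q factor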
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau); +TORCH_API at::Tensor & linalg_householder_product_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tau); +TORCH_API at::Tensor & linalg_householder_product_outf(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fa57147b33b882e0b1d5dc05fb32511bc9ba4dc8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau); +TORCH_API at::Tensor & linalg_householder_product_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tau); +TORCH_API at::Tensor & linalg_householder_product_outf(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h new file mode 100644 index 0000000000000000000000000000000000000000..38e89da9c15294acd530a5416181f410a235701c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau); +TORCH_API at::Tensor & linalg_householder_product_out(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..013c43372bb6a8a018ff24eb947535a06be0ea0d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_householder_product_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
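+// [editor's note] The _out/_outf declarations above follow the general torchgen convention; a hedged sketch reusing a and tau from the geqrf example earlier: +//   at::Tensor out = at::empty({5, 3}, a.options()); +//   at::linalg_householder_product_out(out, a, tau);   // C++ style: out parameter first +//   at::linalg_householder_product_outf(a, tau, out);  // schema order: out parameter last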
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_householder_product { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_householder_product"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_householder_product(Tensor input, Tensor tau) -> Tensor"; + static at::Tensor call(const at::Tensor & input, const at::Tensor & tau); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau); +}; + +struct TORCH_API linalg_householder_product_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_householder_product"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv.h new file mode 100644 index 0000000000000000000000000000000000000000..ad86f725c067a9737c471cf9caccd9e15860bb58 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_inv(Tensor A) -> Tensor +inline at::Tensor linalg_inv(const at::Tensor & A) { + return at::_ops::linalg_inv::call(A); +} + +// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_inv_out(at::Tensor & out, const at::Tensor & A) { + return at::_ops::linalg_inv_out::call(A, out); +} +// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_inv_outf(const at::Tensor & A, at::Tensor & out) { + return at::_ops::linalg_inv_out::call(A, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..36389ef04bdec20da249a317aca4f220e8cf0f1c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
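
The paired spellings in linalg_inv.h above follow the generated convention: *_out takes the result tensor first, *_outf takes arguments in schema order with out last. A minimal sketch, assuming a well-conditioned input (linalg_inv throws on singular matrices):

    void inv_demo() {
      at::Tensor a = at::randn({3, 3}) + 3.0 * at::eye(3);  // keep it invertible
      at::Tensor inv = at::linalg_inv(a);

      at::Tensor out = at::empty_like(a);
      at::linalg_inv_out(out, a);   // out-first spelling
      at::linalg_inv_outf(a, out);  // schema-order spelling
    }
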
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_inv(const at::Tensor & A); +TORCH_API at::Tensor & linalg_inv_out(at::Tensor & out, const at::Tensor & A); +TORCH_API at::Tensor & linalg_inv_outf(const at::Tensor & A, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex.h new file mode 100644 index 0000000000000000000000000000000000000000..52202d8a66d1ad7688bff17e1df820f3bc02392f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info) +inline ::std::tuple linalg_inv_ex(const at::Tensor & A, bool check_errors=false) { + return at::_ops::linalg_inv_ex::call(A, check_errors); +} + +// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info) +inline ::std::tuple linalg_inv_ex_out(at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false) { + return at::_ops::linalg_inv_ex_inverse::call(A, check_errors, inverse, info); +} +// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info) +inline ::std::tuple linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) { + return at::_ops::linalg_inv_ex_inverse::call(A, check_errors, inverse, info); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6c96cf76734e9d1cd833c997f997e0e800b19ce3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
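
linalg_inv_ex is the non-throwing variant: with check_errors=false it always returns, reporting failures through the integer info tensor instead of raising. A sketch:

    void inv_ex_demo(const at::Tensor& a) {
      auto [inverse, info] = at::linalg_inv_ex(a, /*check_errors=*/false);
      // info is 0 where inversion succeeded; nonzero entries mark singular inputs.
      if (info.ne(0).any().item<bool>()) {
        // fall back, regularize, or surface the error to the caller
      }
    }
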
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple linalg_inv_ex(const at::Tensor & A, bool check_errors=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0663fa534d59acfc6dd5e7418119939d237666a4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple linalg_inv_ex(const at::Tensor & A, bool check_errors=false); +TORCH_API ::std::tuple linalg_inv_ex_out(at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false); +TORCH_API ::std::tuple linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f63198273c05b3017aa36e67ad58437d6326e945 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple linalg_inv_ex(const at::Tensor & A, bool check_errors=false); +TORCH_API ::std::tuple linalg_inv_ex_out(at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false); +TORCH_API ::std::tuple linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..8bc262b377f700a71ad9ce5e711bbcc9848d464d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_inv_ex : public at::impl::MetaBase { + + + void meta(const at::Tensor & A, bool check_errors); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2219f8b25fa0869e2ab3e84eaa9e8d3bac04cd6c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
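
Because linalg_inv_ex is a structured operator with a Meta registration (the structured_linalg_inv_ex::meta function above), it can run on meta tensors for shape and dtype inference without numeric work. A hedged sketch of that use; the exact info shape/dtype below is my reading of the batched schema, not something stated in this diff:

    void inv_ex_shape_inference() {
      // Meta tensors carry only metadata, so this performs validation and
      // output-shape computation but never touches data.
      at::Tensor a = at::empty({5, 3, 3}, at::TensorOptions().device(at::kMeta));
      auto [inverse, info] = at::linalg_inv_ex(a);
      // inverse: a [5, 3, 3] meta tensor; info: a [5] integer tensor, also on meta.
    }
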
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple linalg_inv_ex(const at::Tensor & A, bool check_errors=false); +TORCH_API ::std::tuple linalg_inv_ex_out(at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false); +TORCH_API ::std::tuple linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..92bdc90e1dbd3f7eb6ae54b49a253c42867716d7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_inv_ex_out : public at::meta::structured_linalg_inv_ex { +void impl(const at::Tensor & A, bool check_errors, const at::Tensor & inverse, const at::Tensor & info); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..811d46395f520d0637c15a0841ae388da5eb712b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ex_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_inv_ex { + using schema = ::std::tuple (const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_inv_ex"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)"; + static ::std::tuple call(const at::Tensor & A, bool check_errors); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors); +}; + +struct TORCH_API linalg_inv_ex_inverse { + using schema = ::std::tuple (const at::Tensor &, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_inv_ex"; + static constexpr const char* overload_name = "inverse"; + static constexpr const char* schema_str = "linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) 
info)"; + static ::std::tuple call(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a740fa733baceae04b930c22578d1280699145cb --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_inv(const at::Tensor & A); +TORCH_API at::Tensor & linalg_inv_out(const at::Tensor & A, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9ae3120a040bed052df8b3025c31e2ddd8482422 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_inv_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_inv { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_inv"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_inv(Tensor A) -> Tensor"; + static at::Tensor call(const at::Tensor & A); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A); +}; + +struct TORCH_API linalg_inv_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_inv"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_inv.out(Tensor A, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & A, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor.h new file mode 100644 index 0000000000000000000000000000000000000000..00d61bbd0b7875f4074a1a2622434396c872aff1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots) +inline ::std::tuple linalg_ldl_factor(const at::Tensor & self, bool hermitian=false) { + return at::_ops::linalg_ldl_factor::call(self, hermitian); +} + +// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots) +inline ::std::tuple linalg_ldl_factor_out(at::Tensor & LD, at::Tensor & pivots, const at::Tensor & self, bool hermitian=false) { + return at::_ops::linalg_ldl_factor_out::call(self, hermitian, LD, pivots); +} +// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots) +inline ::std::tuple linalg_ldl_factor_outf(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) { + return at::_ops::linalg_ldl_factor_out::call(self, hermitian, LD, pivots); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3a96a6b002f127b2c246b6fe19c7ae00f398f5f6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
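
A minimal sketch for linalg_ldl_factor, which expects a symmetric (or, with hermitian=true, Hermitian) input and returns the compact LD factor plus pivots; the symmetrization step is illustrative:

    void ldl_factor_demo() {
      at::Tensor s = at::randn({4, 4});
      s = s + s.mT();                                // symmetrize the input
      auto [ld, pivots] = at::linalg_ldl_factor(s);  // hermitian defaults to false
    }
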
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple linalg_ldl_factor(const at::Tensor & self, bool hermitian=false); +TORCH_API ::std::tuple linalg_ldl_factor_out(at::Tensor & LD, at::Tensor & pivots, const at::Tensor & self, bool hermitian=false); +TORCH_API ::std::tuple linalg_ldl_factor_outf(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex.h new file mode 100644 index 0000000000000000000000000000000000000000..eb6e8549bdae2863e72d0f6aa29ee0672800e03c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info) +inline ::std::tuple linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false) { + return at::_ops::linalg_ldl_factor_ex::call(self, hermitian, check_errors); +} + +// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) +inline ::std::tuple linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false) { + return at::_ops::linalg_ldl_factor_ex_out::call(self, hermitian, check_errors, LD, pivots, info); +} +// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) +inline ::std::tuple linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) { + return at::_ops::linalg_ldl_factor_ex_out::call(self, hermitian, check_errors, LD, pivots, info); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..71af3627e8d88c93aabb52abac14a3996e3fec82 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
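
The _ex variant adds an info tensor and a check_errors switch, mirroring linalg_inv_ex: with check_errors=false the factorization does not throw and failures are reported through info. Sketch:

    void ldl_factor_ex_demo(const at::Tensor& s) {
      auto [ld, pivots, info] =
          at::linalg_ldl_factor_ex(s, /*hermitian=*/false, /*check_errors=*/false);
      bool ok = info.eq(0).all().item<bool>();  // 0 means success per batch element
      (void)ok;
    }
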
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a1d68cf68b95b8a2746c564fef1f587317dbad24 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f7d150eb5aba1c9cbef1395a5e7c3d178b18df18 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..872ac0db58d48aaadcf8ac5783e02dc4b5bf1db2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_ldl_factor_ex : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, bool hermitian, bool check_errors); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ce02d63b7a46453bfb894aee57f97286d8190f3e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
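
The per-backend headers above (at::cpu::, at::cuda::, at::meta::) expose the same signatures as the public API but jump straight to the kernel registered for that dispatch key. A hedged sketch; this bypasses the dispatcher, so the inputs must already live on the matching device and no autograd or other dispatch-key handling is performed:

    void direct_cpu_call(const at::Tensor& s_cpu) {
      // Only safe when s_cpu is a CPU tensor and dispatcher services
      // (autograd, functionalization, device checks) are not needed.
      auto [ld, pivots, info] = at::cpu::linalg_ldl_factor_ex(s_cpu);
    }
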
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1e60e251d1584e28d696253c2245d11d17cac249 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_ldl_factor_ex_out : public at::meta::structured_linalg_ldl_factor_ex { +void impl(const at::Tensor & self, bool hermitian, bool check_errors, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & info); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7bd8fb99977ec285c000325549e3507608eca594 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_ldl_factor_ex { + using schema = ::std::tuple (const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_ldl_factor_ex"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)"; + static ::std::tuple call(const at::Tensor & self, bool hermitian, bool check_errors); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors); +}; + +struct TORCH_API linalg_ldl_factor_ex_out { + using schema = ::std::tuple (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_ldl_factor_ex"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) 
LD, Tensor(b!) pivots, Tensor(c!) info)"; + static ::std::tuple call(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..94935d2cbbe0e257e2b0f645e740c6925f0bdd73 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple linalg_ldl_factor(const at::Tensor & self, bool hermitian=false); +TORCH_API ::std::tuple linalg_ldl_factor_out(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9e04a866d1cc896df1610317dac171df1250592d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_ldl_factor { + using schema = ::std::tuple (const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_ldl_factor"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)"; + static ::std::tuple call(const at::Tensor & self, bool hermitian); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian); +}; + +struct TORCH_API linalg_ldl_factor_out { + using schema = ::std::tuple (const at::Tensor &, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_ldl_factor"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) 
pivots)"; + static ::std::tuple call(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve.h new file mode 100644 index 0000000000000000000000000000000000000000..6d711c88169c0f98d9aef54c9cad4e2074efe7d6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor +inline at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) { + return at::_ops::linalg_ldl_solve::call(LD, pivots, B, hermitian); +} + +// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) { + return at::_ops::linalg_ldl_solve_out::call(LD, pivots, B, hermitian, out); +} +// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_ldl_solve_out::call(LD, pivots, B, hermitian, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a72e563605976060608d79842dcbeca214d1a38c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ebe852fb14f7f6b7e7f4b210f4845b11c091fa03 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false); +TORCH_API at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false); +TORCH_API at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..410422b7b3cfb268a8ba740f0e2ed3d76f672cfe --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false); +TORCH_API at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false); +TORCH_API at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..d988f5ec001b5745624d8e00540975c3974dc6c4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_ldl_solve : public at::impl::MetaBase { + + + void meta(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1d652c65741105d0f18e4980a3c1de78dbdf0ff0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false); +TORCH_API at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false); +TORCH_API at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fdfdf1fdeea13f955bc293ef2720061939cdcf5d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_ldl_solve_out : public at::meta::structured_linalg_ldl_solve { +void impl(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..29b84d0e52f49129e4a8a0220ddc4b2a47236173 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_ldl_solve_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_ldl_solve { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_ldl_solve"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor"; + static at::Tensor call(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian); +}; + +struct TORCH_API linalg_ldl_solve_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_ldl_solve"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq.h new file mode 100644 index 0000000000000000000000000000000000000000..b306da0c8cc8c32a18ac8fe9328ab84f2e904c55 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values) +inline ::std::tuple linalg_lstsq(const at::Tensor & self, const at::Tensor & b, ::std::optional rcond=::std::nullopt, ::std::optional driver=::std::nullopt) { + return at::_ops::linalg_lstsq::call(self, b, rcond, driver); +} + +// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) +inline ::std::tuple linalg_lstsq_out(at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, ::std::optional rcond=::std::nullopt, ::std::optional driver=::std::nullopt) { + return at::_ops::linalg_lstsq_out::call(self, b, rcond, driver, solution, residuals, rank, singular_values); +} +// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) +inline ::std::tuple linalg_lstsq_outf(const at::Tensor & self, const at::Tensor & b, ::std::optional rcond, ::std::optional driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) { + return at::_ops::linalg_lstsq_out::call(self, b, rcond, driver, solution, residuals, rank, singular_values); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0d05ee2a16e80adb7bc5bb24ebd391c787d52edc --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
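
linalg_lstsq returns a four-tuple whose residuals, rank, and singular_values members are only populated for certain drivers and input shapes, so a sketch with the defaults (shapes are illustrative):

    void lstsq_demo() {
      at::Tensor a = at::randn({6, 3});
      at::Tensor b = at::randn({6, 2});
      auto [solution, residuals, rank, singular_values] =
          at::linalg_lstsq(a, b, /*rcond=*/::std::nullopt, /*driver=*/::std::nullopt);
      // solution: [3, 2]; the other outputs may be empty depending on the driver.
    }
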
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple linalg_lstsq(const at::Tensor & self, const at::Tensor & b, ::std::optional rcond=::std::nullopt, ::std::optional driver=::std::nullopt); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d3c28a7849c38032691973958b01685f33e7ee88 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple linalg_lstsq_out(at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, ::std::optional rcond=::std::nullopt, ::std::optional driver=::std::nullopt); +TORCH_API ::std::tuple linalg_lstsq_outf(const at::Tensor & self, const at::Tensor & b, ::std::optional rcond, ::std::optional driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..deb4ea83efc24441c418478d6e68281b2bff1924 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond=::std::nullopt, ::std::optional<c10::string_view> driver=::std::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_outf(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values);
+
+} // namespace cuda
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..68d4a350b1c4c370d8fbb206225d5108fb3a54aa
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond=::std::nullopt, ::std::optional<c10::string_view> driver=::std::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..288a77c6cecea5e044ddeeaba97e6b65cf4fd563
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lstsq_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_lstsq {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, ::std::optional<double>, ::std::optional<c10::string_view>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_lstsq";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)";
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver);
+};
+
+struct TORCH_API linalg_lstsq_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, ::std::optional<double>, ::std::optional<c10::string_view>, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_lstsq";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)";
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu.h
new file mode 100644
index 0000000000000000000000000000000000000000..578b388fb20110ce8db7648d132b92b7eddb1cb0
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include <ATen/ops/linalg_lu_ops.h>
+
+namespace at {
+
+
+// aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot=true) {
+    return at::_ops::linalg_lu::call(A, pivot);
+}
+
+// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true) {
+    return at::_ops::linalg_lu_out::call(A, pivot, P, L, U);
+}
+// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
+    return at::_ops::linalg_lu_out::call(A, pivot, P, L, U);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e61207aa7e371c4fe09414b8f9b257a20fe3f880
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot=true);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c548bddeddc2f7e52c958b6129b0a1c9eaaa0454
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U);
+
+} // namespace cpu
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3e42e61200e83c711b5fe3b0e67407e9704b044d
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
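Taken together, the wrappers above expose linalg_lstsq and linalg_lu through the at:: namespace. A minimal usage sketch, not part of the generated headers, assuming a C++17 program linked against a recent libtorch whose schema uses ::std::optional (the demo function name and shapes are illustrative):

#include <torch/torch.h>

void lstsq_and_lu_demo() {
  at::Tensor A = torch::randn({5, 3});
  at::Tensor b = torch::randn({5, 2});
  // rcond and driver are optional; "gelsd" is one valid CPU driver choice.
  auto [solution, residuals, rank, singular_values] =
      at::linalg_lstsq(A, b, /*rcond=*/::std::nullopt, /*driver=*/"gelsd");
  // pivot=true yields the full factorization with A == P.matmul(L).matmul(U).
  auto [P, L, U] = at::linalg_lu(torch::randn({3, 3}), /*pivot=*/true);
}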
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple linalg_lu(const at::Tensor & A, bool pivot=true); +TORCH_API ::std::tuple linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true); +TORCH_API ::std::tuple linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor.h new file mode 100644 index 0000000000000000000000000000000000000000..4f52c6aafc932e30781b2b8d2994a4d4e7e01ee1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots) +inline ::std::tuple linalg_lu_factor(const at::Tensor & A, bool pivot=true) { + return at::_ops::linalg_lu_factor::call(A, pivot); +} + +// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots) +inline ::std::tuple linalg_lu_factor_out(at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A, bool pivot=true) { + return at::_ops::linalg_lu_factor_out::call(A, pivot, LU, pivots); +} +// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots) +inline ::std::tuple linalg_lu_factor_outf(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) { + return at::_ops::linalg_lu_factor_out::call(A, pivot, LU, pivots); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..09053a142d5fc072a21d43257b5f6d890769e316 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
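As a hedged illustration of the linalg_lu_factor wrappers declared just above (assumptions beyond the headers: libtorch is linked, pivots come back as int32 in the LAPACK 1-based convention, and the hypothetical demo name is mine):

#include <torch/torch.h>

void lu_factor_demo() {
  at::Tensor A = torch::randn({4, 4});
  // Packed LU matrix plus pivot indices, suitable for linalg_lu_solve below.
  auto [LU, pivots] = at::linalg_lu_factor(A, /*pivot=*/true);
  // The out variant writes into caller-provided tensors (resized as needed).
  at::Tensor LU_out = torch::empty({0});
  at::Tensor piv_out = torch::empty({0}, torch::kInt);
  at::linalg_lu_factor_out(LU_out, piv_out, A, /*pivot=*/true);
}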
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple linalg_lu_factor(const at::Tensor & A, bool pivot=true); +TORCH_API ::std::tuple linalg_lu_factor_out(at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A, bool pivot=true); +TORCH_API ::std::tuple linalg_lu_factor_outf(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex.h new file mode 100644 index 0000000000000000000000000000000000000000..ed0fc4becacf6c82bf68426d3af590172122543a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info) +inline ::std::tuple linalg_lu_factor_ex(const at::Tensor & A, bool pivot=true, bool check_errors=false) { + return at::_ops::linalg_lu_factor_ex::call(A, pivot, check_errors); +} + +// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) +inline ::std::tuple linalg_lu_factor_ex_out(at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot=true, bool check_errors=false) { + return at::_ops::linalg_lu_factor_ex_out::call(A, pivot, check_errors, LU, pivots, info); +} +// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) +inline ::std::tuple linalg_lu_factor_ex_outf(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) { + return at::_ops::linalg_lu_factor_ex_out::call(A, pivot, check_errors, LU, pivots, info); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..44618427c924f694cded046ca77cb56995c577da --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
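The _ex variant declared above differs from plain linalg_lu_factor by reporting failures through an `info` tensor instead of throwing, unless check_errors=true. A short sketch under the same libtorch assumptions:

#include <torch/torch.h>

void lu_factor_ex_demo() {
  at::Tensor A = torch::randn({4, 4});
  auto [LU, pivots, info] =
      at::linalg_lu_factor_ex(A, /*pivot=*/true, /*check_errors=*/false);
  // info holds one LAPACK-style status per matrix in the batch; 0 means success.
  bool ok = (info == 0).all().item<bool>();
  TORCH_CHECK(ok, "LU factorization failed on at least one batch element");
}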
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple linalg_lu_factor_ex(const at::Tensor & A, bool pivot=true, bool check_errors=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f52587413fad82eaf857e5c521acccb8b3741e26 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple linalg_lu_factor_ex(const at::Tensor & A, bool pivot=true, bool check_errors=false); +TORCH_API ::std::tuple linalg_lu_factor_ex_out(at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot=true, bool check_errors=false); +TORCH_API ::std::tuple linalg_lu_factor_ex_outf(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f994b5c94194a872f39326b45f543cd699554f92 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
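The headers in this range consistently pair an `_out` overload (output tensors first, trailing arguments keep C++ defaults) with an `_outf` overload (exact schema order, outputs last, no defaults). A small sketch of the two calling conventions, assuming libtorch and the zero-size outputs being resized by the op:

#include <torch/torch.h>

void out_vs_outf_demo() {
  at::Tensor A = torch::randn({4, 4});
  at::Tensor LU = torch::empty({0});
  at::Tensor piv = torch::empty({0}, torch::kInt);
  at::Tensor info = torch::empty({0}, torch::kInt);
  // _out: outputs lead, optional arguments keep their C++ defaults.
  at::linalg_lu_factor_ex_out(LU, piv, info, A);
  // _outf: schema order, outputs trail; both route to the same underlying op.
  at::linalg_lu_factor_ex_outf(A, /*pivot=*/true, /*check_errors=*/false, LU, piv, info);
}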
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex(const at::Tensor & A, bool pivot=true, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out(at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot=true, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_outf(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info);
+
+} // namespace cuda
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a05afc7de04fd7ee90c962ff3e8b51c6b753878
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_linalg_lu_factor_ex : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & A, bool pivot, bool check_errors);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..117d5dbfddb7b7731854647a0f701d0414490170
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
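The structured_linalg_lu_factor_ex struct above is the meta half of a structured kernel: meta() validates shapes and allocates outputs, while a backend impl() (declared in the matching *_native.h further below) fills them. A purely schematic sketch of that division of labor, with hypothetical names rather than the generated code:

#include <ATen/core/Tensor.h>

// Schematic only: mirrors the meta()/impl() split of structured kernels.
struct structured_op_sketch {
  void meta(const at::Tensor& A, bool pivot, bool check_errors) {
    // The real kernel checks A is at least 2-D and square-batched here, and
    // allocates the LU, pivots, and info outputs before impl() runs.
  }
  void impl(const at::Tensor& A, bool pivot, bool check_errors,
            const at::Tensor& LU, const at::Tensor& pivots, const at::Tensor& info) {
    // The real kernel runs the LAPACK/cuSOLVER factorization into the
    // outputs that meta() already allocated.
  }
};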
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple linalg_lu_factor_ex(const at::Tensor & A, bool pivot=true, bool check_errors=false); +TORCH_API ::std::tuple linalg_lu_factor_ex_out(at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot=true, bool check_errors=false); +TORCH_API ::std::tuple linalg_lu_factor_ex_outf(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c32fccf5d64a00fc4f980db9f1acc54eef751577 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_lu_factor_ex_out : public at::meta::structured_linalg_lu_factor_ex { +void impl(const at::Tensor & A, bool pivot, bool check_errors, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & info); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..444b554d32abaca6841030a097daf920b6660e67 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_lu_factor_ex { + using schema = ::std::tuple (const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_lu_factor_ex"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)"; + static ::std::tuple call(const at::Tensor & A, bool pivot, bool check_errors); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, bool check_errors); +}; + +struct TORCH_API linalg_lu_factor_ex_out { + using schema = ::std::tuple (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_lu_factor_ex"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) 
info)"; + static ::std::tuple call(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2e67d1cf8c57be47887a4fd2a4a525abffe47a0a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple linalg_lu_factor(const at::Tensor & A, bool pivot=true); +TORCH_API ::std::tuple linalg_lu_factor_out(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..24d95aea8f70191e9b37a642ad07373f0a0f0511 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_factor_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_lu_factor { + using schema = ::std::tuple (const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_lu_factor"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)"; + static ::std::tuple call(const at::Tensor & A, bool pivot); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot); +}; + +struct TORCH_API linalg_lu_factor_out { + using schema = ::std::tuple (const at::Tensor &, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_lu_factor"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) 
pivots)"; + static ::std::tuple call(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..a29d09c1cb5839e975d2e37bbe2fe09e882aa7fe --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_lu : public at::impl::MetaBase { + + + void meta(const at::Tensor & A, bool pivot); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9f2318e6ad23310ca98957d75e03224aae9a2c13 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple linalg_lu(const at::Tensor & A, bool pivot=true); +TORCH_API ::std::tuple linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true); +TORCH_API ::std::tuple linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bcb6719c20d9b7d1c07d9f5a13a08c856c4a4211 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_lu_out : public at::meta::structured_linalg_lu { +void impl(const at::Tensor & A, bool pivot, const at::Tensor & P, const at::Tensor & L, const at::Tensor & U); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9f0099fa198bac262f350a56a50c7851bb4ed6fb --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_lu { + using schema = ::std::tuple (const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_lu"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)"; + static ::std::tuple call(const at::Tensor & A, bool pivot); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot); +}; + +struct TORCH_API linalg_lu_out { + using schema = ::std::tuple (const at::Tensor &, bool, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_lu"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) 
U)"; + static ::std::tuple call(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve.h new file mode 100644 index 0000000000000000000000000000000000000000..0b340aa0bac7745f43073e011df5727d6c79939c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor +inline at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false) { + return at::_ops::linalg_lu_solve::call(LU, pivots, B, left, adjoint); +} + +// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_lu_solve_out(at::Tensor & out, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false) { + return at::_ops::linalg_lu_solve_out::call(LU, pivots, B, left, adjoint, out); +} +// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_lu_solve_outf(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) { + return at::_ops::linalg_lu_solve_out::call(LU, pivots, B, left, adjoint, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..92be9c539e41625907089bcc9a1da948fdc8fb31 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..eb6b59103f099b052ea866160ee5c6a6bce00fbd --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false); +TORCH_API at::Tensor & linalg_lu_solve_out(at::Tensor & out, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false); +TORCH_API at::Tensor & linalg_lu_solve_outf(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dda7ca7ff1c40a1c2f689eaa806aaaea10db0ed0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false);
+TORCH_API at::Tensor & linalg_lu_solve_out(at::Tensor & out, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false);
+TORCH_API at::Tensor & linalg_lu_solve_outf(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..c072abc767e396646e16109397f303b1cc8b5059
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_linalg_lu_solve : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f01c5f501cf420a79fe2b397748e8d51e3b928e
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
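The left/adjoint flags threaded through every linalg_lu_solve declaration select which of the related systems the stored factorization solves. An illustrative sketch (libtorch assumed):

#include <torch/torch.h>

void lu_solve_flags_demo() {
  at::Tensor A = torch::randn({3, 3});
  at::Tensor B = torch::randn({3, 3});
  auto [LU, pivots] = at::linalg_lu_factor(A);
  at::Tensor X1 = at::linalg_lu_solve(LU, pivots, B, /*left=*/true,  /*adjoint=*/false); // A X = B
  at::Tensor X2 = at::linalg_lu_solve(LU, pivots, B, /*left=*/false, /*adjoint=*/false); // X A = B
  at::Tensor X3 = at::linalg_lu_solve(LU, pivots, B, /*left=*/true,  /*adjoint=*/true);  // A^H X = B
}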
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false); +TORCH_API at::Tensor & linalg_lu_solve_out(at::Tensor & out, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false); +TORCH_API at::Tensor & linalg_lu_solve_outf(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_native.h new file mode 100644 index 0000000000000000000000000000000000000000..175331df32d7ed53673d18fd29a085b34bcb791c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_lu_solve_out : public at::meta::structured_linalg_lu_solve { +void impl(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..bbfba8ffbfb6e69b9d74e85fcfcbdda160e645ed --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_lu_solve_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_lu_solve { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_lu_solve"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor"; + static at::Tensor call(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint); +}; + +struct TORCH_API linalg_lu_solve_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_lu_solve"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matmul.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matmul.h new file mode 100644 index 0000000000000000000000000000000000000000..b7029404e6fd0c30f5d49230d15a848a69b22d37 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matmul.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_matmul(Tensor self, Tensor other) -> Tensor +inline at::Tensor linalg_matmul(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::linalg_matmul::call(self, other); +} + +// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::linalg_matmul_out::call(self, other, out); +} +// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::linalg_matmul_out::call(self, other, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matmul_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matmul_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c49dc22363b322b1b105390ed3feba5753752e1d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matmul_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
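linalg_matmul is documented as an alias of torch.matmul, including its batched-broadcasting rules; a one-look sketch (libtorch assumed):

#include <torch/torch.h>

void matmul_demo() {
  at::Tensor a = torch::randn({2, 3, 4});
  at::Tensor b = torch::randn({4, 5});
  // b broadcasts across the leading batch dimension of a; result is {2, 3, 5}.
  at::Tensor c = at::linalg_matmul(a, b);
}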
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_matmul(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & linalg_matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & linalg_matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matmul_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matmul_native.h new file mode 100644 index 0000000000000000000000000000000000000000..536dc3e5187c501af0e8e5053a43d345d459164c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matmul_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_matmul(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & linalg_matmul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matmul_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matmul_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..789175c4168e32f0374920bf967e26ea999e4461 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matmul_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_matmul { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_matmul"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_matmul(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API linalg_matmul_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_matmul"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp.h new file mode 100644 index 0000000000000000000000000000000000000000..024efd6effac27dd620f3221f09801acbf3c86c8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_matrix_exp(Tensor self) -> Tensor +inline at::Tensor linalg_matrix_exp(const at::Tensor & self) { + return at::_ops::linalg_matrix_exp::call(self); +} + +// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_matrix_exp_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::linalg_matrix_exp_out::call(self, out); +} +// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_matrix_exp_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::linalg_matrix_exp_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c402efeb532078923c1f170a7c46f98f173b54d5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
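A brief sketch of linalg_matrix_exp, which computes the matrix exponential exp(A) = sum_k A^k / k! of a square matrix or batch of square matrices (libtorch assumed):

#include <torch/torch.h>

void matrix_exp_demo() {
  at::Tensor A = torch::randn({3, 3});
  at::Tensor E = at::linalg_matrix_exp(A);
  // Spot check of the definition: the exponential of the zero matrix is I.
  at::Tensor I = at::linalg_matrix_exp(torch::zeros({3, 3}));
}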
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & linalg_matrix_exp_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & linalg_matrix_exp_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6c29fb2e47191cf4e1cc4e59bc04131e5be000df --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor linalg_matrix_exp(const at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..30869157a89b8b801de18756fbb86c6db0f18cac --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor linalg_matrix_exp(const at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f60d48ec64e7ebba95117f1fe689e4f5a5bc8737 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & linalg_matrix_exp_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor linalg_matrix_exp(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2d68f53c681435de9c74773cf7f041b45c7834bc --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_exp_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_matrix_exp { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_matrix_exp"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_matrix_exp(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API linalg_matrix_exp_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_matrix_exp"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_matrix_exp.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_norm.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..132d80ab4294f3ad6df8246bea424b5ce6ea1924 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_norm.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor linalg_matrix_norm(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional dtype=::std::nullopt) { + return at::_ops::linalg_matrix_norm::call(self, ord, dim, keepdim, dtype); +} + +// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional dtype=::std::nullopt) { + return at::_ops::linalg_matrix_norm_out::call(self, ord, dim, keepdim, dtype, out); +} +// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out) { + return at::_ops::linalg_matrix_norm_out::call(self, ord, dim, keepdim, dtype, out); +} + +// aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor linalg_matrix_norm(const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional dtype=::std::nullopt) { + return at::_ops::linalg_matrix_norm_str_ord::call(self, ord, dim, keepdim, dtype); +} + +// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional dtype=::std::nullopt) { + return at::_ops::linalg_matrix_norm_str_ord_out::call(self, ord, dim, keepdim, dtype, out); +} +// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
+    return at::_ops::linalg_matrix_norm_str_ord_out::call(self, ord, dim, keepdim, dtype, out);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1da6a227b66dbe2c21e4f9f3e9f8662834f76534
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_matrix_norm(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor linalg_matrix_norm(const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_norm_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_norm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..8e6bbdf03ee5739bd1e85269c5bfcbf73c7d9371
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_norm_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_matrix_norm(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_matrix_norm_out(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor linalg_matrix_norm(const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_matrix_norm_out(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_norm_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_norm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..40009e5947342bd84f71a0714990b2d03e20ebc2
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_norm_ops.h
@@ -0,0 +1,62 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/QScheme.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_matrix_norm {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &, at::IntArrayRef, bool, ::std::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_norm";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
+};
+
+struct TORCH_API linalg_matrix_norm_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::IntArrayRef, bool, ::std::optional<at::ScalarType>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_norm";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+};
+
+struct TORCH_API linalg_matrix_norm_str_ord {
+  using schema = at::Tensor (const at::Tensor &, c10::string_view, at::IntArrayRef, bool, ::std::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_norm";
+  static constexpr const char* overload_name = "str_ord";
+  static constexpr const char* schema_str = "linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
+};
+
+struct TORCH_API linalg_matrix_norm_str_ord_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::string_view, at::IntArrayRef, bool, ::std::optional<at::ScalarType>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_norm";
+  static constexpr const char* overload_name = "str_ord_out";
+  static constexpr const char* schema_str = "linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+};
+
+}} // namespace at::_ops
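A minimal usage sketch for the linalg_matrix_norm overloads declared above, assuming a standard libtorch/ATen build whose headers match this vendored copy; the shapes, ord values, and variable names are illustrative, not part of the diff.

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor A = at::randn({4, 4});
      // Scalar-ord overload: ord=2 is the spectral norm, reduced over dims {-2,-1}.
      at::Tensor spectral = at::linalg_matrix_norm(A, 2, {-2, -1}, /*keepdim=*/false, ::std::nullopt);
      // str_ord overload: "fro" (the schema default) is the Frobenius norm.
      at::Tensor fro = at::linalg_matrix_norm(A, "fro", {-2, -1}, /*keepdim=*/false, ::std::nullopt);
      std::cout << spectral.item<double>() << " " << fro.item<double>() << "\n";
      return 0;
    }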
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_power.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_power.h
new file mode 100644
index 0000000000000000000000000000000000000000..0602c9c8e6da0b57f2c7168b3948e58f1a2581d1
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_power.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/SymInt.h>
+
+
+
+#include <ATen/ops/linalg_matrix_power_ops.h>
+
+namespace at {
+
+
+// aten::linalg_matrix_power(Tensor self, int n) -> Tensor
+inline at::Tensor linalg_matrix_power(const at::Tensor & self, int64_t n) {
+    return at::_ops::linalg_matrix_power::call(self, n);
+}
+
+// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_power_out(at::Tensor & out, const at::Tensor & self, int64_t n) {
+    return at::_ops::linalg_matrix_power_out::call(self, n, out);
+}
+// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_power_outf(const at::Tensor & self, int64_t n, at::Tensor & out) {
+    return at::_ops::linalg_matrix_power_out::call(self, n, out);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_power_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_power_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cb182d7d45661aeced4a9adf6e6be23120929ed0
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_power_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_matrix_power(const at::Tensor & self, int64_t n);
+TORCH_API at::Tensor & linalg_matrix_power_out(at::Tensor & out, const at::Tensor & self, int64_t n);
+TORCH_API at::Tensor & linalg_matrix_power_outf(const at::Tensor & self, int64_t n, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_power_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_power_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f126196a8d46986845928f1519a626adebff08c7
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_power_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_matrix_power(const at::Tensor & self, int64_t n);
+TORCH_API at::Tensor & linalg_matrix_power_out(const at::Tensor & self, int64_t n, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_power_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_power_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bbb6e5818f6734ddf826b0146d08e036f0e27ad9
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_power_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/QScheme.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_matrix_power {
+  using schema = at::Tensor (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_power";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_matrix_power(Tensor self, int n) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, int64_t n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n);
+};
+
+struct TORCH_API linalg_matrix_power_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_power";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, int64_t n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out);
+};
+
+}} // namespace at::_ops
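Likewise, a short sketch against the generated linalg_matrix_power entry points above (same libtorch assumption; the exponent and shapes are arbitrary).

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor A = at::randn({3, 3});
      at::Tensor cube = at::linalg_matrix_power(A, 3);  // A matmul'd with itself three times
      at::Tensor out = at::empty({3, 3});
      at::linalg_matrix_power_out(out, A, 3);           // out= variant writes into `out`
      std::cout << (cube - out).abs().max().item<double>() << "\n";  // ~0
      return 0;
    }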
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_rank.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_rank.h
new file mode 100644
index 0000000000000000000000000000000000000000..b2a50d4834439a99e3ad691e960c741ca94d4d5a
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_rank.h
@@ -0,0 +1,82 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/SymInt.h>
+
+
+
+#include <ATen/ops/linalg_matrix_rank_ops.h>
+
+namespace at {
+
+
+// aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
+inline at::Tensor linalg_matrix_rank(const at::Tensor & input, const ::std::optional<at::Tensor> & atol={}, const ::std::optional<at::Tensor> & rtol={}, bool hermitian=false) {
+    return at::_ops::linalg_matrix_rank_atol_rtol_tensor::call(input, atol, rtol, hermitian);
+}
+
+// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & input, const ::std::optional<at::Tensor> & atol={}, const ::std::optional<at::Tensor> & rtol={}, bool hermitian=false) {
+    return at::_ops::linalg_matrix_rank_atol_rtol_tensor_out::call(input, atol, rtol, hermitian, out);
+}
+// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_rank_outf(const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
+    return at::_ops::linalg_matrix_rank_atol_rtol_tensor_out::call(input, atol, rtol, hermitian, out);
+}
+
+// aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
+inline at::Tensor linalg_matrix_rank(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian=false) {
+    return at::_ops::linalg_matrix_rank_atol_rtol_float::call(self, atol, rtol, hermitian);
+}
+
+// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian=false) {
+    return at::_ops::linalg_matrix_rank_atol_rtol_float_out::call(self, atol, rtol, hermitian, out);
+}
+// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_rank_outf(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out) {
+    return at::_ops::linalg_matrix_rank_atol_rtol_float_out::call(self, atol, rtol, hermitian, out);
+}
+
+// aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor
+inline at::Tensor linalg_matrix_rank(const at::Tensor & self, double tol, bool hermitian=false) {
+    return at::_ops::linalg_matrix_rank::call(self, tol, hermitian);
+}
+
+// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & self, double tol, bool hermitian=false) {
+    return at::_ops::linalg_matrix_rank_out::call(self, tol, hermitian, out);
+}
+// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_rank_outf(const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) {
+    return at::_ops::linalg_matrix_rank_out::call(self, tol, hermitian, out);
+}
+
+// aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor
+inline at::Tensor linalg_matrix_rank(const at::Tensor & input, const at::Tensor & tol, bool hermitian=false) {
+    return at::_ops::linalg_matrix_rank_tol_tensor::call(input, tol, hermitian);
+}
+
+// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tol, bool hermitian=false) {
+    return at::_ops::linalg_matrix_rank_out_tol_tensor::call(input, tol, hermitian, out);
+}
+// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_rank_outf(const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) {
+    return at::_ops::linalg_matrix_rank_out_tol_tensor::call(input, tol, hermitian, out);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_rank_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_rank_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..064b8e35917788c765732549f8fe4818e05c20af
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_rank_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,34 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_matrix_rank(const at::Tensor & input, const ::std::optional<at::Tensor> & atol={}, const ::std::optional<at::Tensor> & rtol={}, bool hermitian=false);
+TORCH_API at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & input, const ::std::optional<at::Tensor> & atol={}, const ::std::optional<at::Tensor> & rtol={}, bool hermitian=false);
+TORCH_API at::Tensor & linalg_matrix_rank_outf(const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_matrix_rank(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian=false);
+TORCH_API at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian=false);
+TORCH_API at::Tensor & linalg_matrix_rank_outf(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_matrix_rank(const at::Tensor & self, double tol, bool hermitian=false);
+TORCH_API at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & self, double tol, bool hermitian=false);
+TORCH_API at::Tensor & linalg_matrix_rank_outf(const at::Tensor & self, double tol, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_matrix_rank(const at::Tensor & input, const at::Tensor & tol, bool hermitian=false);
+TORCH_API at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tol, bool hermitian=false);
+TORCH_API at::Tensor & linalg_matrix_rank_outf(const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_rank_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_rank_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..9e60d3dcd0326fc9f6f6e0a5c4563eb49e1fe3ef
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_rank_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_matrix_rank(const at::Tensor & input, const ::std::optional<at::Tensor> & atol={}, const ::std::optional<at::Tensor> & rtol={}, bool hermitian=false);
+TORCH_API at::Tensor & linalg_matrix_rank_out(const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_matrix_rank(const at::Tensor & self, ::std::optional<double> atol=::std::nullopt, ::std::optional<double> rtol=::std::nullopt, bool hermitian=false);
+TORCH_API at::Tensor & linalg_matrix_rank_out(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_matrix_rank(const at::Tensor & self, double tol, bool hermitian=false);
+TORCH_API at::Tensor & linalg_matrix_rank_out(const at::Tensor & self, double tol, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_matrix_rank(const at::Tensor & input, const at::Tensor & tol, bool hermitian=false);
+TORCH_API at::Tensor & linalg_matrix_rank_out(const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_rank_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_rank_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..e23a51420c0a2f6e652d687a0e3d071d59adbfdc
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_matrix_rank_ops.h
@@ -0,0 +1,106 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/QScheme.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_matrix_rank_atol_rtol_tensor {
+  using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_rank";
+  static constexpr const char* overload_name = "atol_rtol_tensor";
+  static constexpr const char* schema_str = "linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor";
+  static at::Tensor call(const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian);
+};
+
+struct TORCH_API linalg_matrix_rank_atol_rtol_tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_rank";
+  static constexpr const char* overload_name = "atol_rtol_tensor_out";
+  static constexpr const char* schema_str = "linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out);
+};
+
+struct TORCH_API linalg_matrix_rank_atol_rtol_float {
+  using schema = at::Tensor (const at::Tensor &, ::std::optional<double>, ::std::optional<double>, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_rank";
+  static constexpr const char* overload_name = "atol_rtol_float";
+  static constexpr const char* schema_str = "linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian);
+};
+
+struct TORCH_API linalg_matrix_rank_atol_rtol_float_out {
+  using schema = at::Tensor & (const at::Tensor &, ::std::optional<double>, ::std::optional<double>, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_rank";
+  static constexpr const char* overload_name = "atol_rtol_float_out";
+  static constexpr const char* schema_str = "linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out);
+};
+
+struct TORCH_API linalg_matrix_rank {
+  using schema = at::Tensor (const at::Tensor &, double, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_rank";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, double tol, bool hermitian);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian);
+};
+
+struct TORCH_API linalg_matrix_rank_out {
+  using schema = at::Tensor & (const at::Tensor &, double, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_rank";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, double tol, bool hermitian, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian, at::Tensor & out);
+};
+
+struct TORCH_API linalg_matrix_rank_tol_tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_rank";
+  static constexpr const char* overload_name = "tol_tensor";
+  static constexpr const char* schema_str = "linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor";
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & tol, bool hermitian);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian);
+};
+
+struct TORCH_API linalg_matrix_rank_out_tol_tensor {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_matrix_rank";
+  static constexpr const char* overload_name = "out_tol_tensor";
+  static constexpr const char* schema_str = "linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out);
+};
+
+}} // namespace at::_ops
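A hedged sketch of the linalg_matrix_rank overload family above, showing the defaulted atol/rtol form and the legacy float-tol form on a deliberately rank-deficient input (standard libtorch assumed).

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor A = at::eye(3);
      A.select(0, 2).zero_();  // zero out the last row, so the rank drops to 2
      // atol_rtol_tensor overload via its defaults (atol/rtol unset).
      at::Tensor r1 = at::linalg_matrix_rank(A);
      // Legacy float-tol overload: singular values <= tol count as zero.
      at::Tensor r2 = at::linalg_matrix_rank(A, /*tol=*/1e-8, /*hermitian=*/false);
      std::cout << r1.item<int64_t>() << " " << r2.item<int64_t>() << "\n";  // 2 2
      return 0;
    }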
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_multi_dot.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_multi_dot.h
new file mode 100644
index 0000000000000000000000000000000000000000..b15c31dff4599648b94a07007c873a76b5af0ee4
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_multi_dot.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/SymInt.h>
+
+
+
+#include <ATen/ops/linalg_multi_dot_ops.h>
+
+namespace at {
+
+
+// aten::linalg_multi_dot(Tensor[] tensors) -> Tensor
+inline at::Tensor linalg_multi_dot(at::TensorList tensors) {
+    return at::_ops::linalg_multi_dot::call(tensors);
+}
+
+// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_multi_dot_out(at::Tensor & out, at::TensorList tensors) {
+    return at::_ops::linalg_multi_dot_out::call(tensors, out);
+}
+// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_multi_dot_outf(at::TensorList tensors, at::Tensor & out) {
+    return at::_ops::linalg_multi_dot_out::call(tensors, out);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_multi_dot_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_multi_dot_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3fb97d1acc87610fa142f1b6d48dca72f12e2ae3
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_multi_dot_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_multi_dot(at::TensorList tensors);
+TORCH_API at::Tensor & linalg_multi_dot_out(at::Tensor & out, at::TensorList tensors);
+TORCH_API at::Tensor & linalg_multi_dot_outf(at::TensorList tensors, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_multi_dot_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_multi_dot_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ab024d836fe9ea4c4a8e80ee8a63841c7746d2ab
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_multi_dot_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_multi_dot(at::TensorList tensors);
+TORCH_API at::Tensor & linalg_multi_dot_out(at::TensorList tensors, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_multi_dot_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_multi_dot_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1a50a55a861264694a875292303c9d64e6426281
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_multi_dot_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/QScheme.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_multi_dot {
+  using schema = at::Tensor (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_multi_dot";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_multi_dot(Tensor[] tensors) -> Tensor";
+  static at::Tensor call(at::TensorList tensors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors);
+};
+
+struct TORCH_API linalg_multi_dot_out {
+  using schema = at::Tensor & (at::TensorList, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_multi_dot";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(at::TensorList tensors, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out);
+};
+
+}} // namespace at::_ops
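A sketch for linalg_multi_dot above, which evaluates a chain of matrix products using the cheapest parenthesization; the shapes are illustrative.

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor A = at::randn({10, 100});
      at::Tensor B = at::randn({100, 5});
      at::Tensor C = at::randn({5, 50});
      // Equivalent to A.matmul(B).matmul(C), but the association order is optimized.
      at::Tensor out = at::linalg_multi_dot({A, B, C});
      std::cout << out.sizes() << "\n";  // [10, 50]
      return 0;
    }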
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_norm.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_norm.h
new file mode 100644
index 0000000000000000000000000000000000000000..96acc885824980b330da130cc80be4b020e03d4c
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_norm.h
@@ -0,0 +1,54 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/SymInt.h>
+
+
+
+#include <ATen/ops/linalg_norm_ops.h>
+
+namespace at {
+
+
+// aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor linalg_norm(const at::Tensor & self, const ::std::optional<at::Scalar> & ord=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::linalg_norm::call(self, ord, dim, keepdim, dtype);
+}
+
+// aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor linalg_norm(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::linalg_norm_ord_str::call(self, ord, dim, keepdim, dtype);
+}
+
+// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_norm_out(at::Tensor & out, const at::Tensor & self, const ::std::optional<at::Scalar> & ord=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::linalg_norm_out::call(self, ord, dim, keepdim, dtype, out);
+}
+// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_norm_outf(const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
+    return at::_ops::linalg_norm_out::call(self, ord, dim, keepdim, dtype, out);
+}
+
+// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::linalg_norm_ord_str_out::call(self, ord, dim, keepdim, dtype, out);
+}
+// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_norm_outf(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
+    return at::_ops::linalg_norm_ord_str_out::call(self, ord, dim, keepdim, dtype, out);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_norm_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_norm_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6ec91f3943858dd25b25343a45e3dfe3d9f86f94
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_norm_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_norm(const at::Tensor & self, const ::std::optional<at::Scalar> & ord=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_norm_out(at::Tensor & out, const at::Tensor & self, const ::std::optional<at::Scalar> & ord=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_norm_outf(const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor linalg_norm(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_norm_outf(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_norm_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_norm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..46885be4c08e4dfb008fb77f61e6c5bac87523e6
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_norm_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_norm(const at::Tensor & self, const ::std::optional<at::Scalar> & ord=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_norm_out(const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor linalg_norm(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_norm_out(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_norm_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_norm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2eeae5fa3c49d1985382401c3a7ce7bd116f05e7
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_norm_ops.h
@@ -0,0 +1,62 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/QScheme.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_norm {
+  using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Scalar> &, at::OptionalIntArrayRef, bool, ::std::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_norm";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
+};
+
+struct TORCH_API linalg_norm_ord_str {
+  using schema = at::Tensor (const at::Tensor &, c10::string_view, at::OptionalIntArrayRef, bool, ::std::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_norm";
+  static constexpr const char* overload_name = "ord_str";
+  static constexpr const char* schema_str = "linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
+};
+
+struct TORCH_API linalg_norm_out {
+  using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Scalar> &, at::OptionalIntArrayRef, bool, ::std::optional<at::ScalarType>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_norm";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+};
+
+struct TORCH_API linalg_norm_ord_str_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::string_view, at::OptionalIntArrayRef, bool, ::std::optional<at::ScalarType>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_norm";
+  static constexpr const char* overload_name = "ord_str_out";
+  static constexpr const char* schema_str = "linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+};
+
+}} // namespace at::_ops
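A sketch covering the linalg_norm entry points above, one call per overload family (optional Scalar ord and string ord); this assumes at::OptionalIntArrayRef accepts a braced dim list, as it does in recent libtorch.

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor x = at::randn({3, 4});
      // Defaults: ord=None, dim=None -> Frobenius norm over all elements.
      at::Tensor f = at::linalg_norm(x);
      // Scalar ord over an explicit dim, keeping the reduced dimension.
      at::Tensor rows = at::linalg_norm(x, 1, {1}, /*keepdim=*/true, ::std::nullopt);
      // ord_str overload: "nuc" (nuclear norm) needs a two-dim reduction.
      at::Tensor nuc = at::linalg_norm(x, "nuc", {0, 1}, /*keepdim=*/false, ::std::nullopt);
      std::cout << f << "\n" << rows << "\n" << nuc << "\n";
      return 0;
    }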
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv.h
new file mode 100644
index 0000000000000000000000000000000000000000..acd6924bfd45688eb922d3b5ab07b116cd8d83f7
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv.h
@@ -0,0 +1,82 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/SymInt.h>
+
+
+
+#include <ATen/ops/linalg_pinv_ops.h>
+
+namespace at {
+
+
+// aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
+inline at::Tensor linalg_pinv(const at::Tensor & self, const ::std::optional<at::Tensor> & atol={}, const ::std::optional<at::Tensor> & rtol={}, bool hermitian=false) {
+    return at::_ops::linalg_pinv_atol_rtol_tensor::call(self, atol, rtol, hermitian);
+}
+
+// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, const ::std::optional<at::Tensor> & atol={}, const ::std::optional<at::Tensor> & rtol={}, bool hermitian=false) {
+    return at::_ops::linalg_pinv_atol_rtol_tensor_out::call(self, atol, rtol, hermitian, out);
+}
+// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_pinv_outf(const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
+    return at::_ops::linalg_pinv_atol_rtol_tensor_out::call(self, atol, rtol, hermitian, out);
+}
+
+// aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
+inline at::Tensor linalg_pinv(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian=false) {
+    return at::_ops::linalg_pinv_atol_rtol_float::call(self, atol, rtol, hermitian);
+}
+
+// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian=false) {
+    return at::_ops::linalg_pinv_atol_rtol_float_out::call(self, atol, rtol, hermitian, out);
+}
+// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_pinv_outf(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out) {
+    return at::_ops::linalg_pinv_atol_rtol_float_out::call(self, atol, rtol, hermitian, out);
+}
+
+// aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor
+inline at::Tensor linalg_pinv(const at::Tensor & self, double rcond, bool hermitian=false) {
+    return at::_ops::linalg_pinv::call(self, rcond, hermitian);
+}
+
+// aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor
+inline at::Tensor linalg_pinv(const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false) {
+    return at::_ops::linalg_pinv_rcond_tensor::call(self, rcond, hermitian);
+}
+
+// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, double rcond, bool hermitian=false) {
+    return at::_ops::linalg_pinv_out::call(self, rcond, hermitian, out);
+}
+// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_pinv_outf(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) {
+    return at::_ops::linalg_pinv_out::call(self, rcond, hermitian, out);
+}
+
+// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false) {
+    return at::_ops::linalg_pinv_out_rcond_tensor::call(self, rcond, hermitian, out);
+}
+// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_pinv_outf(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) {
+    return at::_ops::linalg_pinv_out_rcond_tensor::call(self, rcond, hermitian, out);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..703ed6e5b8730b6b8ad5956e109a09571f3020dc
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, const ::std::optional<at::Tensor> & atol={}, const ::std::optional<at::Tensor> & rtol={}, bool hermitian=false);
+TORCH_API at::Tensor & linalg_pinv_outf(const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..94fc0cd6232349285259ade40df5d431099825f1
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor linalg_pinv(const at::Tensor & self, const ::std::optional<at::Tensor> & atol={}, const ::std::optional<at::Tensor> & rtol={}, bool hermitian=false);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f0a1ce5820aec49595af983aaa3f6cf593e31a7
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,31 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_pinv(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian=false);
+TORCH_API at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian=false);
+TORCH_API at::Tensor & linalg_pinv_outf(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_pinv(const at::Tensor & self, double rcond, bool hermitian=false);
+TORCH_API at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, double rcond, bool hermitian=false);
+TORCH_API at::Tensor & linalg_pinv_outf(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_pinv(const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false);
+TORCH_API at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false);
+TORCH_API at::Tensor & linalg_pinv_outf(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..af8ec5eda85b7b6efe51e46407f049113c40ce59
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & linalg_pinv_out(const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_pinv(const at::Tensor & self, const ::std::optional<at::Tensor> & atol={}, const ::std::optional<at::Tensor> & rtol={}, bool hermitian=false);
+TORCH_API at::Tensor linalg_pinv(const at::Tensor & self, ::std::optional<double> atol=::std::nullopt, ::std::optional<double> rtol=::std::nullopt, bool hermitian=false);
+TORCH_API at::Tensor & linalg_pinv_out(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_pinv(const at::Tensor & self, double rcond, bool hermitian=false);
+TORCH_API at::Tensor & linalg_pinv_out(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_pinv(const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false);
+TORCH_API at::Tensor & linalg_pinv_out(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2fb75f242da612368aab1c0ac18d269aee245738
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_pinv_ops.h
@@ -0,0 +1,106 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/QScheme.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_pinv_atol_rtol_tensor {
+  using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_pinv";
+  static constexpr const char* overload_name = "atol_rtol_tensor";
+  static constexpr const char* schema_str = "linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian);
+};
+
+struct TORCH_API linalg_pinv_atol_rtol_tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_pinv";
+  static constexpr const char* overload_name = "atol_rtol_tensor_out";
+  static constexpr const char* schema_str = "linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out);
+};
+
+struct TORCH_API linalg_pinv_atol_rtol_float {
+  using schema = at::Tensor (const at::Tensor &, ::std::optional<double>, ::std::optional<double>, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_pinv";
+  static constexpr const char* overload_name = "atol_rtol_float";
+  static constexpr const char* schema_str = "linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian);
+};
+
+struct TORCH_API linalg_pinv_atol_rtol_float_out {
+  using schema = at::Tensor & (const at::Tensor &, ::std::optional<double>, ::std::optional<double>, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_pinv";
+  static constexpr const char* overload_name = "atol_rtol_float_out";
+  static constexpr const char* schema_str = "linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out);
+};
+
+struct TORCH_API linalg_pinv {
+  using schema = at::Tensor (const at::Tensor &, double, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_pinv";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, double rcond, bool hermitian);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond, bool hermitian);
+};
+
+struct TORCH_API linalg_pinv_rcond_tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_pinv";
+  static constexpr const char* overload_name = "rcond_tensor";
+  static constexpr const char* schema_str = "linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & rcond, bool hermitian);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & rcond, bool hermitian);
+};
+
+struct TORCH_API linalg_pinv_out {
+  using schema = at::Tensor & (const at::Tensor &, double, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_pinv";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out);
+};
+
+struct TORCH_API linalg_pinv_out_rcond_tensor {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_pinv";
+  static constexpr const char* overload_name = "out_rcond_tensor";
+  static constexpr const char* schema_str = "linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out);
+};
+
+}} // namespace at::_ops
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr.h new file mode 100644 index 0000000000000000000000000000000000000000..85d0b693cd83ae0409a831ea2b75ace120a35b16 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R) +inline ::std::tuple linalg_qr(const at::Tensor & A, c10::string_view mode="reduced") { + return at::_ops::linalg_qr::call(A, mode); +} + +// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) +inline ::std::tuple linalg_qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & A, c10::string_view mode="reduced") { + return at::_ops::linalg_qr_out::call(A, mode, Q, R); +} +// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) +inline ::std::tuple linalg_qr_outf(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) { + return at::_ops::linalg_qr_out::call(A, mode, Q, R); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6fae6674e97a3d931c3f2c2fa5e1f8a28ca99ba7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
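With the operator structs in place, the inline wrappers in linalg_qr.h simply forward to at::_ops::linalg_qr::call. From user code the functional form looks like this (a sketch, assuming a float CPU input; C++17 structured bindings unpack the returned tuple):

    #include <ATen/ATen.h>

    void qr_demo() {
      at::Tensor A = at::randn({4, 3});
      auto [Q, R] = at::linalg_qr(A, /*mode=*/"reduced");  // Q: 4x3, R: 3x3
      bool ok = at::allclose(at::matmul(Q, R), A, /*rtol=*/1e-4, /*atol=*/1e-5);
      (void)ok;
    }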
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple linalg_qr(const at::Tensor & A, c10::string_view mode="reduced"); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4c3f52ae8fc4dd2cda0d80d9492a479bc84e9738 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple linalg_qr(const at::Tensor & A, c10::string_view mode="reduced"); +TORCH_API ::std::tuple linalg_qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & A, c10::string_view mode="reduced"); +TORCH_API ::std::tuple linalg_qr_outf(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8b118c5002dd48c677d6caa633bd24b0468abe22 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
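Unlike the at:: wrappers, the generated at::cpu:: namespace binds straight to the CPU kernel. A hedged sketch: the caller is expected to pass CPU tensors, since the dispatcher is bypassed here:

    #include <ATen/ATen.h>
    #include <ATen/ops/linalg_qr_cpu_dispatch.h>

    void qr_cpu_demo() {
      at::Tensor A = at::randn({4, 4});                // CPU tensor
      auto [Q, R] = at::cpu::linalg_qr(A, "reduced");  // direct CPU kernel call
      (void)Q; (void)R;
    }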
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple linalg_qr(const at::Tensor & A, c10::string_view mode="reduced"); +TORCH_API ::std::tuple linalg_qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & A, c10::string_view mode="reduced"); +TORCH_API ::std::tuple linalg_qr_outf(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..ce18f3482a32d9987749ad611a06db74f0649146 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_qr : public at::impl::MetaBase { + + + void meta(const at::Tensor & A, c10::string_view mode); +}; + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8c51247cd23d755cb4bc270a3887a10141fd75d4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple linalg_qr(const at::Tensor & A, c10::string_view mode="reduced"); +TORCH_API ::std::tuple linalg_qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & A, c10::string_view mode="reduced"); +TORCH_API ::std::tuple linalg_qr_outf(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_native.h new file mode 100644 index 0000000000000000000000000000000000000000..eb525d331bf0584d135e1334f853849b64f2b1f5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_qr_out : public at::meta::structured_linalg_qr { +void impl(const at::Tensor & A, c10::string_view mode, const at::Tensor & Q, const at::Tensor & R); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f73dfb08929a1e9e122cadbafde98710c6ec039a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_qr_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_qr { + using schema = ::std::tuple (const at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_qr"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)"; + static ::std::tuple call(const at::Tensor & A, c10::string_view mode); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view mode); +}; + +struct TORCH_API linalg_qr_out { + using schema = ::std::tuple (const at::Tensor &, c10::string_view, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_qr"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) 
R)"; + static ::std::tuple call(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_slogdet.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_slogdet.h new file mode 100644 index 0000000000000000000000000000000000000000..cddfcc437c02468b2374503dc5cf57ee1d07fc68 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_slogdet.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet) +inline ::std::tuple linalg_slogdet(const at::Tensor & A) { + return at::_ops::linalg_slogdet::call(A); +} + +// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) +inline ::std::tuple linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, const at::Tensor & A) { + return at::_ops::linalg_slogdet_out::call(A, sign, logabsdet); +} +// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) +inline ::std::tuple linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) { + return at::_ops::linalg_slogdet_out::call(A, sign, logabsdet); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_slogdet_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_slogdet_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6865d2d7697e156c1cec7d9707a82c466507cd7f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_slogdet_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple linalg_slogdet(const at::Tensor & A); +TORCH_API ::std::tuple linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, const at::Tensor & A); +TORCH_API ::std::tuple linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_slogdet_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_slogdet_native.h new file mode 100644 index 0000000000000000000000000000000000000000..25794d4d4f1e80032a2756db9b66811b8847396a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_slogdet_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple linalg_slogdet(const at::Tensor & A); +TORCH_API ::std::tuple linalg_slogdet_out(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_slogdet_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_slogdet_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3bcb80beacda57d50ad87116235af79982c99414 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_slogdet_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_slogdet { + using schema = ::std::tuple (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_slogdet"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)"; + static ::std::tuple call(const at::Tensor & A); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A); +}; + +struct TORCH_API linalg_slogdet_out { + using schema = ::std::tuple (const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_slogdet"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) 
logabsdet)"; + static ::std::tuple call(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve.h new file mode 100644 index 0000000000000000000000000000000000000000..f61e382db5d6a43c6f336b9b09a625aafbbe8c40 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor +inline at::Tensor linalg_solve(const at::Tensor & A, const at::Tensor & B, bool left=true) { + return at::_ops::linalg_solve::call(A, B, left); +} + +// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_solve_out(at::Tensor & out, const at::Tensor & A, const at::Tensor & B, bool left=true) { + return at::_ops::linalg_solve_out::call(A, B, left, out); +} +// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_solve_outf(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) { + return at::_ops::linalg_solve_out::call(A, B, left, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7e2d5e1bf4dc0cfd5e4cbf95de291763fbcdda4a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
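The left flag in the schema above selects between solving AX = B and XA = B. A usage sketch against the public ATen C++ frontend (shapes illustrative):

    #include <ATen/ATen.h>

    void solve_demo() {
      at::Tensor A = at::randn({3, 3});
      at::Tensor B = at::randn({3, 2});
      at::Tensor X = at::linalg_solve(A, B, /*left=*/true);  // solves A X = B
      // With left=false the same call would solve X A = B instead.
      bool ok = at::allclose(at::matmul(A, X), B, /*rtol=*/1e-4, /*atol=*/1e-5);
      (void)ok;
    }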
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_solve(const at::Tensor & A, const at::Tensor & B, bool left=true); +TORCH_API at::Tensor & linalg_solve_out(at::Tensor & out, const at::Tensor & A, const at::Tensor & B, bool left=true); +TORCH_API at::Tensor & linalg_solve_outf(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ex.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ex.h new file mode 100644 index 0000000000000000000000000000000000000000..71daf5311b7b3e8ff902267b0bccd80b1a21088e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ex.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info) +inline ::std::tuple linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) { + return at::_ops::linalg_solve_ex::call(A, B, left, check_errors); +} + +// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info) +inline ::std::tuple linalg_solve_ex_out(at::Tensor & result, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) { + return at::_ops::linalg_solve_ex_out::call(A, B, left, check_errors, result, info); +} +// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info) +inline ::std::tuple linalg_solve_ex_outf(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) { + return at::_ops::linalg_solve_ex_out::call(A, B, left, check_errors, result, info); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ex_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ex_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c009d9a5d84d56a28cb1725e614ba34b59317905 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ex_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
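linalg_solve_ex is the non-throwing variant of linalg_solve: with check_errors=false, factorization failure is reported through the returned info tensor rather than raised as an exception. Sketch:

    #include <ATen/ATen.h>

    void solve_ex_demo() {
      at::Tensor A = at::randn({3, 3});
      at::Tensor B = at::randn({3, 1});
      auto [X, info] = at::linalg_solve_ex(A, B, /*left=*/true,
                                           /*check_errors=*/false);
      bool ok = info.eq(0).all().item<bool>();  // nonzero info flags a singular input
      (void)X; (void)ok;
    }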
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false); +TORCH_API ::std::tuple linalg_solve_ex_out(at::Tensor & result, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false); +TORCH_API ::std::tuple linalg_solve_ex_outf(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ex_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fd0959a6272c86d52d9fe6eae67b47c7eb579765 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ex_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false); +TORCH_API ::std::tuple linalg_solve_ex_out(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ex_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ex_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..61f99c876ddcbee1806524602438baa02ba71ba5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ex_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_solve_ex { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_solve_ex"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)"; + static ::std::tuple call(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors); +}; + +struct TORCH_API linalg_solve_ex_out { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_solve_ex"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) 
info) -> (Tensor(a!) result, Tensor(b!) info)"; + static ::std::tuple call(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_native.h new file mode 100644 index 0000000000000000000000000000000000000000..08c3f4d0460509484334b969ac8e83f951318769 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_solve(const at::Tensor & A, const at::Tensor & B, bool left=true); +TORCH_API at::Tensor & linalg_solve_out(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..913108bc7dce04a1ebc8adb5ca89506911be7c2b --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_solve { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_solve"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor"; + static at::Tensor call(const at::Tensor & A, const at::Tensor & B, bool left); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left); +}; + +struct TORCH_API linalg_solve_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_solve"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular.h new file mode 100644 index 0000000000000000000000000000000000000000..dff8ea95e6cd2c20a1867c65c9a2412a464e6db4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_solve_triangular_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false) { + return at::_ops::linalg_solve_triangular_out::call(self, B, upper, left, unitriangular, out); +} +// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_solve_triangular_outf(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) { + return at::_ops::linalg_solve_triangular_out::call(self, B, upper, left, unitriangular, out); +} + +// aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor +inline at::Tensor linalg_solve_triangular(const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false) { + return at::_ops::linalg_solve_triangular::call(self, B, upper, left, unitriangular); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fa7a9af994a00b36eec5f86df6505eef1ad161c1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
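Note that upper has no default in this schema, so every call site must pass it; self is the triangular coefficient matrix. Sketch with a well-conditioned lower-triangular system (the shifted diagonal keeps it invertible):

    #include <ATen/ATen.h>

    void solve_triangular_demo() {
      at::Tensor T = at::randn({3, 3}).tril() + at::eye(3) * 3.0;
      at::Tensor B = at::randn({3, 2});
      at::Tensor X = at::linalg_solve_triangular(
          T, B, /*upper=*/false, /*left=*/true, /*unitriangular=*/false);
      bool ok = at::allclose(at::matmul(T, X), B, /*rtol=*/1e-4, /*atol=*/1e-5);
      (void)ok;
    }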
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor linalg_solve_triangular(const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false); +TORCH_API at::Tensor & linalg_solve_triangular_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false); +TORCH_API at::Tensor & linalg_solve_triangular_outf(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7098f2d66a3a0ea75f99a7e322a17e704824252f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor linalg_solve_triangular(const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false); +TORCH_API at::Tensor & linalg_solve_triangular_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false); +TORCH_API at::Tensor & linalg_solve_triangular_outf(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3cbf0db749f15224e3ee4333ac40cbbaf4949c26 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_solve_triangular(const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false); +TORCH_API at::Tensor & linalg_solve_triangular_out(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e963b4ddd53ec2062f5455480bcdde9673542950 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_solve_triangular_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + 
+// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_solve_triangular_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_solve_triangular"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out); +}; + +struct TORCH_API linalg_solve_triangular { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_solve_triangular"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svd.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svd.h new file mode 100644 index 0000000000000000000000000000000000000000..b247664c7492edbbc5aca7d0d5c6baf1e470aa6e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svd.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh) +inline ::std::tuple linalg_svd(const at::Tensor & A, bool full_matrices=true, ::std::optional driver=::std::nullopt) { + return at::_ops::linalg_svd::call(A, full_matrices, driver); +} + +// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) +inline ::std::tuple linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=true, ::std::optional driver=::std::nullopt) { + return at::_ops::linalg_svd_U::call(A, full_matrices, driver, U, S, Vh); +} +// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) 
Vh) +inline ::std::tuple linalg_svd_outf(const at::Tensor & A, bool full_matrices, ::std::optional driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) { + return at::_ops::linalg_svd_U::call(A, full_matrices, driver, U, S, Vh); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svd_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svd_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..75f671e7de8a91dcd74d3f7be375f544ee581e0d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svd_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple linalg_svd(const at::Tensor & A, bool full_matrices=true, ::std::optional driver=::std::nullopt); +TORCH_API ::std::tuple linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=true, ::std::optional driver=::std::nullopt); +TORCH_API ::std::tuple linalg_svd_outf(const at::Tensor & A, bool full_matrices, ::std::optional driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svd_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svd_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0fe196e2e827dfcd77f4d5301e599eb9b75834a4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svd_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple linalg_svd(const at::Tensor & A, bool full_matrices=true, ::std::optional driver=::std::nullopt); +TORCH_API ::std::tuple linalg_svd_out(const at::Tensor & A, bool full_matrices, ::std::optional driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svd_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svd_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..88e66dcfce7c551435e42d36faa259b55c5dd519 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svd_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
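Putting the pieces together, a typical reduced SVD call and reconstruction looks as follows. A sketch: driver is left at its nullopt default (in current releases it is only meaningful for CUDA inputs), and the broadcasting trick scales the columns of U by the singular values:

    #include <ATen/ATen.h>

    void svd_demo() {
      at::Tensor A = at::randn({5, 3});
      auto [U, S, Vh] = at::linalg_svd(A, /*full_matrices=*/false);  // U: 5x3, S: 3, Vh: 3x3
      at::Tensor recon = at::matmul(U * S.unsqueeze(0), Vh);  // U diag(S) Vh
      bool ok = at::allclose(recon, A, /*rtol=*/1e-4, /*atol=*/1e-5);
      (void)ok;
    }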
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_svd { + using schema = ::std::tuple (const at::Tensor &, bool, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_svd"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)"; + static ::std::tuple call(const at::Tensor & A, bool full_matrices, ::std::optional driver); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, ::std::optional driver); +}; + +struct TORCH_API linalg_svd_U { + using schema = ::std::tuple (const at::Tensor &, bool, ::std::optional, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_svd"; + static constexpr const char* overload_name = "U"; + static constexpr const char* schema_str = "linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)"; + static ::std::tuple call(const at::Tensor & A, bool full_matrices, ::std::optional driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, ::std::optional driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svdvals.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svdvals.h new file mode 100644 index 0000000000000000000000000000000000000000..2745001c91063c25205a15b96523069b5228b78f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svdvals.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor +inline at::Tensor linalg_svdvals(const at::Tensor & A, ::std::optional driver=::std::nullopt) { + return at::_ops::linalg_svdvals::call(A, driver); +} + +// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_svdvals_out(at::Tensor & out, const at::Tensor & A, ::std::optional driver=::std::nullopt) { + return at::_ops::linalg_svdvals_out::call(A, driver, out); +} +// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & linalg_svdvals_outf(const at::Tensor & A, ::std::optional driver, at::Tensor & out) { + return at::_ops::linalg_svdvals_out::call(A, driver, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svdvals_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svdvals_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..37367f817916466857a762808d90af59665655e0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svdvals_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_svdvals(const at::Tensor & A, ::std::optional driver=::std::nullopt); +TORCH_API at::Tensor & linalg_svdvals_out(at::Tensor & out, const at::Tensor & A, ::std::optional driver=::std::nullopt); +TORCH_API at::Tensor & linalg_svdvals_outf(const at::Tensor & A, ::std::optional driver, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svdvals_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svdvals_native.h new file mode 100644 index 0000000000000000000000000000000000000000..16553ef450ba70b51d727bcbdee4dd98daac0d11 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svdvals_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_svdvals(const at::Tensor & A, ::std::optional driver=::std::nullopt); +TORCH_API at::Tensor & linalg_svdvals_out(const at::Tensor & A, ::std::optional driver, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svdvals_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svdvals_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a0450f559439b55119783c4cdb6b875d2cfbff96 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_svdvals_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
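Because linalg_svdvals skips the U and Vh factors, it is the cheaper choice when only the spectrum is needed, for instance for condition-number estimates. Sketch:

    #include <ATen/ATen.h>

    void svdvals_demo() {
      at::Tensor A = at::randn({5, 3});
      at::Tensor S = at::linalg_svdvals(A);   // singular values, descending order
      at::Tensor cond2 = S.max() / S.min();   // 2-norm condition number estimate
      (void)cond2;
    }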
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_svdvals { + using schema = at::Tensor (const at::Tensor &, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_svdvals"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor"; + static at::Tensor call(const at::Tensor & A, ::std::optional driver); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, ::std::optional driver); +}; + +struct TORCH_API linalg_svdvals_out { + using schema = at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_svdvals"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & A, ::std::optional driver, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, ::std::optional driver, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorinv.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorinv.h new file mode 100644 index 0000000000000000000000000000000000000000..494d6dc0c0e2f003a20594e4eb77eec01bdc2484 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorinv.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor +inline at::Tensor linalg_tensorinv(const at::Tensor & self, int64_t ind=2) { + return at::_ops::linalg_tensorinv::call(self, ind); +} + +// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_tensorinv_out(at::Tensor & out, const at::Tensor & self, int64_t ind=2) { + return at::_ops::linalg_tensorinv_out::call(self, ind, out); +} +// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_tensorinv_outf(const at::Tensor & self, int64_t ind, at::Tensor & out) { + return at::_ops::linalg_tensorinv_out::call(self, ind, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..da2a2bf761473f7b9ea78448555b35dae1aec907 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
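linalg_tensorinv groups the first ind dimensions as "rows" and the remainder as "columns"; the two groups must contain the same number of elements. Sketch:

    #include <ATen/ATen.h>

    void tensorinv_demo() {
      at::Tensor T = at::randn({4, 6, 6, 4});   // 4*6 == 6*4 elements per side
      at::Tensor Tinv = at::linalg_tensorinv(T, /*ind=*/2);  // shape: {6, 4, 4, 6}
      // Contracting Tinv with T over the matching pair of dimensions
      // recovers a reshaped identity tensor.
      (void)Tinv;
    }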
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_tensorinv(const at::Tensor & self, int64_t ind=2); +TORCH_API at::Tensor & linalg_tensorinv_out(at::Tensor & out, const at::Tensor & self, int64_t ind=2); +TORCH_API at::Tensor & linalg_tensorinv_outf(const at::Tensor & self, int64_t ind, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorinv_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorinv_native.h new file mode 100644 index 0000000000000000000000000000000000000000..92f8d37f76dbf93dbf2ae6e96f3635921b830b55 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorinv_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_tensorinv(const at::Tensor & self, int64_t ind=2); +TORCH_API at::Tensor & linalg_tensorinv_out(const at::Tensor & self, int64_t ind, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorinv_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorinv_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..550d84e4a581b258912fea870a88813187041393 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorinv_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_tensorinv { + using schema = at::Tensor (const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_tensorinv"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_tensorinv(Tensor self, int ind=2) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t ind); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind); +}; + +struct TORCH_API linalg_tensorinv_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_tensorinv"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t ind, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorsolve.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorsolve.h new file mode 100644 index 0000000000000000000000000000000000000000..ac5ebbab69e9d1a27d3a0c497b304a52a6983e24 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorsolve.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor +inline at::Tensor linalg_tensorsolve(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims=::std::nullopt) { + return at::_ops::linalg_tensorsolve::call(self, other, dims); +} + +// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_tensorsolve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims=::std::nullopt) { + return at::_ops::linalg_tensorsolve_out::call(self, other, dims, out); +} +// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_tensorsolve_outf(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) { + return at::_ops::linalg_tensorsolve_out::call(self, other, dims, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorsolve_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorsolve_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d6b6bd9639225ff5d6331e13a3f979feb8ee77c8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorsolve_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_tensorsolve(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims=::std::nullopt); +TORCH_API at::Tensor & linalg_tensorsolve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims=::std::nullopt); +TORCH_API at::Tensor & linalg_tensorsolve_outf(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorsolve_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorsolve_native.h new file mode 100644 index 0000000000000000000000000000000000000000..394c65ce2d18e0bcce48169f58b9319b623090d7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorsolve_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_tensorsolve(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims=::std::nullopt); +TORCH_API at::Tensor & linalg_tensorsolve_out(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorsolve_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorsolve_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..822d86da645705f8b178cbc6f11dcaddd3ccac93 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_tensorsolve_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_tensorsolve { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::OptionalIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_tensorsolve"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims); +}; + +struct TORCH_API linalg_tensorsolve_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::OptionalIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_tensorsolve"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vander.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vander.h new file mode 100644 index 0000000000000000000000000000000000000000..cc8a5efeb0e46f8ed1081f16db20e9670246a7c5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vander.h @@ -0,0 +1,48 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor +inline at::Tensor linalg_vander(const at::Tensor & x, ::std::optional N=::std::nullopt) { + return at::_ops::linalg_vander::call(x, N.has_value() ? ::std::make_optional(c10::SymInt(*N)) : ::std::nullopt); +} +namespace symint { + template >> + at::Tensor linalg_vander(const at::Tensor & x, ::std::optional N=::std::nullopt) { + return at::_ops::linalg_vander::call(x, N.has_value() ? ::std::make_optional(c10::SymInt(*N)) : ::std::nullopt); + } +} + +// aten::linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor +inline at::Tensor linalg_vander_symint(const at::Tensor & x, ::std::optional N=::std::nullopt) { + return at::_ops::linalg_vander::call(x, N); +} +namespace symint { + template >> + at::Tensor linalg_vander(const at::Tensor & x, ::std::optional N=::std::nullopt) { + return at::_ops::linalg_vander::call(x, N); + } +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a5e60269228a62fb2eb3266f3497317e460e91b1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_vander(const at::Tensor & x, ::std::optional<int64_t> N=::std::nullopt); +TORCH_API at::Tensor linalg_vander_symint(const at::Tensor & x, ::std::optional<c10::SymInt> N=::std::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vander_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vander_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ddd20fd59fa4d7a9fc0964c5feb37d5cffb5f922 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vander_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_vander_symint(const at::Tensor & x, ::std::optional<c10::SymInt> N=::std::nullopt); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vander_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vander_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f1119062cf0dadf5218f263d22c06a51faabece2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vander_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_vander { + using schema = at::Tensor (const at::Tensor &, ::std::optional<c10::SymInt>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_vander"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor"; + static at::Tensor call(const at::Tensor & x, ::std::optional<c10::SymInt> N); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, ::std::optional<c10::SymInt> N); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vecdot.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vecdot.h new file mode 100644 index 0000000000000000000000000000000000000000..8fd7fa403ed0857d4644f689a283d7744e6577d6 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vecdot.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor +inline at::Tensor linalg_vecdot(const at::Tensor & x, const at::Tensor & y, int64_t dim=-1) { + return at::_ops::linalg_vecdot::call(x, y, dim); +} + +// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & linalg_vecdot_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & y, int64_t dim=-1) { + return at::_ops::linalg_vecdot_out::call(x, y, dim, out); +} +// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_vecdot_outf(const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) { + return at::_ops::linalg_vecdot_out::call(x, y, dim, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vecdot_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vecdot_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7c385207c0a6e0f144eec295769058f5ed31574d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vecdot_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_vecdot(const at::Tensor & x, const at::Tensor & y, int64_t dim=-1); +TORCH_API at::Tensor & linalg_vecdot_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & y, int64_t dim=-1); +TORCH_API at::Tensor & linalg_vecdot_outf(const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vecdot_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vecdot_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bef771bceea8e531ebd5eb65ff34ba76582e4185 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vecdot_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_vecdot(const at::Tensor & x, const at::Tensor & y, int64_t dim=-1); +TORCH_API at::Tensor & linalg_vecdot_out(const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vecdot_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vecdot_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..24d6460903ff27c12d4cd9f1d5039df7282d30c8 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vecdot_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
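// [Editor's note: illustrative sketch, not part of the generated header.]
// The three entry points declared for linalg_vecdot follow the codegen naming
// convention: the functional form allocates its result, `*_out` takes the out
// tensor first so the remaining defaults stay usable, and `*_outf` keeps the
// schema argument order with `out` last and no defaults. For example:
//
//   at::Tensor x = at::randn({2, 3}), y = at::randn({2, 3});
//   at::Tensor d = at::linalg_vecdot(x, y);          // dot over dim=-1, shape {2}
//   at::Tensor out = at::empty({2});
//   at::linalg_vecdot_out(out, x, y);                // out-first, dim defaulted
//   at::linalg_vecdot_outf(x, y, /*dim=*/-1, out);   // schema order, out last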
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_vecdot { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_vecdot"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor"; + static at::Tensor call(const at::Tensor & x, const at::Tensor & y, int64_t dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & y, int64_t dim); +}; + +struct TORCH_API linalg_vecdot_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_vecdot"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..9b8c90f9a1cbf2c00cf41f3271171ad1f530ab87 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor linalg_vector_norm(const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional dtype=::std::nullopt) { + return at::_ops::linalg_vector_norm::call(self, ord, dim, keepdim, dtype); +} + +// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_vector_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional dtype=::std::nullopt) { + return at::_ops::linalg_vector_norm_out::call(self, ord, dim, keepdim, dtype, out); +} +// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & linalg_vector_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out) { + return at::_ops::linalg_vector_norm_out::call(self, ord, dim, keepdim, dtype, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..351e6c39e2bf22fdb8aa3b4163ba333feaceccaa --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor linalg_vector_norm(const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional dtype=::std::nullopt); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0c5b3b83b6b991e16a9dfa9b0c4f5c9ec859f1d5 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
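// [Editor's note: illustrative sketch, not part of the generated header.]
// linalg_vector_norm reduces over the optional `dim` list (all elements when
// dim=None), with `ord` as the norm order and an optional accumulation dtype:
//
//   at::Tensor t  = at::randn({4, 5});
//   at::Tensor n2 = at::linalg_vector_norm(t);        // 2-norm of all 20 elements
//   at::Tensor n1 = at::linalg_vector_norm(
//       t, /*ord=*/1, /*dim=*/at::IntArrayRef{1},
//       /*keepdim=*/true);                            // shape {4, 1}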
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor linalg_vector_norm(const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & linalg_vector_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & linalg_vector_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..973e698489db1aa5c7032ecff10d18f538094d11 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor linalg_vector_norm(const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & linalg_vector_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & linalg_vector_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..cc3734a7e3440bcde73723cb4810d4573edb0386 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_vector_norm : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype); +}; + +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3eb9bb0fc9e1c6c594b99a966723588c58cfa5f8 --- /dev/null +++ 
b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor linalg_vector_norm(const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & linalg_vector_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & linalg_vector_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a73eb41076b22dcdf4a1726205a7562523c1a3bb --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_vector_norm_out : public at::meta::structured_linalg_vector_norm { +void impl(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8660d5d0c18fbb4a580f29a685c40a252f6e93b0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linalg_vector_norm_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_vector_norm { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_vector_norm"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype); +}; + +struct TORCH_API linalg_vector_norm_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_vector_norm"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linear.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear.h new file mode 100644 index 0000000000000000000000000000000000000000..5d74e32c0e4b23b82881c7e5cf5c5e90985a819d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor +inline at::Tensor linear(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias={}) { + return at::_ops::linear::call(input, weight, bias); +} + +// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linear_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias={}) { + return at::_ops::linear_out::call(input, weight, bias, out); +} +// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & linear_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & out) { + return at::_ops::linear_out::call(input, weight, bias, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..faa23091f81982a31efd2708089b6a48cf4d0a8e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linear_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) { + return at::_ops::linear_backward::call(self, grad_output, weight, output_mask); +} + +// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) { + return at::_ops::linear_backward_out::call(self, grad_output, weight, output_mask, out0, out1, out2); +} +// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::linear_backward_out::call(self, grad_output, weight, output_mask, out0, out1, out2); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_backward_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..16e98080d8a220532daaaf164ec38071a70e4594 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
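// [Editor's note: illustrative sketch, not part of the generated header.]
// `output_mask` selects which of the three gradients linear_backward actually
// computes: {grad_input, grad_weight, grad_bias}. Masked-out entries come back
// as undefined tensors, e.g.:
//
//   auto grads = at::linear_backward(self, grad_output, weight,
//                                    {{true, true, false}});  // skip bias grad
//   at::Tensor grad_input  = std::get<0>(grads);
//   at::Tensor grad_weight = std::get<1>(grads);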
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple linear_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask); +TORCH_API ::std::tuple linear_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..dd7298f1fa88842b66005c8c7fbe0a2eff65af06 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple linear_backward_out(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +TORCH_API ::std::tuple nested_linear_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2565529e66baaca5771c324dd492f7f453d5ee75 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
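// [Editor's note: illustrative sketch, not part of the generated header.]
// As with the functional variant, the dispatch declarations above show the two
// out-variant spellings generated for every op: `linear_backward_out` puts the
// out tensors first, while `linear_backward_outf` preserves the schema order
// with the out tensors trailing. Both forward to the same _ops entry, so
//
//   at::linear_backward_out(out0, out1, out2, self, grad_output, weight, mask);
//   at::linear_backward_outf(self, grad_output, weight, mask, out0, out1, out2);
//
// are interchangeable ways to write into preallocated gradient tensors.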
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linear_backward { + using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::array<bool,3>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linear_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)"; + static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask); + static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask); +}; + +struct TORCH_API linear_backward_out { + using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::array<bool,3>, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linear_backward"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"; + static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f904a1652fbe86048297a4784dce0041b6942d7d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
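// [Editor's note: illustrative sketch, not part of the generated header.]
// at::linear takes the weight in (out_features, in_features) layout plus an
// optional bias; passing {} (or ::std::nullopt) leaves the bias term off:
//
//   at::Tensor in = at::randn({8, 16});
//   at::Tensor w  = at::randn({4, 16});
//   at::Tensor b  = at::randn({4});
//   at::Tensor y  = at::linear(in, w, b);  // in.matmul(w.t()) + b, shape {8, 4}
//   at::Tensor y2 = at::linear(in, w);     // bias defaults to an empty optional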
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & linear_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias={}); +TORCH_API at::Tensor & linear_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2e4b992a4f799a6c368e7be14753d08a67b65ee7 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linear(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias={}); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c7ff3440bcc5c9f6218dd3eefaf99cb1c7499a7c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linear(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias={}); +TORCH_API at::Tensor & linear_out(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::Tensor & out); +TORCH_API at::Tensor nested_linear(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias={}); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..60eb311b1febd7dfcb41f9e64f347417bd2edb47 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linear_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
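// [Editor's note: illustrative sketch, not part of the generated header.]
// The _ops structs declared below are the low-level dispatcher entries that
// the inline at::linear wrappers forward to. `call` enters the dispatcher from
// the top, while `redispatch` takes an explicit DispatchKeySet so a kernel
// (e.g. an autograd kernel) can re-enter the dispatcher below its own key.
// These two top-level calls are equivalent:
//
//   at::Tensor y1 = at::linear(in, w, b);
//   at::Tensor y2 = at::_ops::linear::call(in, w, b);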
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linear { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linear"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor"; + static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias); +}; + +struct TORCH_API linear_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linear"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace.h new file mode 100644 index 0000000000000000000000000000000000000000..25cf706746438a08a364e7317f121e621f51c75f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace.h @@ -0,0 +1,98 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}) { + return at::_ops::linspace::call(start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::linspace::call(start, end, steps, dtype, layout, device, pin_memory); +} + +// aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::TensorOptions options={}) { + return at::_ops::linspace_Tensor_Tensor::call(start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::linspace_Tensor_Tensor::call(start, end, steps, dtype, layout, device, pin_memory); +} + +// aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}) { + return at::_ops::linspace_Tensor_Scalar::call(start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::linspace_Tensor_Scalar::call(start, end, steps, dtype, layout, device, pin_memory); +} + +// aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::TensorOptions options={}) { + return at::_ops::linspace_Scalar_Tensor::call(start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::linspace_Scalar_Tensor::call(start, end, steps, dtype, layout, device, pin_memory); +} + +// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps) { + return at::_ops::linspace_out::call(start, end, steps, out); +} +// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) { + return at::_ops::linspace_out::call(start, end, steps, out); +} + +// aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & linspace_out(at::Tensor & out, const at::Tensor & start, const at::Tensor & end, int64_t steps) { + return at::_ops::linspace_Tensor_Tensor_out::call(start, end, steps, out); +} +// aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_outf(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out) { + return at::_ops::linspace_Tensor_Tensor_out::call(start, end, steps, out); +} + +// aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_out(at::Tensor & out, const at::Tensor & start, const at::Scalar & end, int64_t steps) { + return at::_ops::linspace_Tensor_Scalar_out::call(start, end, steps, out); +} +// aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_outf(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out) { + return at::_ops::linspace_Tensor_Scalar_out::call(start, end, steps, out); +} + +// aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Tensor & end, int64_t steps) { + return at::_ops::linspace_Scalar_Tensor_out::call(start, end, steps, out); +} +// aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_outf(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out) { + return at::_ops::linspace_Scalar_Tensor_out::call(start, end, steps, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..efabda8e8f1fc2f5bfcf686060f19f86dfcdc18e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_compositeexplicitautograd_dispatch.h @@ -0,0 +1,36 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
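// [Editor's note: illustrative sketch, not part of the generated header.]
// Each linspace overload comes in two spellings: a convenience form taking a
// packed at::TensorOptions, and the unpacked form with separate optional
// dtype/layout/device/pin_memory arguments that matches the dispatcher schema:
//
//   at::Tensor a = at::linspace(0.0, 1.0, 5);  // default options
//   at::Tensor b = at::linspace(0.0, 1.0, 5,
//                               at::TensorOptions().dtype(at::kDouble));
//   at::Tensor c = at::linspace(0.0, 1.0, 5, at::kDouble,
//                               ::std::nullopt, ::std::nullopt, ::std::nullopt);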
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}); +TORCH_API at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::TensorOptions options={}); +TORCH_API at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor & linspace_out(at::Tensor & out, const at::Tensor & start, const at::Tensor & end, int64_t steps); +TORCH_API at::Tensor & linspace_outf(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out); +TORCH_API at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}); +TORCH_API at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor & linspace_out(at::Tensor & out, const at::Tensor & start, const at::Scalar & end, int64_t steps); +TORCH_API at::Tensor & linspace_outf(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out); +TORCH_API at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::TensorOptions options={}); +TORCH_API at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Tensor & end, int64_t steps); +TORCH_API at::Tensor & linspace_outf(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f6b8c3b358d28c10b9183f705a3ae6f41c0ead82 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps); +TORCH_API at::Tensor & linspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..703614c93309834b649ed50b412b18db8831ea1a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps); +TORCH_API at::Tensor & linspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7b5f56a78cb33bb0fd7e4f6ac91ccacbddc87765 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_meta_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
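// [Editor's note: illustrative sketch, not part of the generated header.]
// The at::cpu, at::cuda, and at::meta namespaces expose the per-backend
// kernels directly, bypassing dispatch. The meta kernel only computes output
// metadata (shape/dtype) without touching data, which shape inference and
// tracing rely on; presumably something like:
//
//   at::Tensor m = at::linspace(0.0, 1.0, 5,
//                               at::TensorOptions().device(at::kMeta));
//   // m.sizes() == {5}, but m carries no actual values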
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_native.h new file mode 100644 index 0000000000000000000000000000000000000000..02af1887f40fa34bd5790504a906a555d169314d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}); +TORCH_API at::Tensor & linspace_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out); +TORCH_API at::Tensor & linspace_cuda_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out); +TORCH_API at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}); +TORCH_API at::Tensor & linspace_out(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out); +TORCH_API at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}); +TORCH_API at::Tensor & linspace_out(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out); +TORCH_API at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}); +TORCH_API at::Tensor & linspace_out(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..530322fb75f61bd80622931abb4ec0ac7faf5351 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/linspace_ops.h @@ -0,0 +1,106 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <optional> +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API linspace { + using schema = at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(const at::Scalar & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API linspace_Tensor_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "Tensor_Tensor"; + static constexpr const char* schema_str = "linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(const at::Tensor & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API linspace_Tensor_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, int64_t, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "Tensor_Scalar"; + static constexpr const char* schema_str = "linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
pin_memory=None) -> Tensor"; + static at::Tensor call(const at::Tensor & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API linspace_Scalar_Tensor { + using schema = at::Tensor (const at::Scalar &, const at::Tensor &, int64_t, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "Scalar_Tensor"; + static constexpr const char* schema_str = "linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(const at::Scalar & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API linspace_out { + using schema = at::Tensor & (const at::Scalar &, const at::Scalar &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out); +}; + +struct TORCH_API linspace_Tensor_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "Tensor_Tensor_out"; + static constexpr const char* schema_str = "linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out); +}; + +struct TORCH_API linspace_Tensor_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "Tensor_Scalar_out"; + static constexpr const char* schema_str = "linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out); +}; + +struct TORCH_API linspace_Scalar_Tensor_out { + using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "Scalar_Tensor_out"; + static constexpr const char* schema_str = "linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out); +}; + +}} // namespace at::_ops
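Note the convention running through linspace_ops.h above: every overload is keyed by name plus overload_name (Tensor_Tensor, Tensor_Scalar, Scalar_Tensor, out, ...), and the C++ API layers a `_out` wrapper (out argument first) and a `_outf` wrapper (schema order, out argument last) over the same operator. A short sketch of the calling convention, illustrative only:

    at::Tensor out = at::empty({7}, at::kDouble);
    // Both calls resolve to the same aten::linspace.out operator:
    at::linspace_out(out, 0.0, 3.0, 7);   // C++-friendly order: out first
    at::linspace_outf(0.0, 3.0, 7, out);  // schema order: out last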
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log.h new file mode 100644 index 0000000000000000000000000000000000000000..a5eed7be2d8fa7d114899e4c041533a642e73ad2 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/log_ops.h> + +namespace at { + + +// aten::log(Tensor self) -> Tensor +inline at::Tensor log(const at::Tensor & self) { + return at::_ops::log::call(self); +} + +// aten::log_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & log_(at::Tensor & self) { + return at::_ops::log_::call(self); +} + +// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::log_out::call(self, out); +} +// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::log_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log10.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10.h new file mode 100644 index 0000000000000000000000000000000000000000..353c3a7b395fea2607ae2ccaca72f9cafdefe2d1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/log10_ops.h> + +namespace at { + + +// aten::log10(Tensor self) -> Tensor +inline at::Tensor log10(const at::Tensor & self) { + return at::_ops::log10::call(self); +} + +// aten::log10_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & log10_(at::Tensor & self) { + return at::_ops::log10_::call(self); +} + +// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log10_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::log10_out::call(self, out); +} +// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & log10_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::log10_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f69274c84448b06f38aa2b43b5ee47a29d952264 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor log10(const at::Tensor & self); +TORCH_API at::Tensor & log10_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..abcc8a1a2b2c935f619968a2e3790c46032b9110 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor log10(const at::Tensor & self); +TORCH_API at::Tensor & log10_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log10_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log10_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b0c00f856c3e8804e3eeb911c7bf9d35cb60f992 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor log10(const at::Tensor & self); +TORCH_API at::Tensor & log10_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log10_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log10_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..9acd3ccb50a348def347a7d6be77dc64302637c3 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_log10 : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5f295d6066b5fdb663f5c603a8af2b07c70da133 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor log10(const at::Tensor & self); +TORCH_API at::Tensor & log10_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log10_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log10_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bd63fd725c2dfbb6878caf1a59612d5f8ed71c1c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/log10_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_log10_out : public at::meta::structured_log10 { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3973c1b07a4b56a63e023c2079dbec6a4fcc216a --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log10_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <optional> +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API log10 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log10"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log10(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API log10_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log10_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log10_(Tensor(a!) self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API log10_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log10"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops
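The log10 headers show torchgen's structured-kernel layering: log10_meta.h declares structured_log10::meta() for shape/dtype inference on top of TensorIteratorBase, log10_native.h derives structured_log10_out with the concrete impl(), and log10_ops.h carries the schema strings the dispatcher registers. A sketch of how the layers are reached from user code, assuming a standard libtorch build:

    at::Tensor x = at::rand({3});
    at::Tensor a = at::log10(x);              // inline wrapper from log10.h
    at::Tensor b = at::_ops::log10::call(x);  // same operator, invoked via the schema struct
    // at::_ops::log10::name and schema_str identify "aten::log10" in the dispatcher registry.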
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p.h new file mode 100644 index 0000000000000000000000000000000000000000..35e5145128eb5d0f3f5b78dfdb59b21d206cffbf --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/log1p_ops.h> + +namespace at { + + +// aten::log1p(Tensor self) -> Tensor +inline at::Tensor log1p(const at::Tensor & self) { + return at::_ops::log1p::call(self); +} + +// aten::log1p_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & log1p_(at::Tensor & self) { + return at::_ops::log1p_::call(self); +} + +// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::log1p_out::call(self, out); +} +// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::log1p_out::call(self, out); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..15ea0538089b40e0be091aa636ec654bfbde899e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor log1p(const at::Tensor & self); +TORCH_API at::Tensor & log1p_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4b5de69930eecbaeab04118e9867f1d156f4c3f4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor log1p(const at::Tensor & self); +TORCH_API at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log1p_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ed7a4ef76d1108516fe943da88cef929ceab6553 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor log1p(const at::Tensor & self); +TORCH_API at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log1p_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..eff5db54c5a06bc947ee7633e47ecaab3bd43ca1 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_log1p : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..965ca3323275672f457230365d5ffa7c23936596 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log1p_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor log1p(const at::Tensor & self); +TORCH_API at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log1p_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid.h new file mode 100644 index 0000000000000000000000000000000000000000..c3f148bf8da8d34123acdcafedee94daf1a71add --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/log_sigmoid_ops.h> + +namespace at { + + +// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_sigmoid_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::log_sigmoid_out::call(self, out); +} +// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_sigmoid_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::log_sigmoid_out::call(self, out); +} + +// aten::log_sigmoid(Tensor self) -> Tensor +inline at::Tensor log_sigmoid(const at::Tensor & self) { + return at::_ops::log_sigmoid::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..43c9c03abc4b3a11cf300d5d83d2f2f9eb25662e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/log_sigmoid_backward_ops.h> + +namespace at { + + +// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) { + return at::_ops::log_sigmoid_backward_grad_input::call(grad_output, self, buffer, grad_input); +} +// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & log_sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) { + return at::_ops::log_sigmoid_backward_grad_input::call(grad_output, self, buffer, grad_input); +} + +// aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor +inline at::Tensor log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) { + return at::_ops::log_sigmoid_backward::call(grad_output, self, buffer); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4bfe173b9b41d29b58a47f0f9a0da67bded14e70 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +TORCH_API at::Tensor & log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +TORCH_API at::Tensor & log_sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5e55966eacc45bd3ce33880b0bee6f085aaa3b83 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +TORCH_API at::Tensor & log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +TORCH_API at::Tensor & log_sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c3b377d8573cb63d44e5dfdd519a2d7a82805a0c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor log_sigmoid_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +TORCH_API at::Tensor & log_sigmoid_backward_cpu_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); +TORCH_API at::Tensor log_sigmoid_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +TORCH_API at::Tensor & log_sigmoid_backward_cuda_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4c598768a159cd3e21fab28715c62e88d1a83a4f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <optional> +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API log_sigmoid_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_sigmoid_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!)
grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); +}; + +struct TORCH_API log_sigmoid_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_sigmoid_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +}; + +}} // namespace at::_ops diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8d825c8c906b34b8ac830e3449abe4187d6bba40 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor log_sigmoid(const at::Tensor & self); +TORCH_API at::Tensor & log_sigmoid_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log_sigmoid_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward.h new file mode 100644 index 0000000000000000000000000000000000000000..350408a6813ea798036ce64d293e2cd3e8d5ec96 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/log_sigmoid_forward_ops.h> + +namespace at { + + +// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out(at::Tensor & output, at::Tensor & buffer, const at::Tensor & self) { + return at::_ops::log_sigmoid_forward_output::call(self, output, buffer); +} +// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!)
buffer) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_outf(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) { + return at::_ops::log_sigmoid_forward_output::call(self, output, buffer); +} + +// aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer) +inline ::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward(const at::Tensor & self) { + return at::_ops::log_sigmoid_forward::call(self); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a6fcdb46a9571fc5ccf45b6c8bf0495301364062 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out(at::Tensor & output, at::Tensor & buffer, const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_outf(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); + +} // namespace cpu +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7591e4d5193c3c229fd51a44e410e458be379f0c --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h> + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out(at::Tensor & output, at::Tensor & buffer, const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_outf(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); + +} // namespace cuda +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a40723622e0da3dd45d6f40efaf14c2a926c7bdd --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward_cpu(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out_cpu(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward_cuda(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out_cuda(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3f410fa83bd6dcf07474ea429f1ddc6e0745534e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_forward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <optional> +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API log_sigmoid_forward_output { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_sigmoid_forward"; + static constexpr const char* overload_name = "output"; + static constexpr const char* schema_str = "log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +}; + +struct TORCH_API log_sigmoid_forward { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_sigmoid_forward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops
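As the declarations above make explicit, log_sigmoid_forward returns an (output, buffer) pair: the buffer caches intermediate values so that log_sigmoid_backward, declared earlier in this diff, can reuse them instead of recomputing. A usage sketch, with an all-ones grad_output purely for illustration:

    at::Tensor x = at::randn({4});
    auto [output, buffer] = at::log_sigmoid_forward(x);
    at::Tensor gx = at::log_sigmoid_backward(at::ones_like(output), x, buffer);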
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e1878c3d0b42d0d615df467f9c6c3c63f10f94a0 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor log_sigmoid(const at::Tensor & self); +TORCH_API at::Tensor & log_sigmoid_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3b2b4cbe0d38eb6ad69ddb8735893ff8234d9fc4 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_sigmoid_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <optional> +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API log_sigmoid_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_sigmoid"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API log_sigmoid { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_sigmoid"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log_sigmoid(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops
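log_sigmoid itself has no backend kernels: per the compositeimplicitautograd dispatch header earlier in this diff it is a CompositeImplicitAutograd op, and in PyTorch's native implementation it forwards to log_sigmoid_forward and drops the buffer, which is how it inherits autograd support from the forward/backward pair. A sketch of that equivalence (an assumption about the implementation, not shown in this diff):

    at::Tensor x = at::randn({4});
    at::Tensor y = at::log_sigmoid(x);
    // Equivalent, modulo the discarded buffer:
    at::Tensor y2 = std::get<0>(at::log_sigmoid_forward(x));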
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax.h new file mode 100644 index 0000000000000000000000000000000000000000..24af20219f3c5d5e05b5b7d449009a704798092e --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/log_softmax_ops.h> + +namespace at { + + +// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor +inline at::Tensor log_softmax(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt) { + return at::_ops::log_softmax_int::call(self, dim, dtype); +} + +// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt) { + return at::_ops::log_softmax_int_out::call(self, dim, dtype, out); +} +// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_softmax_outf(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) { + return at::_ops::log_softmax_int_out::call(self, dim, dtype, out); +} + +// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor log_softmax(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype=::std::nullopt) { + return at::_ops::log_softmax_Dimname::call(self, dim, dtype); +} + +} diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1a9b22193895ed66c7dbbab9f7031be7130f2a61 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & log_softmax_outf(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e7232dd3430bcf5aa20515e75ddddcc2f50eae1f --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor log_softmax(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor log_softmax(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype=::std::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a8b6521041a827bcf011133ce2fc3dec6ed01310 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor log_softmax(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & log_softmax_out(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); +TORCH_API at::Tensor log_softmax(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype=::std::nullopt); +} // namespace native +} // namespace at diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..800a32cf24234375d6bc6dd4ef0547dd64eea928 --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/log_softmax_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <optional> +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API log_softmax_int { + using schema = at::Tensor (const at::Tensor &, int64_t, ::std::optional<at::ScalarType>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_softmax"; + static constexpr const char* overload_name = "int"; + static constexpr const char* schema_str = "log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype); +}; + +struct TORCH_API log_softmax_int_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, ::std::optional<at::ScalarType>, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_softmax"; + static constexpr const char* overload_name = "int_out"; + static constexpr const char* schema_str = "log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); +}; + +struct TORCH_API log_softmax_Dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, ::std::optional<at::ScalarType>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_softmax"; + static constexpr const char* overload_name = "Dimname"; + static constexpr const char* schema_str = "log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype); +}; + +}} // namespace at::_ops
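The ::std::optional<at::ScalarType> dtype parameter threaded through all of the log_softmax declarations above lets callers ask for the computation to be performed, and the result returned, in a different type, which is the usual way to avoid precision loss for reduced-precision inputs. A sketch with illustrative values:

    at::Tensor x = at::randn({2, 3});                                     // float32 input
    at::Tensor y = at::log_softmax(x, /*dim=*/1, /*dtype=*/at::kDouble);  // computed and returned as float64
    at::Tensor z = at::log_softmax(x, /*dim=*/1);                         // dtype left as ::std::nullopt: stays float32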
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift.h new file mode 100644 index 0000000000000000000000000000000000000000..b0812b6b027cbc6bdf5c1e2ffbb30c9cb77fbf8d --- /dev/null +++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/OptionalArrayRef.h> +#include <optional> + + + +#include <ATen/ops/lshift_ops.h> + +namespace at { + + +// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor __lshift__(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__lshift___Scalar::call(self, other); +} + +// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__lshift___Tensor::call(self, other); +} + +// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__lshift___Scalar_out::call(self, other, out); +} +// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift.h
new file mode 100644
index 0000000000000000000000000000000000000000..b0812b6b027cbc6bdf5c1e2ffbb30c9cb77fbf8d
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift.h
@@ -0,0 +1,54 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/SymInt.h>
+
+
+
+#include <ATen/ops/lshift_ops.h>
+
+namespace at {
+
+
+// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor __lshift__(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::__lshift___Scalar::call(self, other);
+}
+
+// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::__lshift___Tensor::call(self, other);
+}
+
+// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::__lshift___Scalar_out::call(self, other, out);
+}
+// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & __lshift___outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::__lshift___Scalar_out::call(self, other, out);
+}
+
+// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::__lshift___Tensor_out::call(self, other, out);
+}
+// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & __lshift___outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::__lshift___Tensor_out::call(self, other, out);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..13a74136b31e96ae1f20e5597931605648cd06d9
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & __lshift___outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & __lshift___outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e690813441b9f0fc0a22a458a733ad0a1503103c
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3283c9470663c2aee26ca400b4f33320e8986d8d
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..06bd804ece09f44f78bf857a3fa42261e15cc953
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_meta_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a7036fe0ebe4b43c0d56c5a90a3744ed824ba0d
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & __lshift___Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & __lshift___Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Tensor & other);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..7a7c4fa0dce9d318381c3c5d9f6d47ae4f92e432
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lshift_ops.h
@@ -0,0 +1,84 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+#include <c10/core/SymInt.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API __lshift___Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::__lshift__";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "__lshift__.Scalar(Tensor self, Scalar other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API __lshift___Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::__lshift__";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "__lshift__.Tensor(Tensor self, Tensor other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API __ilshift___Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::__ilshift__";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API __ilshift___Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::__ilshift__";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API __lshift___Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::__lshift__";
+  static constexpr const char* overload_name = "Scalar_out";
+  static constexpr const char* schema_str = "__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+struct TORCH_API __lshift___Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::__lshift__";
+  static constexpr const char* overload_name = "Tensor_out";
+  static constexpr const char* schema_str = "__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
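A quick sketch (editor-added, assuming libtorch) of the shift entry points above; the in-place `__ilshift__` mutates `self`, while the `_out` overloads write into a caller-provided tensor:

// Illustrative sketch only; assumes a libtorch build. Not part of the vendored diff.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor x = at::arange(1, 5, at::kInt);    // [1, 2, 3, 4]
  at::Tensor shifted = at::__lshift__(x, 2);    // aten::__lshift__.Scalar -> [4, 8, 12, 16]
  at::Tensor out = at::empty_like(x);
  at::__lshift___out(out, x, 3);                // out variant -> [8, 16, 24, 32]
  x.__ilshift__(1);                             // in-place: x becomes [2, 4, 6, 8]
  std::cout << shifted << out << x << "\n";
  return 0;
}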
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm.h
new file mode 100644
index 0000000000000000000000000000000000000000..238e85680531c135ed4ef70d4383dd02640f23e2
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm.h
@@ -0,0 +1,36 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/SymInt.h>
+
+
+
+#include <ATen/ops/lstm_ops.h>
+
+namespace at {
+
+
+// aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+    return at::_ops::lstm_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+}
+
+// aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
+    return at::_ops::lstm_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_cell.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_cell.h
new file mode 100644
index 0000000000000000000000000000000000000000..c50f47950750d9bf8d7239622780641a1ca8514f
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_cell.h
@@ -0,0 +1,31 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/SymInt.h>
+
+
+
+#include <ATen/ops/lstm_cell_ops.h>
+
+namespace at {
+
+
+// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih={}, const ::std::optional<at::Tensor> & b_hh={}) {
+    return at::_ops::lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0ffd98d89de7581c9be15a05bba8e4b2c9dcd371
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih={}, const ::std::optional<at::Tensor> & b_hh={});
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_cell_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_cell_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..72b218ac4480b9da83c4ae1a49149407a6cdb2c6
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_cell_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih={}, const ::std::optional<at::Tensor> & b_hh={});
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_cell_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_cell_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..96963b26a70e6ddc76102aa91e87d049364b3de3
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_cell_ops.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+#include <c10/core/SymInt.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API lstm_cell {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::TensorList, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lstm_cell";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..59fa904372e2447dab94b6ec4d0c7396aef4c573
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional);
+
+} // namespace compositeimplicitautograd
+} // namespace at
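A short sketch (editor-added, assuming libtorch) of a single lstm_cell step; the weights follow the 4*hidden gate layout used by aten::lstm_cell, and hx is passed as a two-element TensorList of (h, c):

// Illustrative sketch only; assumes a libtorch build. Not part of the vendored diff.
#include <ATen/ATen.h>

int main() {
  const int64_t batch = 3, input_size = 10, hidden = 20;
  at::Tensor input = at::randn({batch, input_size});
  at::Tensor hx = at::zeros({batch, hidden});
  at::Tensor cx = at::zeros({batch, hidden});
  at::Tensor w_ih = at::randn({4 * hidden, input_size});
  at::Tensor w_hh = at::randn({4 * hidden, hidden});
  at::Tensor b_ih = at::zeros({4 * hidden});
  at::Tensor b_hh = at::zeros({4 * hidden});
  auto [h1, c1] = at::lstm_cell(input, {hx, cx}, w_ih, w_hh, b_ih, b_hh);
  TORCH_CHECK(h1.sizes() == at::IntArrayRef({batch, hidden}));
  return 0;
}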
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_mps_backward.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_mps_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce46ed4acc2cca93e76e423d992ace2e12b32ef0
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_mps_backward.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/SymInt.h>
+
+
+
+#include <ATen/ops/lstm_mps_backward_ops.h>
+
+namespace at {
+
+
+// aten::lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
+inline ::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward(const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+    return at::_ops::lstm_mps_backward::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+}
+
+// aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()
+inline void lstm_mps_backward_out(at::Tensor & out0, at::TensorList out1, at::TensorList out2, const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+    return at::_ops::lstm_mps_backward_out::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
+}
+// aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()
+inline void lstm_mps_backward_outf(const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
+    return at::_ops::lstm_mps_backward_out::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_mps_backward_compositeexplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_mps_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5c352cf170513d8fe88ca3bc0c01429a469a456b
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_mps_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void lstm_mps_backward_out(at::Tensor & out0, at::TensorList out1, at::TensorList out2, const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+TORCH_API void lstm_mps_backward_outf(const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_mps_backward_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_mps_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc3a21e48b3ab083f9666f83fb3860fbdd0ab558
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_mps_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void lstm_mps_backward_out(const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_mps_backward_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_mps_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..3f235d7e66cb2667103ce38e5d0db9485f763e1c
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_mps_backward_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+#include <c10/core/SymInt.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API lstm_mps_backward {
+  using schema = ::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> (const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lstm_mps_backward";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])";
+  static ::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> call(const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+  static ::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+};
+
+struct TORCH_API lstm_mps_backward_out {
+  using schema = void (const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool, at::Tensor &, at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lstm_mps_backward";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()";
+  static void call(const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2);
+};
+
+}} // namespace at::_ops
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a0f6288273d2dcfa93df6c83d9386903603e2ba
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..db10b8e591574c165eadebd31708695ccbef9c68
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lstm_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+#include <c10/core/SymInt.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API lstm_input {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lstm";
+  static constexpr const char* overload_name = "input";
+  static constexpr const char* schema_str = "lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)";
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+};
+
+struct TORCH_API lstm_data {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lstm";
+  static constexpr const char* overload_name = "data";
+  static constexpr const char* schema_str = "lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)";
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional);
+};
+
+}} // namespace at::_ops
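A sketch (editor-added, assuming libtorch) of the multi-layer lstm.input overload with a flat single-layer parameter list (w_ih, w_hh, b_ih, b_hh); the at::_ops structs declared above also expose the registered schema string for introspection:

// Illustrative sketch only; assumes a libtorch build. Not part of the vendored diff.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  const int64_t seq = 5, batch = 2, in_sz = 4, hid = 8;
  at::Tensor input = at::randn({seq, batch, in_sz});
  at::Tensor h0 = at::zeros({1, batch, hid});
  at::Tensor c0 = at::zeros({1, batch, hid});
  at::Tensor w_ih = at::randn({4 * hid, in_sz});
  at::Tensor w_hh = at::randn({4 * hid, hid});
  at::Tensor b_ih = at::zeros({4 * hid});
  at::Tensor b_hh = at::zeros({4 * hid});
  auto [y, hn, cn] = at::lstm(input, {h0, c0}, {w_ih, w_hh, b_ih, b_hh},
                              /*has_biases=*/true, /*num_layers=*/1, /*dropout=*/0.0,
                              /*train=*/false, /*bidirectional=*/false, /*batch_first=*/false);
  // The registered schema is visible without running the op.
  std::cout << at::_ops::lstm_input::schema_str << "\n" << y.sizes() << "\n";
  return 0;
}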
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lt.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt.h
new file mode 100644
index 0000000000000000000000000000000000000000..94cbedb44cdaddb30f267c4595fed9bd70601b09
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt.h
@@ -0,0 +1,54 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/SymInt.h>
+
+
+
+#include <ATen/ops/lt_ops.h>
+
+namespace at {
+
+
+// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::lt_Scalar_out::call(self, other, out);
+}
+// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::lt_Scalar_out::call(self, other, out);
+}
+
+// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor lt(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::lt_Scalar::call(self, other);
+}
+
+// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::lt_Tensor_out::call(self, other, out);
+}
+// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::lt_Tensor_out::call(self, other, out);
+}
+
+// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor lt(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::lt_Tensor::call(self, other);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..52ff9e42e321287b180344079973a57fbc790dea
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor lt(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor lt(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2fbb5963053a30463eadc492fb25b1df531ffa05
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_cpu_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor lt(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor lt(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..13e662d47c8e5e183cd2044412197d2efb73e1f6
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_cuda_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor lt(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor lt(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..5a8941a89244f151b2e28368fb92a7a9fc65844b
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_meta.h
@@ -0,0 +1,32 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_lt_Scalar : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self, const at::Scalar & other);
+};
+struct TORCH_API structured_lt_Tensor : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf0da30ba80eb27f766f0df6c3577c5f470f2867
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_meta_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor lt(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor lt(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..80369f9746e1990821016b335a4ec0d8c3dd89ec
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_native.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/lt_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_lt_Scalar_out : public at::meta::structured_lt_Scalar {
+void impl(const at::Tensor & self, const at::Scalar & other, const at::Tensor & out);
+};
+TORCH_API at::Tensor lt_quantized_cpu(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & lt_out_quantized_cpu(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+struct TORCH_API structured_lt_Tensor_out : public at::meta::structured_lt_Tensor {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+TORCH_API at::Tensor lt_quantized_cpu(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lt_out_quantized_cpu(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0172ae7ee08f8373ed4abdb9bc1e25a8fb48449d
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lt_ops.h
@@ -0,0 +1,84 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+#include <c10/core/SymInt.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API lt_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lt";
+  static constexpr const char* overload_name = "Scalar_out";
+  static constexpr const char* schema_str = "lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+struct TORCH_API lt_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lt";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "lt.Scalar(Tensor self, Scalar other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API lt_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lt";
+  static constexpr const char* overload_name = "Tensor_out";
+  static constexpr const char* schema_str = "lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API lt_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lt";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "lt.Tensor(Tensor self, Tensor other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API lt__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lt_";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API lt__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lt_";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
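A minimal sketch (editor-added, assuming libtorch) of the lt overloads above; the Scalar and Tensor forms produce the same boolean mask, and the out variant resizes a caller-provided bool tensor:

// Illustrative sketch only; assumes a libtorch build. Not part of the vendored diff.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor a = at::tensor({1, 4, 2});
  at::Tensor b = at::tensor({3, 3, 3});
  at::Tensor m1 = at::lt(a, b);          // aten::lt.Tensor -> [true, false, true]
  at::Tensor m2 = at::lt(a, 3);          // aten::lt.Scalar -> same mask
  at::Tensor out = at::empty({0}, at::kBool);
  at::lt_out(out, a, b);                 // out variant resizes `out` to the result shape
  std::cout << m1.equal(m2) << " " << out << "\n";
  return 0;
}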
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_solve.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_solve.h
new file mode 100644
index 0000000000000000000000000000000000000000..85340c37ad5e7915acdb0715f6171aee9310b2a7
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_solve.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/SymInt.h>
+
+
+
+#include <ATen/ops/lu_solve_ops.h>
+
+namespace at {
+
+
+// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lu_solve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
+    return at::_ops::lu_solve_out::call(self, LU_data, LU_pivots, out);
+}
+// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lu_solve_outf(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) {
+    return at::_ops::lu_solve_out::call(self, LU_data, LU_pivots, out);
+}
+
+// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
+inline at::Tensor lu_solve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
+    return at::_ops::lu_solve::call(self, LU_data, LU_pivots);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_solve_compositeimplicitautograd_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_solve_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5b91f7336e163587334943b0a21ee48ca205c660
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_solve_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor lu_solve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots);
+TORCH_API at::Tensor & lu_solve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots);
+TORCH_API at::Tensor & lu_solve_outf(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_solve_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_solve_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f4a3964cd2f5584ca2538547dfd771e24c064a24
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_solve_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor lu_solve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots);
+TORCH_API at::Tensor & lu_solve_out(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_solve_ops.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_solve_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bea36b2bcd1fdcc56d045a0f90874c437e43c572
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_solve_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+#include <c10/core/SymInt.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API lu_solve_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lu_solve";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out);
+};
+
+struct TORCH_API lu_solve {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lu_solve";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots);
+};
+
+}} // namespace at::_ops
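A sketch (editor-added, assuming libtorch) of lu_solve consuming a packed LU factorization; here the factors come from at::linalg_lu_factor, which I take to produce the LU_data/LU_pivots layout this older operator expects (upstream now prefers the linalg variants):

// Illustrative sketch only; assumes a libtorch build. Not part of the vendored diff.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor A = at::randn({4, 4});
  at::Tensor B = at::randn({4, 2});
  // Packed LU factors and pivots, as expected by aten::lu_solve.
  auto [LU, pivots] = at::linalg_lu_factor(A);
  at::Tensor X = at::lu_solve(B, LU, pivots);   // solves A X = B
  std::cout << (at::matmul(A, X) - B).abs().max() << "\n";
  return 0;
}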
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack.h
new file mode 100644
index 0000000000000000000000000000000000000000..1e85f14adf568e2c079023dc604a98906ac72242
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/util/OptionalArrayRef.h>
+
+
+
+#include <ATen/ops/lu_unpack_ops.h>
+
+namespace at {
+
+
+// aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true) {
+    return at::_ops::lu_unpack::call(LU_data, LU_pivots, unpack_data, unpack_pivots);
+}
+
+// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true) {
+    return at::_ops::lu_unpack_out::call(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
+}
+// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_outf(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
+    return at::_ops::lu_unpack_out::call(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
+}
+
+}
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_compositeexplicitautogradnonfunctional_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..587b6800924a796b74f8a76f20eaa4943d09d437
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
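`lu_unpack` expands a packed LU factorization into explicit P, L, and U factors, with both unpack flags defaulting to true. A minimal sketch of the functional form declared above, again assuming `at::linalg_lu_factor` as the producer of the packed inputs (not part of the diff):

```cpp
#include <ATen/ATen.h>

void reconstruct_demo() {
  at::Tensor A = at::randn({4, 4});
  auto [LU, pivots] = at::linalg_lu_factor(A);

  // Both unpack_* flags default to true, per the declarations above.
  auto [P, L, U] = at::lu_unpack(LU, pivots);

  // P * L * U reconstructs A up to floating-point error.
  at::Tensor A2 = at::matmul(P, at::matmul(L, U));
}
```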
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_cpu_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a52e74fa2ab9b13ac07c07101761d39c90bae203
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_outf(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U);
+
+} // namespace cpu
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_cuda_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d70349c31f6e5b73ef6e391a73c353813be37dde
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_outf(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U);
+
+} // namespace cuda
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_meta.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..a863755cc1c1f367513f9807ce98d894081854b4
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_lu_unpack : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots);
+};
+
+} // namespace meta
+} // namespace at
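Because `lu_unpack` is a structured operator, the `structured_lu_unpack::meta()` declared above performs only shape and dtype inference, and the meta dispatch header that follows exposes the same signatures for meta tensors. A sketch of shape-only execution, assuming standard meta-device behavior (the demo function is illustrative, not from the diff):

```cpp
#include <ATen/ATen.h>

void shape_only_demo() {
  // Meta tensors carry sizes/dtypes but no data, so dispatch reaches the
  // meta() shape function declared above instead of a real kernel.
  at::Tensor LU = at::empty({5, 5}, at::device(at::kMeta));
  at::Tensor pivots = at::empty({5}, at::device(at::kMeta).dtype(at::kInt));

  auto [P, L, U] = at::lu_unpack(LU, pivots);
  // P, L, and U are 5x5 meta tensors: shapes resolved, nothing computed.
}
```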
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_meta_dispatch.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bad6a050370b9d8bb5d6fae40665011d7f908f41
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_outf(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U);
+
+} // namespace meta
+} // namespace at
diff --git a/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_native.h b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c9383540a9f0c667fb762cda46f792fc482c4529
--- /dev/null
+++ b/phivenv/Lib/site-packages/torch/include/ATen/ops/lu_unpack_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/lu_unpack_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_lu_unpack_out : public at::meta::structured_lu_unpack {
+void impl(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, const at::Tensor & P, const at::Tensor & L, const at::Tensor & U);
+};
+} // namespace native
+} // namespace at
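The native header completes the structured kernel: `structured_lu_unpack_out` inherits `meta()` from `at::meta::structured_lu_unpack` and adds the `impl()` that writes the outputs, so the functional, out=, and meta variants all share one implementation. A sketch of the out= path with caller-provided tensors (illustrative only):

```cpp
#include <ATen/ATen.h>

void out_variant_demo() {
  at::Tensor A = at::randn({4, 4});
  auto [LU, pivots] = at::linalg_lu_factor(A);

  // Structured out= kernels run meta() first, which checks or resizes the
  // provided outputs, then impl() writes the P/L/U factors into them.
  at::Tensor P = at::empty({0});
  at::Tensor L = at::empty({0});
  at::Tensor U = at::empty({0});
  at::lu_unpack_out(P, L, U, LU, pivots);
}
```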