/*** * ==++== * * Copyright (c) Microsoft Corporation. All rights reserved. * * ==--== * =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+ * * amp_graphics.h * * C++ AMP Graphics Library * * =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- ****/ #pragma once #include #include #include #include #define _AMP_GRAPHICS_H namespace Concurrency { namespace graphics { namespace details { #pragma warning( push ) #pragma warning( disable : 6326 ) // Potential comparison of a constant with another constant template struct _Short_vector_type_traits { typedef void _Scalar_type; static const bool _Is_valid_SVT_for_texture = false; static const _Short_vector_base_type_id _Format_base_type_id = _Invalid_type; static const unsigned int _Num_channels = 0; static const unsigned int _Default_bits_per_channel = 0; }; template<> struct _Short_vector_type_traits { typedef unsigned int _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Uint_type; static const unsigned int _Num_channels = 1; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef uint_2::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Uint_type; static const unsigned int _Num_channels = 2; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef uint_3::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Uint_type; static const unsigned int _Num_channels = 3; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef uint_4::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Uint_type; static const unsigned int _Num_channels = 4; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef int _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Int_type; static const unsigned int _Num_channels = 1; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef int_2::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Int_type; static const unsigned int _Num_channels = 2; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef int_3::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Int_type; static const unsigned int _Num_channels = 3; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef int_4::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Int_type; static const unsigned int _Num_channels = 4; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef float _Scalar_type; 
static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Float_type; static const unsigned int _Num_channels = 1; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef float_2::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Float_type; static const unsigned int _Num_channels = 2; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef float_3::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Float_type; static const unsigned int _Num_channels = 3; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef float_4::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Float_type; static const unsigned int _Num_channels = 4; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef unorm _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Unorm_type; static const unsigned int _Num_channels = 1; static const unsigned int _Default_bits_per_channel = 16; }; template<> struct _Short_vector_type_traits { typedef unorm_2::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Unorm_type; static const unsigned int _Num_channels = 2; static const unsigned int _Default_bits_per_channel = 16; }; template<> struct _Short_vector_type_traits { typedef unorm_3::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = false; static const _Short_vector_base_type_id _Format_base_type_id = _Invalid_type; static const unsigned int _Num_channels = 0; static const unsigned int _Default_bits_per_channel = 0; }; template<> struct _Short_vector_type_traits { typedef unorm_4::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Unorm_type; static const unsigned int _Num_channels = 4; static const unsigned int _Default_bits_per_channel = 16; }; template<> struct _Short_vector_type_traits { typedef norm _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Norm_type; static const unsigned int _Num_channels = 1; static const unsigned int _Default_bits_per_channel = 16; }; template<> struct _Short_vector_type_traits { typedef norm_2::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Norm_type; static const unsigned int _Num_channels = 2; static const unsigned int _Default_bits_per_channel = 16; }; template<> struct _Short_vector_type_traits { typedef norm_3::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = false; static const _Short_vector_base_type_id _Format_base_type_id = _Invalid_type; static const unsigned int _Num_channels = 0; static const unsigned int _Default_bits_per_channel = 0; }; template<> struct _Short_vector_type_traits { typedef norm_4::value_type _Scalar_type; static const bool 
_Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Norm_type; static const unsigned int _Num_channels = 4; static const unsigned int _Default_bits_per_channel = 16; }; template<> struct _Short_vector_type_traits { typedef double _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Double_type; static const unsigned int _Num_channels = 2; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef double_2::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = true; static const _Short_vector_base_type_id _Format_base_type_id = _Double_type; static const unsigned int _Num_channels = 4; static const unsigned int _Default_bits_per_channel = 32; }; template<> struct _Short_vector_type_traits { typedef double_3::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = false; static const _Short_vector_base_type_id _Format_base_type_id = _Invalid_type; static const unsigned int _Num_channels = 0; static const unsigned int _Default_bits_per_channel = 0; }; template<> struct _Short_vector_type_traits { typedef double_4::value_type _Scalar_type; static const bool _Is_valid_SVT_for_texture = false; static const _Short_vector_base_type_id _Format_base_type_id = _Invalid_type; static const unsigned int _Num_channels = 0; static const unsigned int _Default_bits_per_channel = 0; }; template unsigned int _Get_default_bits_per_scalar_element() { return _Short_vector_type_traits<_Short_vector_type>::_Format_base_type_id == _Double_type ? _Short_vector_type_traits<_Short_vector_type>::_Default_bits_per_channel * 2 : _Short_vector_type_traits<_Short_vector_type>::_Default_bits_per_channel; } template std::array _Get_dimensions(const Concurrency::extent<_Rank> & _Ext, unsigned int _Mip_offset) { std::array _Arr; // For un-used dimensions, use value 1. switch((_Rank)) { case 1: _Arr[0] = static_cast((_Ext[0] >> _Mip_offset) ? (_Ext[0] >> _Mip_offset) : 1U); _Arr[1] = 1; _Arr[2] = 1; break; case 2: _Arr[0] = static_cast((_Ext[1] >> _Mip_offset) ? (_Ext[1] >> _Mip_offset) : 1U); _Arr[1] = static_cast((_Ext[0] >> _Mip_offset) ? (_Ext[0] >> _Mip_offset) : 1U); _Arr[2] = 1; break; case 3: _Arr[0] = static_cast((_Ext[2] >> _Mip_offset) ? (_Ext[2] >> _Mip_offset) : 1U); _Arr[1] = static_cast((_Ext[1] >> _Mip_offset) ? (_Ext[1] >> _Mip_offset) : 1U); _Arr[2] = static_cast((_Ext[0] >> _Mip_offset) ? (_Ext[0] >> _Mip_offset) : 1U); break; default: _ASSERTE(false); _Arr[0] = 1; _Arr[1] = 1; _Arr[2] = 1; break; } return _Arr; } template std::array _Get_indices(const index<_Rank> &_Idx) { std::array _Arr; // For un-used dimensions, use value 0. 
switch((_Rank)) { case 1: _Arr[0] = static_cast(_Idx[0]); _Arr[1] = 0; _Arr[2] = 0; break; case 2: _Arr[0] = static_cast(_Idx[1]); _Arr[1] = static_cast(_Idx[0]); _Arr[2] = 0; break; case 3: _Arr[0] = static_cast(_Idx[2]); _Arr[1] = static_cast(_Idx[1]); _Arr[2] = static_cast(_Idx[0]); break; default: _ASSERTE(false); _Arr[0] = 0; _Arr[1] = 0; _Arr[2] = 0; break; } return _Arr; } template Concurrency::extent<_Rank> _Create_extent(size_t _Width, size_t _Height, size_t _Depth) { extent<_Rank> _Ext; switch((_Rank)) { case 1: _Ext[0] = static_cast(_Width); break; case 2: _Ext[0] = static_cast(_Height); _Ext[1] = static_cast(_Width); break; case 3: _Ext[0] = static_cast(_Depth); _Ext[1] = static_cast(_Height); _Ext[2] = static_cast(_Width); break; default: _ASSERTE(false); break; } return _Ext; } // forward declaration template class _Texture_base; template _Event _Copy_async_impl(const void * _Src, unsigned int _Src_byte_size, const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Offset, const Concurrency::extent<_Rank> &_Copy_extent); template _Event _Copy_async_impl(const _Texture_base<_Value_type, _Rank> &_Src, OutputIterator _Dest_iter); template _Event _Copy_async_impl(const _Texture_base<_Value_type, _Rank>& _Src, const index<_Rank> &_Src_offset, const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset, const extent<_Rank> &_Copy_extent); // The base class for texture, writeonly_texture_view template class _Texture_base { static_assert(_Rank > 0 && _Rank <= 3, "texture is only supported for rank 1, 2, and 3."); static_assert(_Short_vector_type_traits::type>::_Is_valid_SVT_for_texture, "invalid value_type for a texture."); // Friends template friend const _Texture_descriptor& Concurrency::details::_Get_texture_descriptor(const _T& _Tex) __GPU; template friend _Ret_ _Texture* Concurrency::details::_Get_texture(const _T& _Tex) __CPU_ONLY; template friend _Event _Copy_async_impl(const _Texture_base<_Value_type, _Rank>& _Src, const index<_Rank> &_Src_offset, const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset, const extent<_Rank> &_Copy_extent) __CPU_ONLY; public: static const int rank = _Rank; typedef typename _Value_type value_type; typedef typename _Short_vector_type_traits<_Value_type>::_Scalar_type scalar_type; public: /// /// Returns the extent that defines the shape of this texture or texture view. /// __declspec(property(get=get_extent)) Concurrency::extent<_Rank> extent; Concurrency::extent<_Rank> get_extent() const __GPU { return _M_extent; } /// /// Returns the extent for specific mipmap level of this texture or texture view. /// /// /// Mipmap level for which extent should be calculated. /// Concurrency::extent<_Rank> get_mipmap_extent(unsigned int _Mipmap_level) const __CPU_ONLY { if (_Mipmap_level >= this->get_mipmap_levels()) { std::stringstream _Err_msg; _Err_msg << "Value for _Mipmap_level parameter (" << _Mipmap_level << ") cannot be greater than or equal to number of mipmap levels (" << this->get_mipmap_levels() << ") on the texture or texture view"; throw runtime_exception(_Err_msg.str().c_str(), E_INVALIDARG); } return Concurrency::details::_Get_extent_at_level(_M_extent, _Mipmap_level); } /// /// Returns the extent for specific mipmap level of this texture or texture view. /// /// /// Mipmap level for which extent should be calculated. 
/// Concurrency::extent<_Rank> get_mipmap_extent(unsigned int _Mipmap_level) const __GPU_ONLY { return Concurrency::details::_Get_extent_at_level_unsafe(_M_extent, _Mipmap_level); } /// /// Returns the accelerator_view where this texture or texture view is located. /// __declspec(property(get=get_accelerator_view)) Concurrency::accelerator_view accelerator_view; Concurrency::accelerator_view get_accelerator_view() const __CPU_ONLY { return _Get_texture()->_Get_access_on_accelerator_view(); } /// /// Returns the number of bits per scalar element /// __declspec(property(get=get_bits_per_scalar_element)) unsigned int bits_per_scalar_element; unsigned int get_bits_per_scalar_element() const __CPU_ONLY { unsigned int _Bits_per_channel = _Get_texture()->_Get_bits_per_channel(); return _Short_vector_type_traits<_Value_type>::_Format_base_type_id == _Double_type ? _Bits_per_channel * (sizeof(double)/sizeof(int)) : _Bits_per_channel; } /// /// Query how many mipmap levels are accessible by this texture (or texture view). /// /// /// Returns number of mipmap levels accessible by this texture (or texture view). /// __declspec(property(get=get_mipmap_levels)) unsigned int mipmap_levels; unsigned int get_mipmap_levels() const __GPU { return _M_texture_descriptor._Get_view_mipmap_levels(); } /// /// Returns the physical data length (in bytes) that is required in order to represent /// the texture on the host side with its native format. /// If the texture contains multiple mipmap levels the value represents the sum of physical data length for each accessible mipmap level by this texture (or texture view). /// __declspec(property(get=get_data_length)) unsigned int data_length; unsigned int get_data_length() const __CPU_ONLY { return _Get_texture()->_Get_data_length(this->_Get_most_detailed_mipmap_level(), this->get_mipmap_levels()); } protected: // internal storage abstraction typedef Concurrency::details::_Texture_descriptor _Texture_descriptor; _Texture_base() __CPU_ONLY { // This default ctor is required to enable move ctor for a derived types, // empty _Texture_base is later initialized by move assigment operator } _Texture_base(const Concurrency::extent<_Rank>& _Ext, unsigned int _Mipmap_levels = 1) __CPU_ONLY : _M_extent(_Ext), _M_texture_descriptor(/*_Most_detailed_mipmap_level=*/0, _Mipmap_levels) { _Is_valid_extent(_M_extent); _Are_valid_mipmap_parameters(/*_Most_detailed_mipmap_level=*/0, _Mipmap_levels); // Validate if we can generate _Mipmap_levels number of mipmap levels given the dimensionality of the texture unsigned int _Max_mipmap_levels = _Get_max_mipmap_levels(_M_extent); if (_Mipmap_levels > _Max_mipmap_levels) { std::stringstream _Err_msg; _Err_msg << "The texture extent is too small to generate (" << _Mipmap_levels << ") mipmap levels, the maximum allowed is (" << _Max_mipmap_levels << ")"; throw runtime_exception(_Err_msg.str().c_str(), E_INVALIDARG); } else if (_Mipmap_levels == 0) { // Generate full range of all mipmaps // e.g. 
2D 10x2 texture would have: 10x2, 5x1, 2x1, 1x1 (4 mipmap levels) _Mipmap_levels = _Max_mipmap_levels; } _M_texture_descriptor._Set_view_mipmap_levels(_Mipmap_levels); } // shallow copy for texture_views _Texture_base(const _Texture_base & _Src) __GPU : _M_extent(_Src._M_extent), _M_texture_descriptor(_Src._M_texture_descriptor) { } // shallow copy for texture_views that redefine range of mipmaps _Texture_base(const _Texture_base & _Src, unsigned int _Most_detailed_mipmap_level, unsigned int _View_mipmap_levels) __CPU_ONLY : _M_extent(_Get_extent_at_level(_Src.extent, _Most_detailed_mipmap_level)), _M_texture_descriptor(_Src._M_texture_descriptor, _Src._Get_most_detailed_mipmap_level() + _Most_detailed_mipmap_level, _View_mipmap_levels) { Concurrency::details::_Is_valid_mipmap_range(_Src.get_mipmap_levels(), _Most_detailed_mipmap_level, _View_mipmap_levels); } // shallow copy for texture_views that in restrict(amp) context, the texture views can no longer redefine mipmap range, // but read-write texture view needs to flatten to single mipmap level when created over a texture with multiple mipmap levels. _Texture_base(const _Texture_base & _Src, bool _Flatten_mipmap_levels) __GPU_ONLY : _M_extent(_Src.extent), _M_texture_descriptor(_Src._M_texture_descriptor, /*_Most_detailed_mipmap_level=*/0, _Flatten_mipmap_levels ? /*_View_mipmap_levels=*/1 : _Src.get_mipmap_levels()) { } // interop _Texture_base(const Concurrency::extent<_Rank>& _Ext, const _Texture_descriptor & _Desc) __CPU_ONLY : _M_extent(_Ext), _M_texture_descriptor(_Desc) { Concurrency::details::_Is_valid_extent(_M_extent); } void _Copy_to(const _Texture_base & _Dest) const __CPU_ONLY { if (!(*this == _Dest)) { _ASSERTE(this->extent == _Dest.extent); details::_Copy_async_impl(*this, index<_Rank>(), _Dest, index<_Rank>(), _Dest.extent)._Get(); } } bool operator==(const _Texture_base & _Other) const __CPU_ONLY { return _Other._M_extent == _M_extent && _Other._M_texture_descriptor == _M_texture_descriptor; } ~_Texture_base() __GPU { } _Ret_ _Texture* _Get_texture() const __CPU_ONLY { return _M_texture_descriptor._Get_texture_ptr(); } unsigned int _Get_most_detailed_mipmap_level() const __GPU { return _M_texture_descriptor._Get_most_detailed_mipmap_level(); } bool _Are_mipmap_levels_overlapping(const _Texture_base &_Other) const __CPU_ONLY { return _M_texture_descriptor._Are_mipmap_levels_overlapping(&_Other._M_texture_descriptor); } protected: Concurrency::extent<_Rank> _M_extent; _Texture_descriptor _M_texture_descriptor; }; inline void _Is_valid_data_length(unsigned int _Num_elems, unsigned int _Bits_per_elem) { unsigned long long _Bytes_per_elem = static_cast(_Bits_per_elem / 8U); unsigned long long _Total_bytes = static_cast(_Num_elems) * _Bytes_per_elem; if (_Total_bytes > static_cast(UINT_MAX)) { throw runtime_exception("Invalid - texture data_length exceeds UINT_MAX", E_INVALIDARG); } } } // namespace details using Concurrency::graphics::details::_Short_vector_type_traits; // forward declarations template class texture; template class writeonly_texture_view; template class texture_view; class sampler; namespace direct3d { template texture<_Value_type, _Rank> make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, DXGI_FORMAT _View_format = DXGI_FORMAT_UNKNOWN) __CPU_ONLY; sampler make_sampler(_In_ IUnknown *_D3D_sampler) __CPU_ONLY; _Ret_ IUnknown * get_sampler(const Concurrency::accelerator_view &_Av, const sampler &_Sampler) __CPU_ONLY; } // namespace direct3d /// /// A texture is a data aggregate 
on an accelerator_view in the extent domain. /// It is a collection of variables, one for each element in an extent domain. /// Each variable holds a value corresponding to C++ primitive type (unsigned int, /// int, float, double), or scalar type norm, or unorm (defined in concurrency::graphics), /// or eligible short vector types defined in concurrency::graphics. /// /// /// The type of the elements in the texture aggregates. /// /// /// The _Rank of the corresponding extent domain. /// template class texture : public details::_Texture_base<_Value_type, _Rank> { template friend texture<_Value_type,_Rank> direct3d::make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, DXGI_FORMAT _View_format) __CPU_ONLY; static_assert(!std::is_const<_Value_type>::value, "const value type is not supported for texture."); public: /// /// Construct a texture from extents. /// /// /// An extent that describes the shape of the texture. /// texture(const Concurrency::extent<_Rank>& _Ext) __CPU_ONLY : _Texture_base(_Ext) { static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view); } /// /// Construct texture<T,1> with the extent _E0 /// /// /// An integer that is the length of this texture (width). /// texture(int _E0) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0)) { static_assert(_Rank == 1, "texture(int) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view); } /// /// Construct a texture<T,2> from two integer extents. /// /// /// An integer that is the length of the most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// texture(int _E0, int _E1) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1)) { static_assert(_Rank == 2, "texture(int, int) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view); } /// /// Construct a texture<T,3> from three integer extents. /// /// /// An integer that is the length of the most-significant dimension of this texture (depth). /// /// /// An integer that is the length of the next-to-most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). 
/// texture(int _E0, int _E1, int _E2) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2)) { static_assert(_Rank == 3, "texture(int, int, int) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view); } /// /// Construct a texture from extents, bound to a specific accelerator_view. /// /// /// An extent that describes the shape of the texture. /// /// /// An accelerator_view where this texture resides. /// texture(const Concurrency::extent<_Rank>& _Ext, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(_Ext) { static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av); } /// /// Construct a staging texture from extents, bound to a specific accelerator_view /// and an associated accelerator_view that is the preferred location for copying /// to/from this texture. /// /// /// An extent that describes the shape of the texture. /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(const Concurrency::extent<_Rank>& _Ext, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(_Ext) { static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av, _Associated_av); } /// /// Construct a texture<T,1> with the extent _E0, bound to a specific accelerator_view. /// /// /// An integer that is the length of this texture (width). /// /// /// An accelerator_view where this texture resides. /// texture(int _E0, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0)) { static_assert(_Rank == 1, "texture(int, accelerator_view) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av); } /// /// Construct a staging texture<T,1> with the extent _E0, bound to a specific /// accelerator_view and an associated accelerator_view that is the preferred location /// for copying to/from this texture. /// /// /// An integer that is the length of this texture (width). /// /// /// An accelerator_view where this texture resides. 
/// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(int _E0, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0)) { static_assert(_Rank == 1, "texture(int, accelerator_view, accelerator_view) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av, _Associated_av); } /// /// Construct a texture<T,2> from two integer extents, bound to a specific accelerator_view. /// /// /// An integer that is the length of the most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// An accelerator_view where this texture resides. /// texture(int _E0, int _E1, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1)) { static_assert(_Rank == 2, "texture(int, int, accelerator_view) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av); } /// /// Construct a staging texture<T,2> from two integer extents, bound to a /// specific accelerator_view and an associated accelerator_view that is the /// preferred location for copying to/from this texture. /// /// /// An integer that is the length of the most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(int _E0, int _E1, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1)) { static_assert(_Rank == 2, "texture(int, int, accelerator_view, accelerator_view) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av, _Associated_av); } /// /// Construct a texture<T,3> from three integer extents, bound to a specific accelerator_view. /// /// /// An integer that is the length of the most-significant dimension of this texture (depth). /// /// /// An integer that is the length of the next-to-most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// An accelerator_view where this texture resides. 
/// texture(int _E0, int _E1, int _E2, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2)) { static_assert(_Rank == 3, "texture(int, int, int, accelerator_view) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av); } /// /// Construct a staging texture<T,3> from three integer extents, bound to a /// specific accelerator_view and an associated accelerator_view that is the preferred /// location for copying to/from this texture. /// /// /// An integer that is the length of the most-significant dimension of this texture (depth). /// /// /// An integer that is the length of the next-to-most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(int _E0, int _E1, int _E2, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2)) { static_assert(_Rank == 3, "texture(int, int, int, accelerator_view, accelerator_view) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av, _Associated_av); } /// /// Construct a texture initialized from a pair of iterators into a container. /// /// /// An extent that describes the shape of the texture. /// /// /// A beginning iterator into the source container. /// /// /// An ending iterator into the source container. /// template texture(const Concurrency::extent<_Rank>& _Ext, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY : _Texture_base(_Ext) { static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Src_first, _Src_last); } /// /// Construct a texture<T,1> with the extent _E0 and from a pair of iterators into a container. /// /// /// An integer that is the length of this texture (width). /// /// /// A beginning iterator into the source container. /// /// /// An ending iterator into the source container. 
/// template texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0)) { static_assert(_Rank == 1, "texture(int, iterator, iterator) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Src_first, _Src_last); } /// /// Construct a texture<T,2> with two integers and initialized from a pair of iterators into a container. /// /// /// An integer that is the length of the most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// A beginning iterator into the source container. /// /// /// An ending iterator into the source container. /// template texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1)) { static_assert(_Rank == 2, "texture(int, int, iterator, iterator) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Src_first, _Src_last); } /// /// Construct a texture<T,3> with three integers and initialized from a pair of iterators into a container. /// /// /// An integer that is the length of the most-significant dimension of this texture (depth). /// /// /// An integer that is the length of the next-to-most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// A beginning iterator into the source container. /// /// /// An ending iterator into the source container. /// template texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2)) { static_assert(_Rank == 3, "texture(int, int, int, iterator, iterator) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Src_first, _Src_last); } /// /// Construct a texture initialized from a pair of iterators into a container, bound to a specific accelerator_view. /// /// /// An extent that describes the shape of the texture. /// /// /// A beginning iterator into the source container. /// /// /// An ending iterator into the source container. /// /// /// An accelerator_view where this texture resides. 
/// template texture(const Concurrency::extent<_Rank>& _Ext, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(_Ext) { static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av, _Src_first, _Src_last); } /// /// Construct a staging texture initialized from a pair of iterators into a container, /// bound to a specific accelerator_view and an associated accelerator_view that is the /// preferred location for copying to/from this texture. /// /// /// An extent that describes the shape of the texture. /// /// /// A beginning iterator into the source container. /// /// /// An ending iterator into the source container. /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// template texture(const Concurrency::extent<_Rank>& _Ext, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(_Ext) { static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av, _Associated_av, _Src_first, _Src_last); } /// /// Construct a texture<T,1> with integer _E0 and initialized from a pair of iterators into a container, bound to a specific accelerator_view. /// /// /// An integer that is the length of this texture (width). /// /// /// A beginning iterator into the source container. /// /// /// An ending iterator into the source container. /// /// /// An accelerator_view where this texture resides. /// template texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0)) { static_assert(_Rank == 1, "texture(int, iterator, iterator, accelerator_view) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av, _Src_first, _Src_last); } /// /// Construct a staging texture<T,1> with integer _E0 and initialized from a pair of iterators /// into a container, bound to a specific accelerator_view and an associated accelerator_view that is /// the preferred location for copying to/from this texture. /// /// /// An integer that is the length of this texture (width). /// /// /// A beginning iterator into the source container. /// /// /// An ending iterator into the source container. /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. 
/// template texture(int _E0, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0)) { static_assert(_Rank == 1, "texture(int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av, _Associated_av, _Src_first, _Src_last); } /// /// Construct a texture<T,2> with two integers and initialized from a pair of iterators into a container, bound to a specific accelerator_view. /// /// /// An integer that is the length of the most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// A beginning iterator into the source container. /// /// /// An ending iterator into the source container. /// /// /// An accelerator_view where this texture resides. /// template texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1)) { static_assert(_Rank == 2, "texture(int, int, iterator, iterator, accelerator_view) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av, _Src_first, _Src_last); } /// /// Construct a staging texture<T,2> with two integers and initialized from a pair of iterators /// into a container, bound to a specific accelerator_view and an associated accelerator_view that is /// the preferred location for copying to/from this texture. /// /// /// An integer that is the length of the most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// A beginning iterator into the source container. /// /// /// An ending iterator into the source container. /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. 
/// template texture(int _E0, int _E1, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1)) { static_assert(_Rank == 2, "texture(int, int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av, _Associated_av, _Src_first, _Src_last); } /// /// Construct a texture<T,3> with three integers and initialized from a pair of iterators into a container, bound to a specific accelerator_view. /// /// /// An integer that is the length of the most-significant dimension of this texture (depth). /// /// /// An integer that is the length of the next-to-most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// A beginning iterator into the source container. /// /// /// An ending iterator into the source container. /// /// /// An accelerator_view where this texture resides. /// template texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2)) { static_assert(_Rank == 3, "texture(int, int, int, iterator, iterator, accelerator_view) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av, _Src_first, _Src_last); } /// /// Construct a staging texture<T,3> with three integers and initialized from a pair of iterators /// into a container, bound to a specific accelerator_view and an associated accelerator_view that is the /// preferred location for copying to/from this texture. /// /// /// An integer that is the length of the most-significant dimension of this texture (depth). /// /// /// An integer that is the length of the next-to-most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// A beginning iterator into the source container. /// /// /// An ending iterator into the source container. /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. 
/// template texture(int _E0, int _E1, int _E2, _Input_iterator _Src_first, _Input_iterator _Src_last, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2)) { static_assert(_Rank == 3, "texture(int, int, int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on texture."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "texture cannot be constructed from unorm based short vectors via this constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "texture cannot be constructed from norm based short vectors via this constructor."); _Initialize(_Av, _Associated_av, _Src_first, _Src_last); } /// /// Construct a texture from extents and specified bits per scalar element /// /// /// An extent that describes the shape of the texture. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element) __CPU_ONLY : _Texture_base(_Ext) { _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element); } /// /// Construct a texture from extents, specified bits per scalar element and number of mipmap levels /// /// /// An extent that describes the shape of the texture. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// Number of mipmap levels in the underlying texture. /// If 0 is specified, the texture will have full range of mipmap levels down to smallest possible size for the given extent. /// texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element, unsigned int _Mipmap_levels) __CPU_ONLY : _Texture_base(_Ext, _Mipmap_levels) { _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element); } /// /// Construct a texture<T,1> with integer _E0 and specified bits per scalar element /// /// /// An integer that is the length of this texture (width). /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// texture(int _E0, unsigned int _Bits_per_scalar_element) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0)) { static_assert(_Rank == 1, "texture(int, unsigned int) is only permissible on texture."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element); } /// /// Construct a texture<T,2> with two integers and specified bits per scalar element /// /// /// An integer that is the length of the most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). 
/// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1)) { static_assert(_Rank == 2, "texture(int, int, unsigned int) is only permissible on texture."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element); } /// /// Construct a texture<T,3> with three integers and specified bits per scalar element /// /// /// An integer that is the length of the most-significant dimension of this texture (depth). /// /// /// An integer that is the length of the next-to-most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// A beginning iterator into the source container. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2)) { static_assert(_Rank == 3, "texture(int, int, int, unsigned int) is only permissible on texture."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Bits_per_scalar_element); } /// /// Construct a texture from extents and specified bits per scalar element, bound to a specific accelerator_view. /// /// /// An extent that describes the shape of the texture. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(_Ext) { _Initialize(_Av, _Bits_per_scalar_element); } /// /// Construct a texture from extents, specified bits per scalar element and number of mipmap levels /// /// /// An extent that describes the shape of the texture. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// Number of mipmap levels in the underlying texture. /// If 0 is specified, the texture will have full range of mipmap levels down to smallest possible size for the given extent. /// /// /// An accelerator_view where this texture resides. 
/// texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element, unsigned int _Mipmap_levels, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(_Ext, _Mipmap_levels) { _Initialize(_Av, _Bits_per_scalar_element); } /// /// Construct a staging texture from extents and specified bits per scalar element, bound to a /// specific accelerator_view and an associated accelerator_view that is the preferred location /// for copying to/from this texture. /// /// /// An extent that describes the shape of the texture. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(const Concurrency::extent<_Rank>& _Ext, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(_Ext) { _Initialize(_Av, _Associated_av, _Bits_per_scalar_element); } /// /// Construct a texture<T, 1> with integer _E0 and specified bits per scalar element, bound to a specific accelerator. /// /// /// An integer that is the length of the most-significant dimension of this texture (width). /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// texture(int _E0, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0)) { static_assert(_Rank == 1, "texture(int, unsigned int, accelerator_view) is only permissible on texture."); _Initialize(_Av, _Bits_per_scalar_element); } /// /// Construct a staging texture<T, 1> with integer _E0 and specified bits per scalar element, /// bound to a specific accelerator and an associated accelerator_view that is the preferred location /// for copying to/from this texture. /// /// /// An integer that is the length of the most-significant dimension of this texture (width). /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(int _E0, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0)) { static_assert(_Rank == 1, "texture(int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture."); _Initialize(_Av, _Associated_av, _Bits_per_scalar_element); } /// /// Construct a texture<T,2> with two integers and specified bits per scalar element, bound to a specific accelerator. 
/// /// /// An integer that is the length of the most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1)) { static_assert(_Rank == 2, "texture(int, int, unsigned int, accelerator_view) is only permissible on texture."); _Initialize(_Av, _Bits_per_scalar_element); } /// /// Construct a staging texture<T,2> with two integers and specified bits per scalar element, /// bound to a specific accelerator and an associated accelerator_view that is the preferred location /// for copying to/from this texture. /// /// /// An integer that is the length of the most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(int _E0, int _E1, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1)) { static_assert(_Rank == 2, "texture(int, int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture."); _Initialize(_Av, _Associated_av, _Bits_per_scalar_element); } /// /// Construct a texture<T,3> with three integers and specified bits per scalar element, bound to a specific accelerator. /// /// /// An integer that is the length of the most-significant dimension of this texture (depth). /// /// /// An integer that is the length of the least-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. 
/// texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2)) { static_assert(_Rank == 3, "texture(int, int, int, unsigned int, accelerator_view) is only permissible on texture."); _Initialize(_Av, _Bits_per_scalar_element); } /// /// Construct a staging texture<T,3> with three integers and specified bits per scalar element, /// bound to a specific accelerator and an associated accelerator_view that is the preferred location /// for copying to/from this texture. /// /// /// An integer that is the length of the most-significant dimension of this texture (depth). /// /// /// An integer that is the length of the least-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(int _E0, int _E1, int _E2, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2)) { static_assert(_Rank == 3, "texture(int, int, int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture."); _Initialize(_Av, _Associated_av, _Bits_per_scalar_element); } /// /// Construct a texture from extents and specified bits per scalar element, initialized from a host buffer. /// /// /// An extent that describes the shape of the texture. /// /// /// A pointer to a host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// texture(const Concurrency::extent<_Rank>& _Ext, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY : _Texture_base(_Ext) { _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Source, _Src_byte_size, _Bits_per_scalar_element); } /// /// Construct a texture<T,1> with integer _E0 and specified bits per scalar element, initialized from a host buffer. /// /// /// An integer that is the length of this texture (width). /// /// /// A pointer to a host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. 
/// 64 is only valid for double based textures /// texture(int _E0, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0)) { static_assert(_Rank == 1, "texture(int, void *, unsigned int, unsigned int) is only permissible on texture."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Source, _Src_byte_size, _Bits_per_scalar_element); } /// /// Construct a texture<T,2> with two integers and specified bits per scalar element, initialized from a host buffer. /// /// /// An integer that is the length of the most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// A pointer to a host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// texture(int _E0, int _E1, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1)) { static_assert(_Rank == 2, "texture(int, int, void *, unsigned int, unsigned int) is only permissible on texture."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Source, _Src_byte_size, _Bits_per_scalar_element); } /// /// Construct a texture<T,3> with three integers and specified bits per scalar element, initialized from a host buffer. /// /// /// An integer that is the length of the most-significant dimension of this texture (depth). /// /// /// An integer that is the length of the least-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// A pointer to a host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// texture(int _E0, int _E1, int _E2, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2)) { static_assert(_Rank == 3, "texture(int, int, int, void *, unsigned int, unsigned int) is only permissible on texture."); _Initialize(Concurrency::details::_Select_default_accelerator().default_view, _Source, _Src_byte_size, _Bits_per_scalar_element); } /// /// Construct a texture from extents and specified bits per scalar element, initialized from a host buffer, bound to a specific accelerator_view. /// /// /// An extent that describes the shape of the texture. /// /// /// A pointer to a host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. 
/// texture(const Concurrency::extent<_Rank>& _Ext, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(_Ext) { _Initialize(_Av, _Source, _Src_byte_size, _Bits_per_scalar_element); } /// /// Construct a staging texture from extents and specified bits per scalar element, initialized from a host buffer, /// bound to a specific accelerator_view and an associated accelerator_view that is the preferred location for copying /// to/from this texture. /// /// /// An extent that describes the shape of the texture. /// /// /// A pointer to a host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(const Concurrency::extent<_Rank>& _Ext, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(_Ext) { _Initialize(_Av, _Associated_av, _Source, _Src_byte_size, _Bits_per_scalar_element); } /// /// Construct a texture<T, 1> with integer _E0 and specified bits per scalar element, initialized from a host buffer, bound to a specific accelerator_view. /// /// /// An integer that is the length of this texture (width). /// /// /// A pointer to a host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// texture(int _E0, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0)) { static_assert(_Rank == 1, "texture(int, void *, unsigned int, unsigned int, accelerator_view) is only permissible on texture."); _Initialize(_Av, _Source, _Src_byte_size, _Bits_per_scalar_element); } /// /// Construct a staging texture<T, 1> with integer _E0 and specified bits per scalar element, initialized from a host buffer, /// bound to a specific accelerator_view and an associated accelerator_view that is the preferred location for copying /// to/from this texture. /// /// /// An integer that is the length of this texture (width). /// /// /// A pointer to a host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. 
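// -------------------------------------------------------------------------
// Usage sketch (illustrative only; not part of the original header). The
// host-buffer constructors above initialize the texture from raw bytes; this
// sketch creates a 2-D, 4-channel, 8-bits-per-scalar-element texture, with
// 'av' assumed to be a valid accelerator_view:
//
//   std::vector<unsigned char> host_data(768 * 1024 * 4);  // 4 bytes per texel
//   concurrency::graphics::texture<concurrency::graphics::uint_4, 2> tex(
//       concurrency::extent<2>(768, 1024), host_data.data(),
//       static_cast<unsigned int>(host_data.size()), 8U, av);
// -------------------------------------------------------------------------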
/// texture(int _E0, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0)) { static_assert(_Rank == 1, "texture(int, void *, unsigned int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture."); _Initialize(_Av, _Associated_av, _Source, _Src_byte_size, _Bits_per_scalar_element); } /// /// Construct a texture<T, 2> with two integers and specified bits per scalar element, initialized from a host buffer, bound to a specific accelerator_view. /// /// /// An integer that is the length of the most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// A pointer to a host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// texture(int _E0, int _E1, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1)) { static_assert(_Rank == 2, "texture(int, int, void *, unsigned int, unsigned int, accelerator_view) is only permissible on texture."); _Initialize(_Av, _Source, _Src_byte_size, _Bits_per_scalar_element); } /// /// Construct a staging texture<T, 2> with two integers and specified bits per scalar element, initialized from a host buffer, /// bound to a specific accelerator_view and an associated accelerator_view that is the preferred location for copying /// to/from this texture. /// /// /// An integer that is the length of the most-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// A pointer to a host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(int _E0, int _E1, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1)) { static_assert(_Rank == 2, "texture(int, int, void *, unsigned int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture."); _Initialize(_Av, _Associated_av, _Source, _Src_byte_size, _Bits_per_scalar_element); } /// /// Construct a texture<T, 3> with three integers and specified bits per scalar element, initialized from a host buffer, bound to a specific accelerator_view. 
/// /// /// An integer that is the length of the most-significant dimension of this texture (depth). /// /// /// An integer that is the length of the least-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// A pointer to a host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// texture(int _E0, int _E1, int _E2, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2)) { static_assert(_Rank == 3, "texture(int, int, int, void *, unsigned int, unsigned int, accelerator_view) is only permissible on texture."); _Initialize(_Av, _Source, _Src_byte_size, _Bits_per_scalar_element); } /// /// Construct a staging texture<T, 3> with three integers and specified bits per scalar element, initialized from a host buffer, /// bound to a specific accelerator_view and an associated accelerator_view that is the preferred location for copying /// to/from this texture. /// /// /// An integer that is the length of the most-significant dimension of this texture (depth). /// /// /// An integer that is the length of the least-significant dimension of this texture (height). /// /// /// An integer that is the length of the least-significant dimension of this texture (width). /// /// /// A pointer to a host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// Number of bits per each scalar element in the underlying scalar type of the texture. /// In general, supported value is 8, 16, 32, 64. /// If 0 is specified, the number of bits picks defaulted value for the underlying scalar_type. /// 64 is only valid for double based textures /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(int _E0, int _E1, int _E2, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element, const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY : _Texture_base(Concurrency::extent<_Rank>(_E0, _E1, _E2)) { static_assert(_Rank == 3, "texture(int, int, int, void *, unsigned int, unsigned int, accelerator_view, accelerator_view) is only permissible on texture."); _Initialize(_Av, _Associated_av, _Source, _Src_byte_size, _Bits_per_scalar_element); } /// /// Construct a texture from a texture_view. Deep copy /// /// /// The texture_view to copy from. /// texture(const texture_view<_Value_type, _Rank> & _Src) : _Texture_base(_Src.extent, _Src.get_mipmap_levels()) { _Initialize(_Src.accelerator_view, _Src); } /// /// Construct a texture from a read-only texture_view. Deep copy /// /// /// The read-only texture_view to copy from. /// texture(const texture_view & _Src) : _Texture_base(_Src.extent, _Src.get_mipmap_levels()) { _Initialize(_Src.accelerator_view, _Src); } /// /// Construct a texture from a texture_view on another accelerator_view. Deep copy /// /// /// The texture_view to copy from. 
/// /// /// An accelerator_view where this texture resides. /// texture(const texture_view<_Value_type, _Rank> & _Src, const Concurrency::accelerator_view & _Acc_view) : _Texture_base(_Src.extent, _Src.get_mipmap_levels()) { _Initialize(_Acc_view, _Src); } /// /// Construct a texture from a read-only texture_view on another accelerator_view. Deep copy /// /// /// The read-only texture_view to copy from. /// /// /// An accelerator_view where this texture resides. /// texture(const texture_view & _Src, const Concurrency::accelerator_view & _Acc_view) : _Texture_base(_Src.extent, _Src.get_mipmap_levels()) { _Initialize(_Acc_view, _Src); } /// /// Construct a staging texture from a texture_view on another accelerator_view. Deep copy /// /// /// The texture_view to copy from. /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(const texture_view<_Value_type, _Rank> & _Src, const Concurrency::accelerator_view & _Acc_view, const Concurrency::accelerator_view& _Associated_av) : _Texture_base(_Src.extent, _Src.get_mipmap_levels()) { _Initialize(_Acc_view, _Associated_av, _Src); } /// /// Construct a staging texture from a read-only texture_view on another accelerator_view. Deep copy /// /// /// The read-only texture_view to copy from. /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(const texture_view & _Src, const Concurrency::accelerator_view & _Acc_view, const Concurrency::accelerator_view& _Associated_av) : _Texture_base(_Src.extent, _Src.get_mipmap_levels()) { _Initialize(_Acc_view, _Associated_av, _Src); } /// /// Copy constructor. Deep copy /// /// /// The texture to copy from. /// texture(const texture & _Src) : _Texture_base(_Src.extent, _Src.get_mipmap_levels()) { _Initialize(_Src.accelerator_view, _Src.associated_accelerator_view, _Src); } /// /// Move constructor /// /// /// The source texture to move from. /// texture(texture && _Other) { *this = std::move(_Other); } /// /// Copy constructor. Deep copy /// /// /// The texture to copy from. /// /// /// An accelerator_view where this texture resides. /// texture(const texture & _Src, const Concurrency::accelerator_view & _Av) : _Texture_base(_Src.extent, _Src.get_mipmap_levels()) { _Initialize(_Av, _Src); } /// /// Copy constructor. Deep copy /// /// /// The texture to copy from. /// /// /// An accelerator_view where this texture resides. /// /// /// An accelerator_view which specifies the preferred target location for copies /// to/from the texture. /// texture(const texture & _Src, const Concurrency::accelerator_view & _Av, const Concurrency::accelerator_view& _Associated_av) : _Texture_base(_Src.extent, _Src.get_mipmap_levels()) { _Initialize(_Av, _Associated_av, _Src); } /// /// Copy assignment operator. Deep copy /// /// /// The texture to copy from. /// /// /// A reference to this texture. /// texture& operator=(const texture & _Other) { if (this != &_Other) { _M_extent = _Other._M_extent; _M_texture_descriptor._Set_view_mipmap_levels(_Other.get_mipmap_levels()); _Initialize(_Other.accelerator_view, _Other.associated_accelerator_view, _Other); } return *this; } /// /// Move assignment operator /// /// /// The source texture to move from. /// /// /// A reference to this texture. 
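// -------------------------------------------------------------------------
// Usage sketch (illustrative only; not part of the original header). Move
// construction/assignment transfer ownership of the underlying texture data
// without a deep copy and leave the moved-from texture empty; 'av' is
// assumed to be a valid accelerator_view:
//
//   concurrency::graphics::texture<float, 2> t1(512, 512, av);
//   concurrency::graphics::texture<float, 2> t2 = std::move(t1);  // no data copy
// -------------------------------------------------------------------------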
/// texture& operator=(texture<_Value_type, _Rank> && _Other) { if (this != &_Other) { _M_extent = _Other._M_extent; _M_texture_descriptor = _Other._M_texture_descriptor; _Other._M_texture_descriptor._M_data_ptr = NULL; _Other._M_texture_descriptor._Set_texture_ptr(NULL); } return *this; }
/// /// Copy-to, deep copy /// /// /// The destination texture to copy to. /// void copy_to(texture& _Dest) const { if (this->extent != _Dest.extent) { throw runtime_exception("The source and destination textures must have exactly the same extent.", E_INVALIDARG); } auto _Span_id = concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(concurrency::details::_Get_texture_descriptor(*this), concurrency::details::_Get_texture_descriptor(_Dest), this->get_data_length()); _Texture_base::_Copy_to(_Dest); concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); }
/// /// Copy-to, deep copy /// /// /// The destination writeonly_texture_view to copy to. /// #pragma warning( push ) #pragma warning( disable : 4996 ) // writeonly_texture_view is deprecated void copy_to(const writeonly_texture_view<_Value_type, _Rank> & _Dest) const { if (this->extent != _Dest.extent) { throw runtime_exception("The source and destination textures must have exactly the same extent.", E_INVALIDARG); } auto _Span_id = concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(concurrency::details::_Get_texture_descriptor(*this), concurrency::details::_Get_texture_descriptor(_Dest), this->get_data_length()); _Texture_base::_Copy_to(_Dest); concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); } #pragma warning( pop )
/// /// Destructor /// ~texture() __CPU_ONLY { }
/// /// Get the element value indexed by _Index. /// /// /// The index. /// /// /// The element value indexed by _Index. /// const value_type operator[] (const index<_Rank>& _Index) const __GPU_ONLY { value_type _Tmp; _Texture_read_helper<index<_Rank>, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Tmp, _Index, /*_Mip_level=*/0); return _Tmp; }
/// /// Get the element value indexed by _I0. /// /// /// The index. /// /// /// The element value indexed by _I0. /// const value_type operator[] (int _I0) const __GPU_ONLY { static_assert(_Rank == 1, "value_type texture::operator[](int) is only permissible on texture<T, 1>."); return (*this)[index<1>(_I0)]; }
/// /// Get the element value indexed by _Index. /// /// /// The index. /// /// /// The element value indexed by _Index. /// const value_type operator() (const index<_Rank>& _Index) const __GPU_ONLY { return (*this)[_Index]; }
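// -------------------------------------------------------------------------
// Usage sketch (illustrative only; not part of the original header). copy_to
// performs a deep copy and requires identical extents; 'av' is assumed to be
// a valid accelerator_view:
//
//   concurrency::graphics::texture<float, 2> src(256, 256, av);
//   concurrency::graphics::texture<float, 2> dst(256, 256, av);
//   src.copy_to(dst);  // throws runtime_exception if the extents differ
// -------------------------------------------------------------------------
/// /// Get the element value indexed by _I0 /// /// /// The index. /// /// /// The element value indexed by _I0.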
/// const value_type operator() (int _I0) const __GPU_ONLY { static_assert(_Rank == 1, "value_type texture::operator()(int) is only permissible on texture<T, 1>."); return (*this)[index<1>(_I0)]; }
/// /// Get the element value indexed by (_I0,_I1) /// /// /// The most-significant component of the index /// /// /// The least-significant component of the index /// /// /// The element value indexed by (_I0,_I1) /// const value_type operator() (int _I0, int _I1) const __GPU_ONLY { static_assert(_Rank == 2, "value_type texture::operator()(int, int) is only permissible on texture<T, 2>."); return (*this)[index<2>(_I0, _I1)]; }
/// /// Get the element value indexed by (_I0,_I1,_I2) /// /// /// The most-significant component of the index /// /// /// The next-to-most-significant component of the index /// /// /// The least-significant component of the index /// /// /// The element value indexed by (_I0,_I1,_I2) /// const value_type operator() (int _I0, int _I1, int _I2) const __GPU_ONLY { static_assert(_Rank == 3, "value_type texture::operator()(int, int, int) is only permissible on texture<T, 3>."); return (*this)[index<3>(_I0, _I1, _I2)]; }
/// /// Get the element value indexed by _Index. /// /// /// The index. /// /// /// The element value indexed by _Index. /// const value_type get(const index<_Rank>& _Index) const __GPU_ONLY { return (*this)[_Index]; }
/// /// Set the element indexed by _Index with value _Value. /// /// /// The index. /// /// /// The value to be set to the element indexed by _Index. /// void set(const index<_Rank>& _Index, const value_type& _Value) __GPU_ONLY { static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels == 1, "Invalid value_type for set method."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "Invalid value_type for set method."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "Invalid value_type for set method."); _Texture_write_helper<index<_Rank>, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Value, _Index); }
/// /// Returns a CPU pointer to the raw data of this texture. /// _Ret_ void* data() __CPU_ONLY { return _Get_texture()->_Get_host_ptr(); }
/// /// Returns a CPU pointer to the raw data of this texture. /// const void* data() const __CPU_ONLY { return _Get_texture()->_Get_host_ptr(); }
/// /// Returns the row pitch (in bytes) of a 2D or 3D staging texture on the CPU, to be /// used for navigating the staging texture from row to row on the CPU. /// __declspec(property(get=get_row_pitch)) unsigned int row_pitch; unsigned int get_row_pitch() const __CPU_ONLY { static_assert(_Rank >= 2, "row_pitch is only applicable to staging textures with rank 2 or higher."); if (!_Get_texture()->_Is_staging()) { throw runtime_exception("row_pitch is only applicable to staging textures.", E_INVALIDARG); } return static_cast<unsigned int>(_Get_texture()->_Get_row_pitch()); }
/// /// Returns the depth pitch (in bytes) of a 3D staging texture on the CPU, to be used /// for navigating the staging texture from depth slice to depth slice on the CPU. /// __declspec(property(get=get_depth_pitch)) unsigned int depth_pitch; unsigned int get_depth_pitch() const __CPU_ONLY { static_assert(_Rank == 3, "depth_pitch is only applicable to staging textures with rank 3."); if (!_Get_texture()->_Is_staging()) { throw runtime_exception("depth_pitch is only applicable to staging textures.", E_INVALIDARG); } return static_cast<unsigned int>(_Get_texture()->_Get_depth_pitch()); }
/// /// Returns the accelerator_view that is the preferred target where this texture can be copied. /// __declspec(property(get=get_associated_accelerator_view)) Concurrency::accelerator_view associated_accelerator_view; Concurrency::accelerator_view get_associated_accelerator_view() const __CPU_ONLY { return _Get_texture()->_Get_accelerator_view(); }
private: // Private constructor used by make_texture to create a texture from a D3D texture. texture(const Concurrency::extent<_Rank> & _Ext, const _Texture_descriptor & _Descriptor) : details::_Texture_base<_Value_type, _Rank>(_Ext, _Descriptor) { }
bool _Should_create_staging_texture(const Concurrency::accelerator_view &_Av, const Concurrency::accelerator_view &_Associated_av) { return (_Is_cpu_accelerator(_Av.accelerator) && !_Is_cpu_accelerator(_Associated_av.accelerator)); }
void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av, unsigned int _Bits_per_scalar_element) __CPU_ONLY { if (_Bits_per_scalar_element != 8 && _Bits_per_scalar_element != 16 && _Bits_per_scalar_element != 32 && _Bits_per_scalar_element != 64) { throw runtime_exception("Invalid _Bits_per_scalar_element argument - it can only be 8, 16, 32, or 64.", E_INVALIDARG); } // Special cases for 64-bit and double-based textures. #pragma warning( push ) #pragma warning( disable : 4127 ) // conditional expression is constant if (_Bits_per_scalar_element == 64 && _Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type) { throw runtime_exception("Invalid _Bits_per_scalar_element argument - 64 is only valid for textures of double-based short vector types.", E_INVALIDARG); } if (_Bits_per_scalar_element != 64 && _Short_vector_type_traits<_Value_type>::_Format_base_type_id == _Double_type) { throw runtime_exception("Invalid _Bits_per_scalar_element argument - it can only be 64 for textures of double-based short vector types.", E_INVALIDARG); } details::_Is_valid_data_length(_M_extent.size(), _Bits_per_scalar_element * _Short_vector_type_traits<_Value_type>::_Num_channels); // The rest of the checking is done by _Texture::_Create_texture; it depends on the underlying supported DXGI formats.
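// Note: for double-based value types the texture is created with a _Uint_type
// base format below (each double scalar is represented by two 32-bit
// unsigned-integer channels), so the per-channel bit count is taken from the
// traits' default rather than from the 64-bit _Bits_per_scalar_element
// requested by the caller.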
unsigned int _Bits_per_channel = _Bits_per_scalar_element; if (_Short_vector_type_traits<_Value_type>::_Format_base_type_id == _Double_type) { _Bits_per_channel = _Short_vector_type_traits<_Value_type>::_Default_bits_per_channel; }
std::array<size_t, 3> _Dimensions = Concurrency::graphics::details::_Get_dimensions(_M_extent, /*_Mip_offset=*/0);
// Release the old texture before allocating the new one, to reduce the chance of hitting an out-of-memory condition. _M_texture_descriptor._Set_texture_ptr(NULL); _Texture_ptr _Tex_ptr = NULL;
// See if we need to allocate a staging texture. if (_Should_create_staging_texture(_Av, _Associated_av)) { if (_M_texture_descriptor._Get_view_mipmap_levels() > 1) { throw runtime_exception("Creating staging textures with mipmap levels > 1 is not supported", E_INVALIDARG); } _Tex_ptr = _Texture::_Create_stage_texture( _Associated_av, _Av, _Rank, _Dimensions[0], _Dimensions[1], _Dimensions[2], _M_texture_descriptor._Get_view_mipmap_levels(), _Short_vector_type_traits<_Value_type>::_Format_base_type_id == _Double_type ? _Uint_type : _Short_vector_type_traits<_Value_type>::_Format_base_type_id, _Short_vector_type_traits<_Value_type>::_Num_channels, _Bits_per_channel); // Now map the texture. _Tex_ptr->_Map_buffer(_Write_access, true /* _Wait */); } else { _Tex_ptr = _Texture::_Create_texture(_Av, _Rank, _Dimensions[0], _Dimensions[1], _Dimensions[2], _M_texture_descriptor._Get_view_mipmap_levels(), _Short_vector_type_traits<_Value_type>::_Format_base_type_id == _Double_type ? _Uint_type : _Short_vector_type_traits<_Value_type>::_Format_base_type_id, _Short_vector_type_traits<_Value_type>::_Num_channels, _Bits_per_channel); } _M_texture_descriptor._Set_texture_ptr(_Tex_ptr); #pragma warning( pop ) }
void _Initialize(const Concurrency::accelerator_view& _Av, unsigned int _Bits_per_scalar_element) __CPU_ONLY { _Initialize(_Av, _Av, _Bits_per_scalar_element); }
void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av) __CPU_ONLY { _Initialize(_Av, _Associated_av, Concurrency::graphics::details::_Get_default_bits_per_scalar_element<_Value_type>()); }
void _Initialize(const Concurrency::accelerator_view& _Av) __CPU_ONLY { _Initialize(_Av, _Av); }
template <typename _Input_iterator> void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY { _Initialize(_Av, _Associated_av); auto _Span_id = Concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(nullptr, Concurrency::details::_Get_texture_descriptor(*this), this->get_data_length()); Concurrency::graphics::details::_Copy_async_impl(_Src_first, _Src_last, *this, index<_Rank>(), this->extent)._Get(); Concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); }
template <typename _Input_iterator> void _Initialize(const Concurrency::accelerator_view& _Av, _Input_iterator _Src_first, _Input_iterator _Src_last) __CPU_ONLY { _Initialize(_Av, _Av, _Src_first, _Src_last); }
void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY { _Initialize(_Av, _Associated_av, _Bits_per_scalar_element); Concurrency::graphics::copy(_Source, _Src_byte_size, *this); }
void _Initialize(const Concurrency::accelerator_view& _Av, const void * _Source, unsigned int _Src_byte_size, unsigned int _Bits_per_scalar_element) __CPU_ONLY { _Initialize(_Av, _Av, _Source, _Src_byte_size, _Bits_per_scalar_element); }
void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av, const void * _Source, unsigned int _Src_byte_size) __CPU_ONLY { _Initialize(_Av, _Associated_av); Concurrency::graphics::copy(_Source, _Src_byte_size, *this); }
void _Initialize(const Concurrency::accelerator_view& _Av, const void * _Source, unsigned int _Src_byte_size) __CPU_ONLY { _Initialize(_Av, _Av, _Source, _Src_byte_size); }
void _Initialize(const Concurrency::accelerator_view& _Av, const Concurrency::accelerator_view& _Associated_av, const details::_Texture_base<_Value_type, _Rank> & _Src) __CPU_ONLY { if (_Src.bits_per_scalar_element != 0) // _Src was not created via interop { _Initialize(_Av, _Associated_av, _Src.bits_per_scalar_element); } else // _Src was created via interop; create a new texture with the same properties as the existing one. { _Texture_ptr _New_tex; if (_Should_create_staging_texture(_Av, _Associated_av)) { _New_tex = _Texture::_Clone_texture(concurrency::details::_Get_texture(_Src), _Associated_av, _Av); } else { _New_tex = _Texture::_Clone_texture(concurrency::details::_Get_texture(_Src), _Av, _Associated_av); } _M_texture_descriptor._Set_texture_ptr(_New_tex); } auto _Span_id = Concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(Concurrency::details::_Get_texture_descriptor(_Src), Concurrency::details::_Get_texture_descriptor(*this), this->get_data_length()); Concurrency::graphics::details::_Copy_async_impl(_Src, index<_Rank>(), *this, index<_Rank>(), this->extent)._Get(); Concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); }
void _Initialize(const Concurrency::accelerator_view& _Av, const details::_Texture_base<_Value_type, _Rank> & _Src) __CPU_ONLY { _Initialize(_Av, _Av, _Src); } };
/// /// A writeonly_texture_view provides write-only access to a texture. /// /// /// The type of the elements in the texture aggregates. /// /// /// The _Rank of the corresponding extent domain. /// #pragma warning( push ) #pragma warning( disable : 4996 ) // writeonly_texture_view is deprecated template <typename _Value_type, int _Rank> class __declspec(deprecated("writeonly_texture_view is deprecated. Please use texture_view instead.")) writeonly_texture_view : public details::_Texture_base<_Value_type, _Rank> { static_assert(!std::is_const<_Value_type>::value, "const value type is not supported for writeonly_texture_view."); public:
/// /// Construct a writeonly_texture_view of a texture _Src. /// /// The texture on which the writeonly view is created. /// writeonly_texture_view(texture<_Value_type, _Rank>& _Src) __CPU_ONLY : _Texture_base(_Src, /*_Most_detailed_mipmap_level=*/0, /*_View_mipmap_levels=*/1) { _Texture* _Tex = _Get_texture(); if ((_Tex->_Get_num_channels() == 3) && (_Tex->_Get_bits_per_channel() == 32)) { throw runtime_exception("writeonly_texture_view cannot be created from a 3-channel texture with 32 bits per scalar element.", E_INVALIDARG); } if (_Tex->_Is_staging()) { throw runtime_exception("writeonly_texture_view cannot be created from a staging texture object.", E_INVALIDARG); } }
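// -------------------------------------------------------------------------
// Note (illustrative only; not part of the original header):
// writeonly_texture_view is deprecated; prefer the texture_view class
// defined later in this header. Hedged usage sketch, assuming a valid
// accelerator_view 'av':
//
//   concurrency::graphics::texture<float, 2> tex(256, 256, av);
//   concurrency::graphics::writeonly_texture_view<float, 2> wtv(tex);  // deprecated
// -------------------------------------------------------------------------
/// /// Construct a writeonly_texture_view of a texture _Src. /// /// The texture on which the writeonly view is created.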
/// writeonly_texture_view(texture<_Value_type, _Rank>& _Src) __GPU_ONLY : _Texture_base(_Src, /*_Flatten_mipmap_levels=*/true) { static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels == 1, "Invalid value_type for the constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "Invalid value_type for the constructor."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "Invalid value_type for the constructor."); }
/// /// Construct a writeonly_texture_view from another writeonly_texture_view. Both are views of the same texture. /// /// /// The writeonly_texture_view from which the current view is created. /// writeonly_texture_view(const writeonly_texture_view<_Value_type, _Rank>& _Src) __GPU : _Texture_base(_Src) { }
/// /// Assignment operator. This writeonly_texture_view becomes a view of the same texture which _Other is a view of. /// /// /// The source writeonly_texture_view. /// writeonly_texture_view<_Value_type, _Rank>& operator=(const writeonly_texture_view<_Value_type, _Rank>& _Other) __GPU { if (this != &_Other) { _M_extent = _Other._M_extent; _M_texture_descriptor = _Other._M_texture_descriptor; } return *this; }
/// /// Destructor /// ~writeonly_texture_view() __GPU { }
/// /// Set the element indexed by _Index with value _Value. /// /// /// The index. /// /// /// The value to be set to the element indexed by _Index. /// void set(const index<_Rank>& _Index, const value_type& _Value) const __GPU_ONLY { _Texture_write_helper<index<_Rank>, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Value, _Index); } }; #pragma warning( pop )
/// /// A texture_view provides read and write access to a texture. /// Note that currently texture_view can only be used to read textures whose value type is int, unsigned int, or float /// with the default 32 bits per scalar element. To read other texture formats, use texture_view<const _Value_type, _Rank>. /// /// /// The type of the elements in the texture aggregates. /// /// /// The _Rank of the corresponding extent domain. /// template <typename _Value_type, int _Rank> class texture_view : public details::_Texture_base<_Value_type, _Rank> { friend class texture_view<const _Value_type, _Rank>; public:
/// /// Construct a texture_view of a texture _Src on the host. /// /// /// The texture on which the texture_view is created. /// /// /// The specific mipmap level on the _Src texture that this read-write texture_view should bind to. /// The default value 0 binds to the most detailed mipmap level. /// texture_view(texture<_Value_type, _Rank>& _Src, unsigned int _Mipmap_level = 0) __CPU_ONLY : _Texture_base(_Src, _Mipmap_level, /*_View_mipmap_levels=*/1) { if (_Get_texture()->_Is_staging()) { throw runtime_exception("texture_view cannot be created from a staging texture object.", E_INVALIDARG); } }
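// -------------------------------------------------------------------------
// Usage sketch (illustrative only; not part of the original header). Binding
// a writable texture_view to a specific mipmap level on the host, assuming
// the extent + mipmap-levels texture constructor declared earlier in this
// header and a valid accelerator_view 'av':
//
//   concurrency::graphics::texture<float, 2> tex(concurrency::extent<2>(256, 256), 4U /*mip levels*/, av);
//   concurrency::graphics::texture_view<float, 2> tv0(tex);     // binds level 0
//   concurrency::graphics::texture_view<float, 2> tv1(tex, 1);  // binds level 1
// -------------------------------------------------------------------------
/// /// Construct a texture_view of a texture _Src on an accelerator. /// /// /// The texture on which the texture_view is created.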
/// texture_view(texture<_Value_type, _Rank>& _Src) __GPU_ONLY : _Texture_base(_Src, /*_Flatten_mipmap_levels=*/true) { static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels == 1, "writable texture_view can only be created from a single-component texture on an accelerator."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "writable texture_view cannot be created from a unorm texture on an accelerator."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "writable texture_view cannot be created from a norm texture on an accelerator."); } /// /// Construct a texture_view from another texture_view. Both are views of the same texture. /// /// /// The source texture_view. /// texture_view(const texture_view<_Value_type, _Rank>& _Other) __GPU : _Texture_base(_Other) { } /// /// Assignment operator. This texture_view becomes a view of the same texture which _Other is a view of. /// /// /// The source texture_view. /// texture_view<_Value_type, _Rank>& operator=(const texture_view<_Value_type, _Rank>& _Other) __GPU { if (this != &_Other) { _M_extent = _Other._M_extent; _M_texture_descriptor = _Other._M_texture_descriptor; } return *this; } /// /// Destructor /// ~texture_view() __GPU { } /// /// Get the element value indexed by _Index. /// /// /// The index. /// /// /// The element value indexed by _Index. /// const value_type operator[] (const index<_Rank>& _Index) const __GPU_ONLY { static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels == 1, "Read is only permissible on single-component writable texture_view."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Unorm_type, "Read is not permissible on a writable unorm texture_view."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Norm_type, "Read is not permissible on a writable norm texture_view."); value_type _Tmp; _Texture_read_helper, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Tmp, _Index, /*_Mip_level=*/0); return _Tmp; } /// /// Get the element value indexed by _I0. /// /// /// The index. /// /// /// The element value indexed by _I0. /// const value_type operator[] (int _I0) const __GPU_ONLY { static_assert(_Rank == 1, "const value_type operator[](int) is only permissible on texture_view."); return (*this)[index<1>(_I0)]; } /// /// Get the element value indexed by _Index. /// /// /// The index. /// /// /// The element value indexed by _Index. /// const value_type operator() (const index<_Rank>& _Index) const __GPU_ONLY { return (*this)[_Index]; } /// /// Get the element value indexed by _I0 /// /// /// The index. /// /// /// The element value indexed by _I0. 
/// const value_type operator() (int _I0) const __GPU_ONLY { static_assert(_Rank == 1, "const value_type operator()(int) is only permissible on texture_view<T, 1>."); return (*this)[index<1>(_I0)]; }
/// /// Get the element value indexed by (_I0,_I1) /// /// /// The most-significant component of the index /// /// /// The least-significant component of the index /// /// /// The element value indexed by (_I0,_I1) /// const value_type operator() (int _I0, int _I1) const __GPU_ONLY { static_assert(_Rank == 2, "const value_type operator()(int, int) is only permissible on texture_view<T, 2>."); return (*this)[index<2>(_I0, _I1)]; }
/// /// Get the element value indexed by (_I0,_I1,_I2) /// /// /// The most-significant component of the index /// /// /// The next-to-most-significant component of the index /// /// /// The least-significant component of the index /// /// /// The element value indexed by (_I0,_I1,_I2) /// const value_type operator() (int _I0, int _I1, int _I2) const __GPU_ONLY { static_assert(_Rank == 3, "const value_type operator()(int, int, int) is only permissible on texture_view<T, 3>."); return (*this)[index<3>(_I0, _I1, _I2)]; }
/// /// Get the element value indexed by _Index. /// /// /// The index. /// /// /// The element value indexed by _Index. /// const value_type get(const index<_Rank>& _Index) const __GPU_ONLY { return (*this)[_Index]; }
/// /// Set the element indexed by _Index with value _Value. /// /// /// The index. /// /// /// The value to be set to the element indexed by _Index. /// void set(const index<_Rank>& _Index, const value_type& _Value) const __GPU_ONLY { _Texture_write_helper<index<_Rank>, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Value, _Index); } };
/// /// Filter modes supported for texture sampling. /// enum filter_mode { filter_point = 0, filter_linear = 0x15, filter_unknown = 0xFFFFFFFF, };
/// /// Address modes supported for texture sampling. /// enum address_mode { address_wrap = 1, address_mirror = 2, address_clamp = 3, address_border = 4, address_unknown = 0xFFFFFFFF, };
/// /// A sampler class aggregates sampling configuration information to be used for texture sampling. /// class sampler { friend sampler direct3d::make_sampler(_In_ IUnknown *_D3D_sampler) __CPU_ONLY; friend _Ret_ IUnknown * direct3d::get_sampler(const Concurrency::accelerator_view &_Av, const sampler &_Sampler) __CPU_ONLY; template <typename _Value_type, int _Rank> friend class texture_view; public:
/// /// Constructs a sampler with the default filter mode (filter_linear, same for min, mag, mip), addressing /// mode (address_clamp, same for all dimensions), and border color (float_4(0.0f, 0.0f, 0.0f, 0.0f)). /// sampler() __CPU_ONLY : _M_filter_mode(filter_linear), _M_address_mode(address_clamp), _M_border_color(float_4(0.0f, 0.0f, 0.0f, 0.0f)) { _Initialize(); }
/// /// Constructs a sampler with the specified filter mode (same for min, mag, mip), but with the default addressing /// mode (address_clamp, same for all dimensions) and border color (float_4(0.0f, 0.0f, 0.0f, 0.0f)). /// /// /// The filter mode to be used in sampling. /// sampler(filter_mode _Filter_mode) __CPU_ONLY : _M_filter_mode(_Filter_mode), _M_address_mode(address_clamp), _M_border_color(float_4(0.0f, 0.0f, 0.0f, 0.0f)) { _Initialize(); }
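// -------------------------------------------------------------------------
// Usage sketch (illustrative only; not part of the original header):
//
//   concurrency::graphics::sampler s0;  // filter_linear + address_clamp
//   concurrency::graphics::sampler s1(concurrency::graphics::filter_point);
//   concurrency::graphics::sampler s2(concurrency::graphics::address_border,
//                                     concurrency::graphics::float_4(0.0f, 0.0f, 0.0f, 1.0f));
// -------------------------------------------------------------------------
/// /// Constructs a sampler with the default filter mode (filter_linear, same for min, mag, mip), but specified /// addressing mode (same for all dimensions) and border color. /// /// /// The addressing mode to be used in sampling for all dimensions. /// /// /// The border color to be used if address mode is address_border.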
If not specified, default value is float_4(0.f, 0.f, 0.f, 0.f). /// sampler(address_mode _Address_mode, float_4 _Border_color = float_4(0.0f, 0.0f, 0.0f, 0.0f)) __CPU_ONLY : _M_filter_mode(filter_linear), _M_address_mode(_Address_mode), _M_border_color(_Border_color) { _Initialize(); } /// /// Constructs a sampler with specified filter mode (same for min, mag, mip), addressing /// mode (same for all dimensions) and the border color. /// /// /// The filter mode to be used in sampling. /// /// /// The addressing mode to be used in sampling for all dimensions. /// /// /// The border color to be used if address mode is address_border. If not specified, default value is float_4(0.f, 0.f, 0.f, 0.f). /// sampler(filter_mode _Filter_mode, address_mode _Address_mode, float_4 _Border_color = float_4(0.0f, 0.0f, 0.0f, 0.0f)) __CPU_ONLY : _M_filter_mode(_Filter_mode), _M_address_mode(_Address_mode), _M_border_color(_Border_color) { _Initialize(); } /// /// Copy constructor. /// /// /// An object of type sampler from which to initialize this new sampler. /// sampler(const sampler& _Other) __GPU : _M_filter_mode(_Other._M_filter_mode), _M_address_mode(_Other._M_address_mode), _M_border_color(_Other._M_border_color), _M_sampler_descriptor(_Other._M_sampler_descriptor) { } /// /// Move constructor. /// /// /// The sampler to move from. /// sampler(sampler &&_Other) __GPU : _M_filter_mode(_Other._M_filter_mode), _M_address_mode(_Other._M_address_mode), _M_border_color(_Other._M_border_color), _M_sampler_descriptor(_Other._M_sampler_descriptor) { _Other._M_sampler_descriptor._M_data_ptr = NULL; _Other._M_sampler_descriptor._Set_sampler_ptr(NULL); } /// /// Assignment operator. /// /// /// An object of type sampler from which to copy into this sampler. /// /// /// A reference to this sampler. /// sampler& operator=(const sampler& _Other) __GPU { if (this != &_Other) { _M_filter_mode = _Other._M_filter_mode; _M_address_mode = _Other._M_address_mode; _M_border_color = _Other._M_border_color; _M_sampler_descriptor = _Other._M_sampler_descriptor; } return *this; } /// /// Move assignment operator. /// /// /// An object of type sampler to move from. /// /// /// A reference to this sampler. /// sampler& operator=(sampler&& _Other) __GPU { if (this != &_Other) { _M_filter_mode = _Other._M_filter_mode; _M_address_mode = _Other._M_address_mode; _M_border_color = _Other._M_border_color; _M_sampler_descriptor = _Other._M_sampler_descriptor; _Other._M_sampler_descriptor._M_data_ptr = NULL; _Other._M_sampler_descriptor._Set_sampler_ptr(NULL); } return *this; } /// /// Returns the sampler's filter mode /// __declspec(property(get=get_filter_mode)) Concurrency::graphics::filter_mode filter_mode; Concurrency::graphics::filter_mode get_filter_mode() const __GPU { return _M_filter_mode; } /// /// Returns the sampler's address mode /// __declspec(property(get=get_address_mode)) Concurrency::graphics::address_mode address_mode; Concurrency::graphics::address_mode get_address_mode() const __GPU { return _M_address_mode; } /// /// Returns the sampler's border value /// __declspec(property(get=get_border_color)) Concurrency::graphics::float_4 border_color; Concurrency::graphics::float_4 get_border_color() const __GPU { return _M_border_color; } private: // internal storage abstraction typedef Concurrency::details::_Sampler_descriptor _Sampler_descriptor; // a private constructor to be used for constructing a sampler via interop. 
sampler(const _Sampler_descriptor & _Descriptor) __CPU_ONLY : _M_sampler_descriptor(_Descriptor), _M_filter_mode(filter_unknown), _M_address_mode(address_unknown), _M_border_color(float_4(0.0f, 0.0f, 0.0f, 0.0f)) { // We could query the border value from the adopted sampler, but it would not be very useful: // it is the only property we could query, and unless the address mode is address_border // the border value is not relevant. }
_Ret_ _Sampler* _Get_sampler_ptr() const __CPU_ONLY { return _M_sampler_descriptor._Get_sampler_ptr(); }
void _Initialize() __CPU_ONLY { // Check that the given filter_mode and address_mode are valid C++ AMP values. if ((_M_filter_mode != filter_point && _M_filter_mode != filter_linear) || (_M_address_mode != address_wrap && _M_address_mode != address_mirror && _M_address_mode != address_clamp && _M_address_mode != address_border)) { throw runtime_exception("Invalid sampler configuration", E_INVALIDARG); } _Sampler_ptr samplerPtr = _Sampler::_Create(_M_filter_mode, _M_address_mode, _M_border_color.r, _M_border_color.g, _M_border_color.b, _M_border_color.a); _M_sampler_descriptor._Set_sampler_ptr(samplerPtr); }
const _Sampler_descriptor & _Get_descriptor() const __GPU_ONLY { return _M_sampler_descriptor; }
_Sampler_descriptor _M_sampler_descriptor; Concurrency::graphics::filter_mode _M_filter_mode; Concurrency::graphics::address_mode _M_address_mode; float_4 _M_border_color; };
/// /// A texture_view<const _Value_type, _Rank> provides read-only access and sampling capability to a texture. /// /// /// The type of the elements in the texture aggregates. /// /// /// The _Rank of the corresponding extent domain. /// template <typename _Value_type, int _Rank> class texture_view<const _Value_type, _Rank> : public details::_Texture_base<_Value_type, _Rank> { public: typedef const _Value_type value_type; typedef typename short_vector<float, _Rank>::type coordinates_type; typedef typename short_vector<typename _Short_vector_type_traits<_Value_type>::_Scalar_type, 4>::type gather_return_type;
/// /// Construct a read-only texture_view of a texture _Src on an accelerator. /// /// /// The texture on which the read-only view is created. /// texture_view(const texture<_Value_type, _Rank>& _Src) __GPU_ONLY : _Texture_base(_Src) { // This is disallowed only in the GPU-restricted constructor. static_assert(_Short_vector_type_traits<_Value_type>::_Num_channels != 1, "Read-only texture_view cannot be created from single-component textures on an accelerator."); }
/// /// Construct a texture_view of a texture _Src on the host. /// /// /// The texture on which the read-only view is created. /// texture_view(const texture<_Value_type, _Rank>& _Src) __CPU_ONLY : _Texture_base(_Src) { if (_Get_texture()->_Is_staging()) { throw runtime_exception("Read-only texture_view cannot be created from a staging texture object.", E_INVALIDARG); } }
/// /// Construct a read-only texture_view with a specific range of mipmap levels of a texture _Src on the host. /// /// /// The texture on which the read-only view is created. /// /// /// Most detailed mipmap level for the view. /// /// /// The number of mipmap levels accessible for the view. /// texture_view(const texture<_Value_type, _Rank>& _Src, unsigned int _Most_detailed_mip, unsigned int _Mip_levels) __CPU_ONLY : _Texture_base(_Src, _Most_detailed_mip, _Mip_levels) { if (_Get_texture()->_Is_staging()) { throw runtime_exception("Read-only texture_view cannot be created from a staging texture object.", E_INVALIDARG); } }
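// -------------------------------------------------------------------------
// Usage sketch (illustrative only; not part of the original header). Creating
// read-only views on the host; 'tex' is assumed to be a texture<float_4, 2>
// created with at least 3 mipmap levels:
//
//   concurrency::graphics::texture_view<const concurrency::graphics::float_4, 2> rv(tex);
//   // Narrow the view to mipmap levels [1, 2]:
//   concurrency::graphics::texture_view<const concurrency::graphics::float_4, 2> rv_mips(tex, 1, 2);
// -------------------------------------------------------------------------
/// /// Construct a read-only texture_view of a writable texture_view. /// /// /// The writable texture view from which the read-only view is created.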
/// texture_view(const texture_view<_Value_type, _Rank>& _Other) __CPU_ONLY : _Texture_base(_Other) { } /// /// Construct a read-only texture_view from another read-only texture_view. Both are views of the same texture. /// /// /// The source read-only texture_view. /// texture_view(const texture_view& _Other) __GPU : _Texture_base(_Other) { } /// /// Construct a read-only texture_view from another read-only texture_view. /// Allows narrowing down the accessible range of mipmap levels for the texture_view. /// Both are views of the same texture. /// /// /// The source read-only texture_view. /// /// /// Top level mipmap for the view, relative to the input texture_view. /// /// /// The number of mipmap levels accessible for the view. /// texture_view(const texture_view& _Other, unsigned int _Most_detailed_mip, unsigned int _Mip_levels) __CPU_ONLY : _Texture_base(_Other, _Most_detailed_mip, _Mip_levels) { } /// /// Assignment operator. This read-only texture_view becomes a view of the same texture which _Other is a view of. /// /// /// The source read-only texture_view. /// texture_view& operator=(const texture_view& _Other) __GPU { if (this != &_Other) { _M_extent = _Other._M_extent; _M_texture_descriptor = _Other._M_texture_descriptor; } return *this; } /// /// Assignment operator from a writable texture_view. /// This read-only texture_view becomes a view of the same texture which _Other is a view of. /// /// /// The source writable texture_view. /// texture_view& operator=(const texture_view<_Value_type, _Rank>& _Other) __CPU_ONLY { _M_extent = _Other._M_extent; _M_texture_descriptor = _Other._M_texture_descriptor; return *this; } /// /// Destructor /// ~texture_view() __GPU { } /// /// Get the element value indexed by _Index. /// /// /// The index. /// /// /// The element value indexed by _Index. /// value_type operator[] (const index<_Rank>& _Index) const __GPU_ONLY { _Value_type _Tmp; _Texture_read_helper, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Tmp, _Index, /*_Mip_level=*/0); return _Tmp; } /// /// Get the element value indexed by _I. /// /// /// The index. /// /// /// The element value indexed by _I. /// value_type operator[] (int _I0) const __GPU_ONLY { static_assert(_Rank == 1, "value_type operator[](int) is only permissible on texture_view."); return (*this)[index<1>(_I0)]; } /// /// Get the element value indexed by _Index. /// /// /// The index. /// /// /// The element value indexed by _Index. /// value_type operator() (const index<_Rank>& _Index) const __GPU_ONLY { return (*this)[_Index]; } /// /// Get the element value indexed by _I0 /// /// /// The index. /// /// /// The element value indexed by _I0. 
/// value_type operator() (int _I0) const __GPU_ONLY { static_assert(_Rank == 1, "value_type texture_view::operator()(int) is only permissible on texture_view."); return (*this)[index<1>(_I0)]; } /// /// Get the element value indexed by (_I0,_I1) /// /// /// The most-significant component of the index /// /// /// The least-significant component of the index /// /// /// The element value indexed by (_I0,_I1) /// value_type operator() (int _I0, int _I1) const __GPU_ONLY { static_assert(_Rank == 2, "value_type texture_view::operator()(int, int) is only permissible on texture_view."); return (*this)[index<2>(_I0, _I1)]; } /// /// Get the element value indexed by (_I0,_I1,_I2) /// /// /// The most-significant component of the index /// /// /// The next-to-most-significant component of the index /// /// /// The least-significant component of the index /// /// /// The element value indexed by (_I0,_I1,_I2) /// value_type operator() (int _I0, int _I1, int _I2) const __GPU_ONLY { static_assert(_Rank == 3, "value_type texture_view::operator()(int, int, int) is only permissible on texture_view."); return (*this)[index<3>(_I0, _I1, _I2)]; } /// /// Get the element value indexed by _Index. /// /// /// The index. /// /// /// The mipmap level from which we should get indexed value. /// The default value 0 represents most detailed mipmap level. /// /// /// The element value indexed by _Index. /// value_type get(const index<_Rank>& _Index, unsigned int _Mip_level = 0) const __GPU_ONLY { _Value_type _Tmp; _Texture_read_helper, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Tmp, _Index, _Mip_level); return _Tmp; } /// /// Sample the texture at the given coordinates and level of detail using the specified sampling configuration. /// /// /// The sampler that configures the sampling operation. /// /// /// Coordinate vector for sampling. /// /// /// The value specifies the mipmap level to sample from. /// Fractional value is used to interpolate between two mipmap levels. /// /// /// The interpolated value. /// value_type sample(const sampler& _Sampler, const coordinates_type& _Coord, float _Level_of_detail = 0.0f) const __GPU_ONLY { static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Uint_type, "sample is not allowed for uint component types in the texture value_type."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Int_type, "sample is not allowed for int component types in the texture value_type."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type, "sample is not allowed for double component types in the texture value_type."); _Value_type _Tmp; _Texture_sample_helper::func(_M_texture_descriptor._M_data_ptr, _Sampler._Get_descriptor()._M_data_ptr, &_Tmp, _Coord, 4 /*Sampling*/, _Level_of_detail); return _Tmp; } /// /// Sample the texture at the given coordinates and level of detail using the predefined sampling configuration. /// /// /// The filter mode of the predefined sampler to be used. /// /// /// The address mode of the predefined sampler to be used. /// /// /// Coordinate vector for sampling. /// /// /// The value specifies the mipmap level to sample from. /// Fractional value is used to interpolate between two mipmap levels. /// /// /// The interpolated value. 
/// template<filter_mode _Filter_mode, address_mode _Address_mode> value_type sample(const coordinates_type& _Coord, float _Level_of_detail = 0.0f) const __GPU_ONLY { static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Uint_type, "sample is not allowed for uint component types in the texture value_type."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Int_type, "sample is not allowed for int component types in the texture value_type."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type, "sample is not allowed for double component types in the texture value_type."); static_assert((_Filter_mode == filter_point || _Filter_mode == filter_linear), "Invalid filter mode for sample method."); static_assert((_Address_mode == address_wrap || _Address_mode == address_clamp || _Address_mode == address_mirror || _Address_mode == address_border), "Invalid address mode for sample method."); _Value_type _Tmp; // Predefined sampler id is constructed as filter_mode << 16 | address_mode. This is a contract between BE and runtime. Modify with caution! _Texture_predefined_sample_helper<_Value_type, _Rank>::func(_M_texture_descriptor._M_data_ptr, &_Tmp, _Coord, _Filter_mode << 16 | _Address_mode, 4 /*Sampling*/, _Level_of_detail); return _Tmp; } /// /// Sample the texture at the given coordinates using the specified sampling configuration and return the red (x) component of the four texel samples. /// /// /// The sampler that configures the sampling operation. /// /// /// Coordinate vector for sampling. /// /// /// Rank 4 short vector containing the red (x) component of the 4 texel values sampled. /// const gather_return_type gather_red(const sampler& _Sampler, const coordinates_type& _Coord) const __GPU_ONLY { return _Gather(_Sampler, _Coord, 0); } /// /// Sample the texture at the given coordinates using the specified sampling configuration and return the green (y) component of the four texel samples. /// /// /// The sampler that configures the sampling operation. /// /// /// Coordinate vector for sampling. /// /// /// Rank 4 short vector containing the green (y) component of the 4 texel values sampled. /// const gather_return_type gather_green(const sampler& _Sampler, const coordinates_type& _Coord) const __GPU_ONLY { static_assert(1 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_green is valid only for textures with 2 or more components in the value_type."); return _Gather(_Sampler, _Coord, 1); } /// /// Sample the texture at the given coordinates using the specified sampling configuration and return the blue (z) component of the four texel samples. /// /// /// The sampler that configures the sampling operation. /// /// /// Coordinate vector for sampling. /// /// /// Rank 4 short vector containing the blue (z) component of the 4 texel values sampled. /// const gather_return_type gather_blue(const sampler& _Sampler, const coordinates_type& _Coord) const __GPU_ONLY { static_assert(2 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_blue is valid only for textures with 3 or more components in the value_type."); return _Gather(_Sampler, _Coord, 2); } /// /// Sample the texture at the given coordinates using the specified sampling configuration and return the alpha (w) component of the four texel samples. /// /// /// The sampler that configures the sampling operation. /// /// /// Coordinate vector for sampling. /// /// /// Rank 4 short vector containing the alpha (w) component of the 4 texel values sampled.
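///
/// Example (an illustrative sketch; tv is a hypothetical texture_view<const float_4, 2>
/// and s is a sampler constructed on the host and captured into the kernel): gathering
/// the red channel of the 2x2 texel quad around a normalized coordinate. For this
/// value_type, gather_red returns a float_4 holding the red (x) component of the four
/// sampled texels.
///
///   sampler s(filter_point, address_clamp);
///   float_4 reds = tv.gather_red(s, float_2(0.25f, 0.75f));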
/// const gather_return_type gather_alpha(const sampler& _Sampler, const coordinates_type& _Coord) const __GPU_ONLY { static_assert(3 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_alpha is valid only for textures with 4 components in the value_type."); return _Gather(_Sampler, _Coord, 3); } /// /// Sample the texture at the given coordinates using the predefined sampling configuration and return the red (x) component of the four texel samples. /// /// /// The address mode of the predefined sampler to be used. /// /// /// Coordinate vector for sampling. /// /// /// Rank 4 short vector containing the red (x) component of the 4 texel values sampled. /// template<address_mode _Address_mode> const gather_return_type gather_red(const coordinates_type& _Coord) const __GPU_ONLY { return _Gather<_Address_mode>(_Coord, 0); } /// /// Sample the texture at the given coordinates using the predefined sampling configuration and return the green (y) component of the four texel samples. /// /// /// The address mode of the predefined sampler to be used. /// /// /// Coordinate vector for sampling. /// /// /// Rank 4 short vector containing the green (y) component of the 4 texel values sampled. /// template<address_mode _Address_mode> const gather_return_type gather_green(const coordinates_type& _Coord) const __GPU_ONLY { static_assert(1 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_green is valid only for textures with 2 or more components in the value_type."); return _Gather<_Address_mode>(_Coord, 1); } /// /// Sample the texture at the given coordinates using the predefined sampling configuration and return the blue (z) component of the four texel samples. /// /// /// The address mode of the predefined sampler to be used. /// /// /// Coordinate vector for sampling. /// /// /// Rank 4 short vector containing the blue (z) component of the 4 texel values sampled. /// template<address_mode _Address_mode> const gather_return_type gather_blue(const coordinates_type& _Coord) const __GPU_ONLY { static_assert(2 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_blue is valid only for textures with 3 or more components in the value_type."); return _Gather<_Address_mode>(_Coord, 2); } /// /// Sample the texture at the given coordinates using the predefined sampling configuration and return the alpha (w) component of the four texel samples. /// /// /// The address mode of the predefined sampler to be used. /// /// /// Coordinate vector for sampling. /// /// /// Rank 4 short vector containing the alpha (w) component of the 4 texel values sampled.
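///
/// Example (an illustrative sketch; tv is a hypothetical texture_view<const float_4, 2>):
/// the predefined-sampler overloads take only an address mode; point filtering is
/// implied for gather operations.
///
///   float_4 alphas = tv.gather_alpha<address_wrap>(float_2(0.1f, 0.9f));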
/// template<address_mode _Address_mode> const gather_return_type gather_alpha(const coordinates_type& _Coord) const __GPU_ONLY { static_assert(3 < _Short_vector_type_traits<_Value_type>::_Num_channels, "gather_alpha is valid only for textures with 4 components in the value_type."); return _Gather<_Address_mode>(_Coord, 3); } private: const gather_return_type _Gather(const sampler& _Sampler, const coordinates_type& _Coord, unsigned int _Component) const __GPU_ONLY { static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Uint_type, "gather is not allowed for uint component types in the texture value_type."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Int_type, "gather is not allowed for int component types in the texture value_type."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type, "gather is not allowed for double component types in the texture value_type."); static_assert(rank == 2, "gather methods are only permissible on texture_view<value_type, 2>."); gather_return_type _Tmp; _Texture_sample_helper<gather_return_type, _Rank>::func(_M_texture_descriptor._M_data_ptr, _Sampler._Get_descriptor()._M_data_ptr, &_Tmp, _Coord, _Component, /*_Level_of_detail=*/0.0f); return _Tmp; } template<address_mode _Address_mode> const gather_return_type _Gather(const coordinates_type& _Coord, unsigned int _Component) const __GPU_ONLY { static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Uint_type, "gather is not allowed for uint component types in the texture value_type."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Int_type, "gather is not allowed for int component types in the texture value_type."); static_assert(_Short_vector_type_traits<_Value_type>::_Format_base_type_id != _Double_type, "gather is not allowed for double component types in the texture value_type."); static_assert(rank == 2, "gather methods are only permissible on texture_view<value_type, 2>."); static_assert((_Address_mode == address_wrap || _Address_mode == address_clamp || _Address_mode == address_mirror || _Address_mode == address_border), "Invalid address mode for gather methods."); gather_return_type _Tmp; // Predefined sampler id is constructed as filter_mode << 16 | address_mode. This is a contract between BE and runtime. Modify with caution! // gather only uses the address_mode of the sampler; internally we use filter_point so that the predefined sampler id scheme is the same for both sample and gather.
_Texture_predefined_sample_helper::func(_M_texture_descriptor._M_data_ptr, &_Tmp, _Coord, filter_point << 16 |_Address_mode, _Component, /*_Level_of_detail=*/0.0f); return _Tmp; } }; namespace details { template struct texture_traits { static const bool is_texture = false; static const bool is_writable = false; }; template struct texture_traits> { static const bool is_texture = true; static const bool is_writable = true; }; template struct texture_traits> { static const bool is_texture = true; static const bool is_writable = false; }; template struct texture_traits> { static const bool is_texture = true; static const bool is_writable = true; }; template struct texture_traits> { static const bool is_texture = true; static const bool is_writable = true; }; template struct texture_traits> { static const bool is_texture = true; static const bool is_writable = true; }; template struct texture_traits> { static const bool is_texture = true; static const bool is_writable = false; }; template struct texture_traits> { static const bool is_texture = true; static const bool is_writable = false; }; template struct texture_traits> { static const bool is_texture = true; static const bool is_writable = true; }; // The helper function used by ETW and copy functions to calculate number of bytes for the copy operation given input section template unsigned int _Get_section_size(const _Texture_base<_Value_type, _Rank> &_Tex, const extent<_Rank> &_Extent) { _Texture* _Tex_ptr = _Get_texture(_Tex); _Texture_descriptor _Tex_desc = _Get_texture_descriptor(_Tex); return _Tex_ptr->_Get_data_length(_Tex_desc._Get_most_detailed_mipmap_level(), _Tex_desc._Get_view_mipmap_levels(), _Get_dimensions(_Extent, /*Mip_offset=*/0).data()); } template _Event _Copy_async_impl(_Input_iterator _First, _Input_iterator _Last, _In_ _Texture *_Dst, const size_t *_Dst_offset, unsigned int _Dst_mipmap_level, const size_t *_Copy_extent, const size_t *_Preferred_copy_chunk_extent = NULL) { _ASSERTE(_Dst != nullptr); _ASSERTE(_Dst_offset != nullptr); _ASSERTE(_Copy_extent != nullptr); _ASSERTE((unsigned int)std::distance(_First, _Last) >= (_Copy_extent[0] * _Copy_extent[1] * _Copy_extent[2])); // The copy region should be within the bounds of the destination texture _ASSERTE((_Dst_offset[0] + _Copy_extent[0]) <= _Dst->_Get_width(_Dst_mipmap_level)); _ASSERTE((_Dst_offset[1] + _Copy_extent[1]) <= _Dst->_Get_height(_Dst_mipmap_level)); _ASSERTE((_Dst_offset[2] + _Copy_extent[2]) <= _Dst->_Get_depth(_Dst_mipmap_level)); #pragma warning( push ) #pragma warning( disable : 4127 ) // conditional expression is constant if ((sizeof(_Value_type) > sizeof(unsigned char)) && (_Dst->_Get_bits_per_element() != (8U * sizeof(_Value_type)))) { throw runtime_exception("Iterator-based copy is not supported on textures where the size of the _Value_type is not equal to the texel size.", E_INVALIDARG); } #pragma warning( pop ) // If the dest is accessible on the host we can perform the copy entirely on the host if (_Dst->_Get_host_ptr() != NULL) { // We have made sure that the three multiplications below won't cause integer overflow when creating the texture _ASSERTE(((_Dst->_Get_bits_per_element() * _Copy_extent[0]) % (8U * sizeof(_Value_type))) == 0); size_t _Row_size = (_Dst->_Get_bits_per_element() * _Copy_extent[0]) >> 3; // in bytes size_t _Depth_slice_size = _Row_size * _Copy_extent[1]; size_t _Row_pitch = _Dst->_Get_row_pitch(); size_t _Depth_pitch = _Dst->_Get_depth_pitch(); _ASSERTE(_Row_pitch >= _Row_size); _ASSERTE(_Depth_pitch >= 
_Depth_slice_size); size_t _Dst_offset_in_bytes = ((_Dst_offset[0] * _Dst->_Get_bits_per_element()) >> 3) + (_Dst_offset[1] * _Row_pitch) + (_Dst_offset[2] * _Depth_pitch); unsigned char *_PDest = reinterpret_cast<unsigned char *>(_Dst->_Get_host_ptr()) + _Dst_offset_in_bytes; _Copy_data_on_host(_Dst->_Get_rank(), _First, reinterpret_cast<_Value_type*>(_PDest), _Row_size / sizeof(_Value_type), _Copy_extent[1], _Copy_extent[2], _Row_pitch, _Depth_pitch, _Row_size / sizeof(_Value_type), _Depth_slice_size / sizeof(_Value_type)); return _Event(); } // The dest is not accessible on the host; we need to copy src to // a temporary staging texture and launch a copy from the staging texture // to the dest texture. _Event _Ev; // Determine the copy chunk extent std::array<size_t, 3> _Copy_chunk_extent; if (_Preferred_copy_chunk_extent != NULL) { std::copy(&_Preferred_copy_chunk_extent[0], &_Preferred_copy_chunk_extent[3], _Copy_chunk_extent.begin()); } else { _Get_preferred_copy_chunk_extent(_Dst->_Get_rank(), _Copy_extent[0], _Copy_extent[1], _Copy_extent[2], _Dst->_Get_bits_per_element(), _Copy_chunk_extent.data()); } std::array<size_t, 3> _Curr_copy_offset; std::copy(&_Dst_offset[0], &_Dst_offset[3], _Curr_copy_offset.begin()); std::array<size_t, 3> _Remaining_copy_extent; std::copy(&_Copy_extent[0], &_Copy_extent[3], _Remaining_copy_extent.begin()); bool _Truncated_copy = false; do { _Texture_ptr _Dst_staging_tex_ptr; std::array<size_t, 3> _Curr_copy_extent; _Truncated_copy = _Get_chunked_staging_texture(_Dst, _Copy_chunk_extent.data(), _Remaining_copy_extent.data(), _Curr_copy_extent.data(), &_Dst_staging_tex_ptr); // Now copy from the src pointer to the temp staging texture _Dst_staging_tex_ptr->_Map_buffer(_Write_access, true /* _Wait */); std::array<size_t, 3> _Dst_staging_tex_offset; _Dst_staging_tex_offset.fill(0); _Event _Temp_ev = _Copy_async_impl<_Input_iterator, _Value_type>(_First, _Last, _Dst_staging_tex_ptr, _Dst_staging_tex_offset.data(), /*_Dst_mipmap_level=*/0, _Curr_copy_extent.data(), _Copy_chunk_extent.data()); // Now chain a copy from the temporary staging texture to the _Dst texture _Texture_ptr _Dst_tex_ptr = _Dst; _Temp_ev = _Temp_ev._Add_continuation(std::function<_Event()>([_Dst_staging_tex_ptr, _Dst_tex_ptr, _Curr_copy_extent, _Dst_staging_tex_offset, _Curr_copy_offset, _Dst_mipmap_level]() mutable -> _Event { return _Dst_staging_tex_ptr->_Copy_to_async(_Dst_tex_ptr, _Curr_copy_extent.data(), _Dst_staging_tex_offset.data(), _Curr_copy_offset.data(), /*_Src_mipmap_level=*/0, _Dst_mipmap_level); })); _Ev = _Ev._Add_event(_Temp_ev); // Now adjust the _Src and _Dst offsets for the remaining part of the copy if (_Truncated_copy) { // The offset only needs to be adjusted in the most significant dimension _Curr_copy_offset[_Dst->_Get_rank() - 1] += _Curr_copy_extent[_Dst->_Get_rank() - 1]; std::advance(_First, (((_Curr_copy_extent[0] * _Dst->_Get_bits_per_element()) >> 3) / sizeof(_Value_type)) * _Curr_copy_extent[1] * _Curr_copy_extent[2]); } } while (_Truncated_copy); return _Ev; } template <typename _Output_iterator, typename _Value_type> _Event _Copy_async_impl(_Texture *_Tex, const size_t *_Tex_offset, unsigned int _Src_mipmap_level, const size_t *_Copy_extent, _Output_iterator _First, const size_t *_Preferred_copy_chunk_extent = NULL) { _ASSERTE(_Tex != nullptr); _ASSERTE(_Tex_offset != nullptr); _ASSERTE(_Copy_extent != nullptr); // The copy region should be within the bounds of the source texture _ASSERTE((_Tex_offset[0] + _Copy_extent[0]) <= _Tex->_Get_width(_Src_mipmap_level)); _ASSERTE((_Tex_offset[1] + _Copy_extent[1]) <= _Tex->_Get_height(_Src_mipmap_level)); _ASSERTE((_Tex_offset[2]
+ _Copy_extent[2]) <= _Tex->_Get_depth(_Src_mipmap_level)); #pragma warning( push ) #pragma warning( disable : 4127 ) // conditional expression is constant if ((sizeof(_Value_type) > sizeof(unsigned char)) && (_Tex->_Get_bits_per_element() != (8U * sizeof(_Value_type)))) { throw runtime_exception("Iterator-based copy is not supported on textures where the size of the _Value_type is not equal to the texel size.", E_INVALIDARG); } #pragma warning( pop ) // If the texture is available on the host then we can perform the copy entirely on the host if (_Tex->_Get_host_ptr() != nullptr) { // We have made sure that the three multiplications below won't cause integer overflow when creating the texture _ASSERTE(((_Tex->_Get_bits_per_element() * _Copy_extent[0]) % 8U) == 0); size_t _Row_size = (_Tex->_Get_bits_per_element() * _Copy_extent[0]) >> 3; // in bytes size_t _Depth_slice_size = _Row_size * _Copy_extent[1]; size_t _Row_pitch = _Tex->_Get_row_pitch(); size_t _Depth_pitch = _Tex->_Get_depth_pitch(); _ASSERTE(_Row_pitch >= _Row_size); _ASSERTE(_Depth_pitch >= _Depth_slice_size); size_t _Tex_offset_in_bytes = ((_Tex_offset[0] * _Tex->_Get_bits_per_element()) >> 3) + (_Tex_offset[1] * _Row_pitch) + (_Tex_offset[2] * _Depth_pitch); unsigned char *_PTex = reinterpret_cast<unsigned char *>(_Tex->_Get_host_ptr()) + _Tex_offset_in_bytes; _Copy_data_on_host(_Tex->_Get_rank(), reinterpret_cast<_Value_type*>(_PTex), _First, _Row_size / sizeof(_Value_type), _Copy_extent[1], _Copy_extent[2], _Row_pitch, _Depth_pitch, _Row_size / sizeof(_Value_type), _Depth_slice_size / sizeof(_Value_type)); return _Event(); } // The texture is not accessible on the host; we need to copy to/from a staging // texture before the copy to the destination. This is done in chunks, such that // we can concurrently copy from the source texture to a staging texture while // copying from a staging texture from a previous chunk to the destination.
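// Concretely, each pass: (1) acquires a staging texture sized to the copy chunk,
// (2) copies the next chunk of the source texture into it, and (3) chains a
// continuation that drains the staging texture into the output iterator; if the
// chunk did not cover the remaining extent, the tail is handled recursively below.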
_Event _Ev; // Determine the copy chunk extent std::array _Copy_chunk_extent; if (_Preferred_copy_chunk_extent != nullptr) { std::copy(&_Preferred_copy_chunk_extent[0], &_Preferred_copy_chunk_extent[3], _Copy_chunk_extent.begin()); } else { _Get_preferred_copy_chunk_extent(_Tex->_Get_rank(), _Copy_extent[0], _Copy_extent[1], _Copy_extent[2], _Tex->_Get_bits_per_element(), _Copy_chunk_extent.data()); } std::array _Curr_copy_offset; std::copy(&_Tex_offset[0], &_Tex_offset[3], _Curr_copy_offset.begin()); std::array _Remaining_copy_extent; std::copy(&_Copy_extent[0], &_Copy_extent[3], _Remaining_copy_extent.begin()); bool _Truncated_copy = false; _Texture_ptr _Staging_tex_ptr; std::array _Curr_copy_extent; _Truncated_copy = _Get_chunked_staging_texture(_Tex, _Copy_chunk_extent.data(), _Remaining_copy_extent.data(), _Curr_copy_extent.data(), &_Staging_tex_ptr); // Now copy into the temp staging texture std::array _Staging_tex_offset; _Staging_tex_offset.fill(0); _Event _Temp_ev = _Copy_async_impl(_Tex, _Curr_copy_offset.data(), _Src_mipmap_level, _Staging_tex_ptr._Get_ptr(), _Staging_tex_offset.data(), /*_Dst_mipmap_level=*/0, _Curr_copy_extent.data(), _Copy_chunk_extent.data()); _Ev = _Ev._Add_event(_Temp_ev); // If we have finished our copy, we just need to add a continuation to copy // from the temporary staging texture to the _Dst pointer if (!_Truncated_copy) { return _Ev._Add_continuation(std::function<_Event()>([_Staging_tex_ptr, _Curr_copy_extent, _Staging_tex_offset, _Copy_chunk_extent, _First]() mutable -> _Event { return _Copy_async_impl<_Output_iterator, _Value_type>(_Staging_tex_ptr, _Staging_tex_offset.data(), /*_Src_mipmap_level=*/0, _Curr_copy_extent.data(), _First, _Copy_chunk_extent.data()); })); } else { // The copy was truncated. We need to recursively perform the rest of the copy _Texture_ptr _Tex_ptr = _Tex; _Curr_copy_offset[_Tex->_Get_rank() - 1] += _Curr_copy_extent[_Tex->_Get_rank() - 1]; return _Ev._Add_continuation(std::function<_Event()>([_Staging_tex_ptr, _First, _Curr_copy_extent, _Staging_tex_offset, _Tex_ptr, _Curr_copy_offset, _Remaining_copy_extent, _Copy_chunk_extent, _Src_mipmap_level]() mutable -> _Event { // Initiate copying of the remaining portion _Output_iterator _New_dst_iter = _First; _Advance_output_iterator(_New_dst_iter, (((_Curr_copy_extent[0] * _Tex_ptr->_Get_bits_per_element()) >> 3) / sizeof(_Value_type)) * _Curr_copy_extent[1] * _Curr_copy_extent[2]); _Event _Ev1 = _Copy_async_impl<_Output_iterator, _Value_type>(_Tex_ptr, _Curr_copy_offset.data(), _Src_mipmap_level, _Remaining_copy_extent.data(), _New_dst_iter, _Copy_chunk_extent.data()); // Now copy the data from the temp staging buffer to the _Dst pointer _Event _Ev2 = _Copy_async_impl<_Output_iterator, _Value_type>(_Staging_tex_ptr, _Staging_tex_offset.data(), /*_Src_mipmap_level=*/0, _Curr_copy_extent.data(), _First, _Copy_chunk_extent.data()); return _Ev2._Add_event(_Ev1); })); } } template _Event _Copy_async_impl(const void * _Src, unsigned int _Src_byte_size, const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset, const extent<_Rank> &_Copy_extent) { _Is_valid_section(_Dst.extent, _Dst_offset, _Copy_extent); if (_Dst.get_mipmap_levels() > 1) { throw runtime_exception("Invalid destination - multiple mipmap levels cannot be copied from source", E_INVALIDARG); } if (_Src_byte_size < _Get_section_size(_Dst, _Copy_extent)) { if (_Dst.extent == _Copy_extent) { throw runtime_exception("Invalid _Src_byte_size argument. 
_Src_byte_size is smaller than the total size of _Dst.", E_INVALIDARG); } else { throw runtime_exception("Invalid _Src_byte_size argument. _Src_byte_size is smaller than the provided section of _Dst.", E_INVALIDARG); } } _Texture *_Dst_tex_ptr = _Get_texture(_Dst); std::array _Copy_extent_arr = _Get_dimensions(_Copy_extent, /*_Mip_offset=*/0); std::array _Dst_offset_arr = _Get_indices(_Dst_offset); auto _First = stdext::make_unchecked_array_iterator(reinterpret_cast(_Src)); auto _Last = stdext::make_unchecked_array_iterator(reinterpret_cast(_Src) + _Src_byte_size); return _Copy_async_impl(_First, _Last, _Dst_tex_ptr, _Dst_offset_arr.data(), _Get_texture_descriptor(_Dst)._Get_most_detailed_mipmap_level(), _Copy_extent_arr.data()); } template _Event _Copy_async_impl(const _Texture_base<_Value_type, _Rank>& _Src, const index<_Rank> &_Src_offset, const extent<_Rank> &_Copy_extent, _Out_ void * _Dst, unsigned int _Dst_byte_size) { _Is_valid_section(_Src.extent, _Src_offset, _Copy_extent); if (_Src.get_mipmap_levels() > 1) { throw runtime_exception("Invalid source - multiple mipmap levels cannot be copied to destination", E_INVALIDARG); } if (_Get_section_size(_Src, _Copy_extent) > _Dst_byte_size) { if (_Src.extent == _Copy_extent) { throw runtime_exception("Invalid _Dst_byte_size argument. _Dst_byte_size is smaller than the size of _Src.", E_INVALIDARG); } else { throw runtime_exception("Invalid _Dst_byte_size argument. _Dst_byte_size is smaller than the provided section of _Src.", E_INVALIDARG); } } _Texture *_Src_tex_ptr = _Get_texture(_Src); std::array _Copy_extent_arr = _Get_dimensions(_Copy_extent, /*_Mip_offset=*/0); std::array _Src_offset_arr = _Get_indices(_Src_offset); auto _First = stdext::make_unchecked_array_iterator(reinterpret_cast(_Dst)); return _Copy_async_impl(_Src_tex_ptr, _Src_offset_arr.data(), _Get_texture_descriptor(_Src)._Get_most_detailed_mipmap_level(), _Copy_extent_arr.data(), _First); } template _Event _Copy_async_impl(const _Texture_base<_Value_type, _Rank> &_Src, const index<_Rank> &_Src_offset, const extent<_Rank> &_Copy_extent, _Output_iterator _Dest_iter) { _Is_valid_section(_Src.extent, _Src_offset, _Copy_extent); if (_Src.get_mipmap_levels() > 1) { throw runtime_exception("Invalid source - multiple mipmap levels cannot be copied to destination", E_INVALIDARG); } _Texture *_Src_tex_ptr = _Get_texture(_Src); std::array _Copy_extent_arr = _Get_dimensions(_Copy_extent, /*_Mip_offset=*/0); std::array _Src_offset_arr = _Get_indices(_Src_offset); return _Copy_async_impl<_Output_iterator, _Value_type>(_Src_tex_ptr, _Src_offset_arr.data(), _Get_texture_descriptor(_Src)._Get_most_detailed_mipmap_level(), _Copy_extent_arr.data(), _Dest_iter); } template _Event _Copy_async_impl(_Input_iterator _First, _Input_iterator _Last, const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset, const extent<_Rank> &_Copy_extent) { _Is_valid_section(_Dst.extent, _Dst_offset, _Copy_extent); if (static_cast(std::distance(_First, _Last)) < _Copy_extent.size()) { throw runtime_exception("Inadequate amount of data supplied through the iterators", E_INVALIDARG); } if (_Dst.get_mipmap_levels() > 1) { throw runtime_exception("Invalid destination - multiple mipmap levels cannot be copied from source", E_INVALIDARG); } std::array _Copy_extent_arr = _Get_dimensions(_Copy_extent, /*_Mip_offset=*/0); std::array _Dst_offset_arr = _Get_indices(_Dst_offset); _Texture *_Dst_tex_ptr = _Get_texture(_Dst); return _Copy_async_impl<_Input_iterator, _Value_type>(_First, _Last, 
_Dst_tex_ptr, _Dst_offset_arr.data(), _Get_texture_descriptor(_Dst)._Get_most_detailed_mipmap_level(), _Copy_extent_arr.data()); } template <typename _Value_type, int _Rank> _Event _Copy_async_impl(const _Texture_base<_Value_type, _Rank>& _Src, const index<_Rank> &_Src_offset, const _Texture_base<_Value_type, _Rank>& _Dst, const index<_Rank> &_Dst_offset, const extent<_Rank> &_Copy_extent) { _Is_valid_section(_Src.extent, _Src_offset, _Copy_extent); _Is_valid_section(_Dst.extent, _Dst_offset, _Copy_extent); _Texture_descriptor _Src_tex_desc = _Get_texture_descriptor(_Src); _Texture_descriptor _Dst_tex_desc = _Get_texture_descriptor(_Dst); if (_Src_tex_desc._Get_view_mipmap_levels() != _Dst_tex_desc._Get_view_mipmap_levels()) { throw runtime_exception("The source and destination textures must have exactly the same number of mipmap levels for texture copy.", E_INVALIDARG); } bool _Is_whole_texture_copy = (_Src_offset == _Dst_offset && _Src_offset == index<_Rank>() && _Src.extent == _Dst.extent && _Src.extent == _Copy_extent); if (_Src_tex_desc._Get_view_mipmap_levels() > 1 && !_Is_whole_texture_copy) { throw runtime_exception("Sections are not allowed when copy involves multiple mipmap levels", E_INVALIDARG); } if (_Src_tex_desc._Are_mipmap_levels_overlapping(&_Dst_tex_desc)) { throw runtime_exception("The source and destination are overlapping areas on the same texture", E_INVALIDARG); } _Texture* _Src_tex = _Get_texture(_Src); _Texture* _Dst_tex = _Get_texture(_Dst); // Formats must be identical for non-adopted textures. Textures created through D3D interop are not subject to this test // to allow copy between related, but not identical, formats. Attempting to copy between unrelated formats through interop // will result in exceptions in debug mode and undefined behavior in release mode. if (!_Src_tex->_Is_adopted() && !_Dst_tex->_Is_adopted() && (_Src_tex->_Get_texture_format() != _Dst_tex->_Get_texture_format())) { throw runtime_exception("The source and destination textures are not compatible.", E_INVALIDARG); } std::array<size_t, 3> _Src_offset_arr = _Get_indices(_Src_offset); std::array<size_t, 3> _Dst_offset_arr = _Get_indices(_Dst_offset); _Event _Copy_event; unsigned int _Src_most_detailed_mipmap_level = _Src_tex_desc._Get_most_detailed_mipmap_level(); unsigned int _Dst_most_detailed_mipmap_level = _Dst_tex_desc._Get_most_detailed_mipmap_level(); // Copy all mipmap levels from source to destination one by one. // Note that offsets are not allowed here; therefore only the dimensions need to be updated for subsequent mipmap levels for (unsigned int _Mip_offset = 0; _Mip_offset < _Src_tex_desc._Get_view_mipmap_levels(); ++_Mip_offset) { std::array<size_t, 3> _Copy_extent_arr = _Get_dimensions(_Copy_extent, _Mip_offset); auto _Step_event = _Copy_async_impl(_Src_tex, _Src_offset_arr.data(), _Src_most_detailed_mipmap_level + _Mip_offset, _Dst_tex, _Dst_offset_arr.data(), _Dst_most_detailed_mipmap_level + _Mip_offset, _Copy_extent_arr.data()); _Copy_event = _Copy_event._Add_event(_Step_event); } return _Copy_event; } } // namespace details /// /// Copies the contents of the source texture into the destination host buffer. /// /// /// The rank of the source texture. /// /// /// The type of the elements of the source texture. /// /// /// The source texture or texture_view. /// /// /// The destination host buffer. /// /// /// Number of bytes in the destination buffer.
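///
/// Example (an illustrative sketch; av and the texture contents are hypothetical):
/// copying a whole texture into a host buffer sized from its extent.
///
///   texture<int, 2> tex(64, 64, av);
///   std::vector<int> host(tex.extent.size());
///   copy(tex, host.data(), static_cast<unsigned int>(host.size() * sizeof(int)));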
/// template ::is_texture, void>::type> void copy(const _Src_type &_Src, _Out_ void * _Dst, unsigned int _Dst_byte_size) { auto _Span_id = concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(concurrency::details::_Get_texture_descriptor(_Src), nullptr, _Get_section_size(_Src, _Src.extent)); details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Src.extent, _Dst, _Dst_byte_size)._Get(); concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); } /// /// Copies the contents of a section of the source texture into the destination host buffer. /// /// /// The rank of the source texture. /// /// /// The type of the elements of the source texture. /// /// /// The source texture or texture_view. /// /// /// The offset into the source texture from which to begin copying. /// /// /// The extent of the texture section to copy. /// /// /// The destination host buffer. /// /// /// Number of bytes in the destination buffer. /// template ::is_texture, void>::type> void copy(const _Src_type &_Src, const index<_Src_type::rank> &_Src_offset, const extent<_Src_type::rank> &_Copy_extent, _Out_ void * _Dst, unsigned int _Dst_byte_size) { auto _Span_id = concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(concurrency::details::_Get_texture_descriptor(_Src), nullptr, _Get_section_size(_Src, _Copy_extent)); details::_Copy_async_impl(_Src, _Src_offset, _Copy_extent, _Dst, _Dst_byte_size)._Get(); concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); } /// /// Copies the contents of the source host buffer into the destination texture _Dst. /// /// /// The rank of the destination texture. /// /// /// The type of the destination texture or texture_view. /// /// /// The source host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// The destination texture or texture_view. /// template ::is_texture, void>::type> void copy(const void * _Src, unsigned int _Src_byte_size, _Dst_type & _Dst) { static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type."); auto _Span_id = concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(nullptr, concurrency::details::_Get_texture_descriptor(_Dst), _Get_section_size(_Dst, _Dst.extent)); details::_Copy_async_impl(_Src, _Src_byte_size, _Dst, index<_Dst_type::rank>(), _Dst.extent)._Get(); concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); } /// /// Copies the contents of the source host buffer into a section of the destination texture _Dst. /// /// /// The type of the destination texture or texture_view. /// /// /// The source host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// The destination texture or texture_view. /// /// /// The offset into the destination texture to which to begin copying. /// /// /// The extent of the texture section to copy. 
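///
/// Example (an illustrative sketch; tex is a hypothetical writable texture<int, 2>
/// and src a host buffer): copying a 16x16 block into the texture starting at
/// row 8, column 4.
///
///   copy(src, static_cast<unsigned int>(16 * 16 * sizeof(int)), tex, index<2>(8, 4), extent<2>(16, 16));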
/// template ::is_texture, void>::type> void copy(const void * _Src, unsigned int _Src_byte_size, _Dst_type & _Dst, const index<_Dst_type::rank> &_Dst_offset, const extent<_Dst_type::rank> &_Copy_extent) { static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type."); auto _Span_id = concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(nullptr, concurrency::details::_Get_texture_descriptor(_Dst), _Get_section_size(_Dst, _Copy_extent)); details::_Copy_async_impl(_Src, _Src_byte_size, _Dst, _Dst_offset, _Copy_extent)._Get(); concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); } /// /// Asynchronously copies the contents of the source texture into the destination host buffer. /// /// /// The rank of the source texture. /// /// /// The type of the source texture. /// /// /// The source texture or texture_view. /// /// /// The destination host buffer. /// /// /// Number of bytes in the destination buffer. /// /// /// A future upon which to wait for the operation to complete. /// template::is_texture, void>::type> concurrency::completion_future copy_async(const _Src_type &_Src, _Out_ void * _Dst, unsigned int _Dst_byte_size) { auto _Async_op_id = concurrency::details::_Get_amp_trace()->_Launch_async_copy_event_helper(concurrency::details::_Get_texture_descriptor(_Src), nullptr, _Get_section_size(_Src, _Src.extent)); _Event _Ev = details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Src.extent, _Dst, _Dst_byte_size); return concurrency::details::_Get_amp_trace()->_Start_async_op_wait_event_helper(_Async_op_id, _Ev); } /// /// Asynchronously copies the contents of the provided section of the source texture into the destination host buffer. /// /// /// The type of the source texture. /// /// /// The source texture or texture_view. /// /// /// The offset into the source texture from which to begin copying. /// /// /// The extent of the texture section to copy. /// /// /// The destination host buffer. /// /// /// Number of bytes in the destination buffer. /// /// /// A future upon which to wait for the operation to complete. /// template::is_texture, void>::type> concurrency::completion_future copy_async(const _Src_type &_Src, const index<_Src_type::rank> &_Src_offset, const extent<_Src_type::rank> &_Copy_extent, _Out_ void * _Dst, unsigned int _Dst_byte_size) { auto _Async_op_id = concurrency::details::_Get_amp_trace()->_Launch_async_copy_event_helper(concurrency::details::_Get_texture_descriptor(_Src), nullptr, _Get_section_size(_Src, _Copy_extent)); _Event _Ev = details::_Copy_async_impl(_Src, _Src_offset, _Copy_extent, _Dst, _Dst_byte_size); return concurrency::details::_Get_amp_trace()->_Start_async_op_wait_event_helper(_Async_op_id, _Ev); } /// /// Asynchronously copies the contents of the source host buffer into the destination texture _Dst. /// /// /// The type of the destination texture. /// /// /// The source host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// The destination texture or texture_view. /// /// /// A future upon which to wait for the operation to complete. 
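///
/// Example (an illustrative sketch; all names hypothetical): launching an
/// asynchronous host-to-texture copy and waiting on the returned completion_future.
///
///   concurrency::completion_future f = copy_async(src, src_byte_size, tex);
///   // ... overlap other work here ...
///   f.get(); // blocks until the copy completes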
/// template ::is_texture, void>::type> concurrency::completion_future copy_async(const void * _Src, unsigned int _Src_byte_size, _Dst_type & _Dst) { static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type."); auto _Async_op_id = concurrency::details::_Get_amp_trace()->_Launch_async_copy_event_helper(nullptr, concurrency::details::_Get_texture_descriptor(_Dst), _Get_section_size(_Dst, _Dst.extent)); _Event _Ev = details::_Copy_async_impl(_Src, _Src_byte_size, _Dst, index<_Dst_type::rank>(), _Dst.extent); return concurrency::details::_Get_amp_trace()->_Start_async_op_wait_event_helper(_Async_op_id, _Ev); } /// /// Asynchronously copies the contents of the source host buffer into a section of the destination texture _Dst. /// /// /// The type of the elements of the destination texture. /// /// /// The source host buffer. /// /// /// Number of bytes in the source buffer. /// /// /// The destination texture or texture_view. /// /// /// The offset into the destination texture to which to begin copying. /// /// /// The extent of the texture section to copy. /// /// /// A future upon which to wait for the operation to complete. /// template ::is_texture, void>::type> concurrency::completion_future copy_async(const void * _Src, unsigned int _Src_byte_size, _Dst_type & _Dst, const index<_Dst_type::rank> &_Dst_offset, const extent<_Dst_type::rank> &_Copy_extent) { static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type."); auto _Async_op_id = concurrency::details::_Get_amp_trace()->_Launch_async_copy_event_helper(nullptr, concurrency::details::_Get_texture_descriptor(_Dst), _Get_section_size(_Dst, _Copy_extent)); _Event _Ev = details::_Copy_async_impl(_Src, _Src_byte_size, _Dst, _Dst_offset, _Copy_extent); return concurrency::details::_Get_amp_trace()->_Start_async_op_wait_event_helper(_Async_op_id, _Ev); } /// /// Copies data from the pair of source iterators into the destination texture _Dst. /// /// /// The input iterator type. /// /// /// The type of the destination texture. /// /// /// The starting iterator for the copy. /// /// /// The ending iterator for the copy. /// /// /// The destination texture or texture_view. /// template ::is_texture, void>::type> void copy(InputIterator _First, InputIterator _Last, _Dst_type &_Dst) { static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type."); auto _Span_id = concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(nullptr, concurrency::details::_Get_texture_descriptor(_Dst), _Get_section_size(_Dst, _Dst.extent)); details::_Copy_async_impl(_First, _Last, _Dst, index<_Dst_type::rank>(), _Dst.extent)._Get(); concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); } /// /// Copies data from the pair of source iterators into a section of the destination texture _Dst. /// /// /// The input iterator type. /// /// /// The type of the destination texture. /// /// /// The starting iterator for the copy. /// /// /// The ending iterator for the copy. /// /// /// The destination texture or texture_view. /// /// /// The offset into the destination texture to which to begin copying. /// /// /// The extent of the texture section to copy. 
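///
/// Example (an illustrative sketch; tex is a hypothetical writable texture<int, 2>):
/// filling an 8x8 section of the texture from a std::vector.
///
///   std::vector<int> data(8 * 8, 42);
///   copy(data.begin(), data.end(), tex, index<2>(0, 0), extent<2>(8, 8));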
/// template <typename InputIterator, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(InputIterator _First, InputIterator _Last, _Dst_type &_Dst, const index<_Dst_type::rank> &_Dst_offset, const extent<_Dst_type::rank> &_Copy_extent) { static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type."); auto _Span_id = concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(nullptr, concurrency::details::_Get_texture_descriptor(_Dst), _Get_section_size(_Dst, _Copy_extent)); details::_Copy_async_impl(_First, _Last, _Dst, _Dst_offset, _Copy_extent)._Get(); concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); } /// /// Copies data from the source texture _Src into an output iterator. /// /// /// The type of the source texture. /// /// /// The output iterator type. /// /// /// The starting iterator for the copy output. /// template <typename _Src_type, typename OutputIterator, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && !details::texture_traits<OutputIterator>::is_texture, void>::type> void copy(const _Src_type &_Src, OutputIterator _Dst) { auto _Span_id = concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(concurrency::details::_Get_texture_descriptor(_Src), nullptr, _Get_section_size(_Src, _Src.extent)); details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Src.extent, _Dst)._Get(); concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); } /// /// Copies data from a section of the source texture _Src into an output iterator. /// /// /// The type of the source texture. /// /// /// The output iterator type. /// /// /// The offset into the source texture from which to begin copying. /// /// /// The extent of the texture section to copy. /// /// /// The starting iterator for the copy output. /// template <typename _Src_type, typename OutputIterator, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && !details::texture_traits<OutputIterator>::is_texture, void>::type> void copy(const _Src_type &_Src, const index<_Src_type::rank> &_Src_offset, const extent<_Src_type::rank> &_Copy_extent, OutputIterator _Dst) { auto _Span_id = concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(concurrency::details::_Get_texture_descriptor(_Src), nullptr, _Get_section_size(_Src, _Copy_extent)); details::_Copy_async_impl(_Src, _Src_offset, _Copy_extent, _Dst)._Get(); concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); } /// /// Copies data from the source texture _Src into the destination texture _Dst. /// /// /// The type of the source texture. /// /// /// The type of the destination texture. /// /// /// The source texture from which to copy. /// /// /// The destination texture into which to copy. /// template <typename _Src_type, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(const _Src_type &_Src, _Dst_type &_Dst) { static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type."); if (_Src.extent != _Dst.extent) { throw runtime_exception("The source and destination textures must have exactly the same extent for whole-texture copy.", E_INVALIDARG); } auto _Span_id = concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(concurrency::details::_Get_texture_descriptor(_Src), concurrency::details::_Get_texture_descriptor(_Dst), _Get_section_size(_Dst, _Dst.extent)); details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Dst, index<_Dst_type::rank>(), _Dst.extent)._Get(); concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); } /// /// Copies data from a section of the source texture _Src into a section of the destination texture _Dst. /// /// /// The type of the source texture.
/// /// /// The type of the destination texture. /// /// /// The source texture from which to copy. /// /// /// The offset into the source texture from which to begin copying. /// /// /// The destination texture into which to copy. /// /// /// The offset into the destination texture to which to begin copying. /// /// /// The extent of the texture section to copy. /// template ::is_texture && details::texture_traits<_Dst_type>::is_texture, void>::type> void copy(const _Src_type &_Src, const index<_Src_type::rank> &_Src_offset, _Dst_type &_Dst, const index<_Dst_type::rank> &_Dst_offset, const extent<_Src_type::rank> &_Copy_extent) { static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type."); auto _Span_id = concurrency::details::_Get_amp_trace()->_Start_copy_event_helper(concurrency::details::_Get_texture_descriptor(_Src), concurrency::details::_Get_texture_descriptor(_Dst), _Get_section_size(_Src, _Copy_extent)); details::_Copy_async_impl(_Src, _Src_offset, _Dst, _Dst_offset, _Copy_extent)._Get(); concurrency::details::_Get_amp_trace()->_Write_end_event(_Span_id); } /// /// Asynchronously copies data from the pair of source iterators into the destination texture _Dst. /// /// /// The input iterator type. /// /// /// The type of the destination texture. /// /// /// The starting iterator for the copy. /// /// /// The ending iterator for the copy. /// /// /// The destination texture or texture_view. /// /// /// A future upon which to wait for the operation to complete. /// template ::is_texture, void>::type> concurrency::completion_future copy_async(InputIterator _First, InputIterator _Last, _Dst_type &_Dst) { static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type."); auto _Async_op_id = concurrency::details::_Get_amp_trace()->_Launch_async_copy_event_helper(nullptr, concurrency::details::_Get_texture_descriptor(_Dst), _Get_section_size(_Dst, _Dst.extent)); _Event _Ev = details::_Copy_async_impl(_First, _Last, _Dst, index<_Dst_type::rank>(), _Dst.extent); return concurrency::details::_Get_amp_trace()->_Start_async_op_wait_event_helper(_Async_op_id, _Ev); } /// /// Asynchronously copies data from the pair of source iterators into a section of the destination texture _Dst. /// /// /// The input iterator type. /// /// /// The type of the destination texture. /// /// /// The starting iterator for the copy. /// /// /// The ending iterator for the copy. /// /// /// The destination texture or texture_view. /// /// /// The offset into the destination texture to which to begin copying. /// /// /// The extent of the texture section to copy. /// /// /// A future upon which to wait for the operation to complete. 
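///
/// Example (an illustrative sketch; names hypothetical): iterator-based asynchronous
/// copy into a texture section; the returned completion_future composes with
/// continuation-style code via then().
///
///   auto f = copy_async(data.begin(), data.end(), tex, index<2>(0, 0), extent<2>(8, 8));
///   f.then([]() { /* runs once the copy has completed */ });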
/// template ::is_texture, void>::type> concurrency::completion_future copy_async(InputIterator _First, InputIterator _Last, _Dst_type &_Dst, const index<_Dst_type::rank> &_Dst_offset, const extent<_Dst_type::rank> &_Copy_extent) { static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type."); auto _Async_op_id = concurrency::details::_Get_amp_trace()->_Launch_async_copy_event_helper(nullptr, concurrency::details::_Get_texture_descriptor(_Dst), _Get_section_size(_Dst, _Copy_extent)); _Event _Ev = details::_Copy_async_impl(_First, _Last, _Dst, _Dst_offset, _Copy_extent); return concurrency::details::_Get_amp_trace()->_Start_async_op_wait_event_helper(_Async_op_id, _Ev); } /// /// Asynchronously copies data from the source texture _Src into an output iterator. /// /// /// The type of the source texture. /// /// /// The output iterator type. /// /// /// The starting iterator for the copy output. /// /// /// A future upon which to wait for the operation to complete. /// template ::is_texture && !details::texture_traits::is_texture, void>::type> concurrency::completion_future copy_async(_Src_type &_Src, OutputIterator _Dst) { auto _Async_op_id = concurrency::details::_Get_amp_trace()->_Launch_async_copy_event_helper(concurrency::details::_Get_texture_descriptor(_Src), nullptr, _Get_section_size(_Src, _Src.extent)); _Event _Ev = details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Src.extent, _Dst); return concurrency::details::_Get_amp_trace()->_Start_async_op_wait_event_helper(_Async_op_id, _Ev); } /// /// Asynchronously copies data from a section of the source texture _Src into an output iterator. /// /// /// The type of the source texture. /// /// /// The output iterator type. /// /// /// The offset into the source texture from which to begin copying. /// /// /// The extent of the texture section to copy. /// /// /// The starting iterator for the copy output. /// /// /// A future upon which to wait for the operation to complete. /// template ::is_texture && !details::texture_traits::is_texture, void>::type> concurrency::completion_future copy_async(_Src_type &_Src, const index<_Src_type::rank> &_Src_offset, const extent<_Src_type::rank> &_Copy_extent, OutputIterator _Dst) { auto _Async_op_id = concurrency::details::_Get_amp_trace()->_Launch_async_copy_event_helper(concurrency::details::_Get_texture_descriptor(_Src), nullptr, _Get_section_size(_Src, _Copy_extent)); _Event _Ev = details::_Copy_async_impl(_Src, _Src_offset, _Copy_extent, _Dst); return concurrency::details::_Get_amp_trace()->_Start_async_op_wait_event_helper(_Async_op_id, _Ev); } /// /// Asynchronously copies data from the source texture _Src into the destination texture _Dst. /// /// /// The type of the source texture. /// /// /// The type of the destination texture. /// /// /// The source texture from which to copy. /// /// /// The destination texture into which to copy. /// /// /// A future upon which to wait for the operation to complete. 
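///
/// Example (an illustrative sketch; av is a hypothetical accelerator_view):
/// device-side copy between two textures with identical extents.
///
///   texture<uint_4, 2> src_tex(128, 128, av);
///   texture<uint_4, 2> dst_tex(128, 128, av);
///   copy_async(src_tex, dst_tex).get();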
/// template <typename _Src_type, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(_Src_type &_Src, _Dst_type &_Dst) { static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type."); if (_Src.extent != _Dst.extent) { throw runtime_exception("The source and destination textures must have exactly the same extent for whole-texture copy.", E_INVALIDARG); } auto _Async_op_id = concurrency::details::_Get_amp_trace()->_Launch_async_copy_event_helper(concurrency::details::_Get_texture_descriptor(_Src), concurrency::details::_Get_texture_descriptor(_Dst), _Get_section_size(_Dst, _Dst.extent)); _Event _Ev = details::_Copy_async_impl(_Src, index<_Src_type::rank>(), _Dst, index<_Dst_type::rank>(), _Dst.extent); return concurrency::details::_Get_amp_trace()->_Start_async_op_wait_event_helper(_Async_op_id, _Ev); } /// /// Asynchronously copies data from a section of the source texture _Src into the destination texture _Dst. /// /// /// The type of the source texture. /// /// /// The type of the destination texture. /// /// /// The source texture from which to copy. /// /// /// The offset into the source texture from which to begin copying. /// /// /// The destination texture into which to copy. /// /// /// The offset into the destination texture to which to begin copying. /// /// /// The extent of the texture section to copy. /// /// /// A future upon which to wait for the operation to complete. /// template <typename _Src_type, typename _Dst_type, typename = typename std::enable_if<details::texture_traits<_Src_type>::is_texture && details::texture_traits<_Dst_type>::is_texture, void>::type> concurrency::completion_future copy_async(_Src_type &_Src, const index<_Src_type::rank> &_Src_offset, _Dst_type &_Dst, const index<_Dst_type::rank> &_Dst_offset, const extent<_Src_type::rank> &_Copy_extent) { static_assert(details::texture_traits<_Dst_type>::is_writable, "Destination is not a writable texture type."); auto _Async_op_id = concurrency::details::_Get_amp_trace()->_Launch_async_copy_event_helper(concurrency::details::_Get_texture_descriptor(_Src), concurrency::details::_Get_texture_descriptor(_Dst), _Get_section_size(_Src, _Copy_extent)); _Event _Ev = details::_Copy_async_impl(_Src, _Src_offset, _Dst, _Dst_offset, _Copy_extent); return concurrency::details::_Get_amp_trace()->_Start_async_op_wait_event_helper(_Async_op_id, _Ev); } namespace details { template <int _Rank> Concurrency::extent<_Rank> _Make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, _Texture_base_type_id _Id, _Inout_ _Texture ** _Tex, DXGI_FORMAT _View_format) __CPU_ONLY { if (_D3D_texture == NULL) { throw runtime_exception("NULL D3D texture pointer.", E_INVALIDARG); } if (!Concurrency::details::_Is_D3D_accelerator_view(_Av)) { throw runtime_exception("Cannot create D3D texture on a non-D3D accelerator_view.", E_INVALIDARG); } _Texture * _Tex_ptr = _Texture::_Adopt_texture(_Rank, _Id, _D3D_texture, _Av, _View_format); if (_Tex_ptr->_Is_staging()) { _Tex_ptr->_Map_buffer(_Write_access, true /* _Wait */); } Concurrency::extent<_Rank> _Ext = Concurrency::graphics::details::_Create_extent<_Rank>(_Tex_ptr->_Get_width(), _Tex_ptr->_Get_height(), _Tex_ptr->_Get_depth()); _Is_valid_extent(_Ext); details::_Is_valid_data_length(_Ext.size(), _Tex_ptr->_Get_bits_per_element()); *_Tex = _Tex_ptr; return _Ext; } #pragma warning( pop ) } // namespace details namespace direct3d { /// /// Get the D3D texture interface underlying a texture. /// /// /// The rank of the texture to get underlying D3D texture of.
/// /// /// The type of the elements in the texture to get underlying D3D texture of. /// /// /// A texture on a D3D accelerator_view for which the underlying D3D texture interface is returned. /// /// /// The IUnknown interface pointer corresponding to the D3D texture underlying the texture. /// template <typename _Value_type, int _Rank> _Ret_ IUnknown *get_texture(const texture<_Value_type, _Rank> &_Texture) __CPU_ONLY { return Concurrency::details::_D3D_interop::_Get_D3D_texture(Concurrency::details::_Get_texture(_Texture)); } /// /// Get the D3D texture interface underlying a texture viewed by a writeonly_texture_view. /// /// /// The rank of the texture to get underlying D3D texture of. /// /// /// The type of the elements in the texture to get underlying D3D texture of. /// /// /// A writeonly_texture_view of a texture on a D3D accelerator_view for which the underlying D3D texture interface is returned. /// /// /// The IUnknown interface pointer corresponding to the D3D texture underlying the texture. /// #pragma warning( push ) #pragma warning( disable : 4996 ) // writeonly_texture_view is deprecated template <typename _Value_type, int _Rank> _Ret_ IUnknown *get_texture(const writeonly_texture_view<_Value_type, _Rank> &_Texture) __CPU_ONLY { return Concurrency::details::_D3D_interop::_Get_D3D_buffer(Concurrency::details::_Get_texture(_Texture)); } #pragma warning( pop ) /// /// Get the D3D texture interface underlying a texture viewed by a texture_view. /// /// /// The rank of the texture to get underlying D3D texture of. /// /// /// The type of the elements in the texture to get underlying D3D texture of. /// /// /// A texture_view of a texture on a D3D accelerator_view for which the underlying D3D texture interface is returned. /// /// /// The IUnknown interface pointer corresponding to the D3D texture underlying the texture. /// template <typename _Value_type, int _Rank> _Ret_ IUnknown *get_texture(const texture_view<_Value_type, _Rank> &_Texture) __CPU_ONLY { return Concurrency::details::_D3D_interop::_Get_D3D_buffer(Concurrency::details::_Get_texture(_Texture)); } /// /// Create a texture from a D3D texture interface pointer, optionally using the specified DXGI format for all /// views on this texture. /// /// /// The rank of the texture to be created from the D3D texture. /// /// /// The type of the elements of the texture to be created from the D3D texture. /// /// /// A D3D accelerator view on which the texture is to be created. /// /// /// IUnknown interface pointer of the D3D texture to create the texture from. /// /// /// The DXGI format to use for views created from this texture. Pass DXGI_FORMAT_UNKNOWN (the default) /// to derive the format from the underlying format of _D3D_texture and the _Value_type of this template. /// The provided format must be compatible with the underlying format of _D3D_texture. /// /// /// A texture using the provided D3D texture. /// template <typename _Value_type, int _Rank> texture<_Value_type, _Rank> make_texture(const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_texture, DXGI_FORMAT _View_format /*= DXGI_FORMAT_UNKNOWN*/) __CPU_ONLY { _Texture * _Tex_ptr = NULL; #pragma warning( suppress: 6326 ) // Potential comparison of a constant with another constant Concurrency::extent<_Rank> _Ext = Concurrency::graphics::details::_Make_texture<_Rank>(_Av, _D3D_texture, _Short_vector_type_traits<_Value_type>::_Format_base_type_id == _Double_type ?
_Uint_type : _Short_vector_type_traits<_Value_type>::_Format_base_type_id, &_Tex_ptr, _View_format); _ASSERTE(_Tex_ptr); return texture<_Value_type, _Rank>(_Ext, _Texture_descriptor(_Tex_ptr)); } /// /// Get the D3D sampler state interface on the given accelerator view that represents the specified sampler object. /// /// /// A D3D accelerator view on which the D3D sampler state is to be created. /// /// /// A sampler object for which the underlying D3D sampler state interface is created. /// /// /// The IUnknown interface pointer corresponding to the D3D sampler state that represents the given sampler. /// inline _Ret_ IUnknown * get_sampler(const Concurrency::accelerator_view &_Av, const sampler &_Sampler) __CPU_ONLY { return Concurrency::details::_D3D_interop::_Get_D3D_sampler(_Av, _Sampler._Get_sampler_ptr()); } /// /// Create a sampler from a D3D sampler state interface pointer. /// /// /// IUnknown interface pointer of the D3D sampler state to create the sampler from. /// /// /// A sampler representing the provided D3D sampler state. /// inline sampler make_sampler(_In_ IUnknown *_D3D_sampler) __CPU_ONLY { return sampler(_Sampler_descriptor(_Sampler::_Create(_D3D_interop::_Get_D3D_sampler_data_ptr(_D3D_sampler)))); } /// /// Compares a 4-byte reference value and an 8-byte source value and /// accumulates a vector of 4 sums. Each sum corresponds to the masked /// sum of absolute differences of different byte alignments between /// the reference value and the source value. /// /// /// The reference array of 4 bytes in one uint value. /// /// /// The source array of 8 bytes in a vector of two uint values. /// /// /// A vector of 4 values to be added to the masked sum of absolute /// differences of the different byte alignments between the reference /// value and the source value. /// /// /// Returns a vector of 4 sums. Each sum corresponds to the masked sum /// of absolute differences of different byte alignments between the reference /// value and the source value. /// inline uint4 msad4(uint _Reference, uint2 _Source, uint4 _Accum) __GPU_ONLY { uint4 _Tmp; __dp_d3d_msad4(reinterpret_cast<unsigned int*>(&_Tmp), _Reference, _Source.x, _Source.y, _Accum.x, _Accum.y, _Accum.z, _Accum.w); return _Tmp; } } // namespace direct3d } //namespace graphics } //namespace Concurrency
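//
// Example (an illustrative sketch, not part of the header; values are hypothetical):
// msad4 computes masked sums of absolute differences over the four byte alignments,
// e.g. as a building block for block-matching kernels. Callable only in restrict(amp) code.
//
//   uint reference = 0x04030201u;            // 4 packed reference bytes
//   uint2 source(0x08070605u, 0x0C0B0A09u);  // 8 packed source bytes
//   uint4 accum(0u, 0u, 0u, 0u);
//   uint4 sums = direct3d::msad4(reference, source, accum);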